Repository: google-gemini/gemini-cli
Branch: main
Commit: 8615315711a8
Files: 2279
Total size: 18.6 MB
Directory structure:
gitextract_3lf9y2ue/
├── .allstar/
│ └── branch_protection.yaml
├── .editorconfig
├── .gcp/
│ ├── Dockerfile.gemini-code-builder
│ └── release-docker.yml
├── .gemini/
│ ├── config.yaml
│ └── settings.json
├── .geminiignore
├── .gitattributes
├── .github/
│ ├── CODEOWNERS
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug_report.yml
│ │ ├── feature_request.yml
│ │ └── website_issue.yml
│ ├── actions/
│ │ ├── calculate-vars/
│ │ │ └── action.yml
│ │ ├── create-pull-request/
│ │ │ └── action.yml
│ │ ├── npm-auth-token/
│ │ │ └── action.yml
│ │ ├── post-coverage-comment/
│ │ │ └── action.yml
│ │ ├── publish-release/
│ │ │ └── action.yml
│ │ ├── push-docker/
│ │ │ └── action.yml
│ │ ├── push-sandbox/
│ │ │ └── action.yml
│ │ ├── run-tests/
│ │ │ └── action.yml
│ │ ├── setup-npmrc/
│ │ │ └── action.yml
│ │ ├── tag-npm-release/
│ │ │ └── action.yml
│ │ └── verify-release/
│ │ └── action.yml
│ ├── dependabot.yml
│ ├── pull_request_template.md
│ ├── scripts/
│ │ ├── backfill-need-triage.cjs
│ │ ├── backfill-pr-notification.cjs
│ │ ├── pr-triage.sh
│ │ └── sync-maintainer-labels.cjs
│ └── workflows/
│ ├── chained_e2e.yml
│ ├── ci.yml
│ ├── community-report.yml
│ ├── deflake.yml
│ ├── docs-page-action.yml
│ ├── docs-rebuild.yml
│ ├── eval.yml
│ ├── evals-nightly.yml
│ ├── gemini-automated-issue-dedup.yml
│ ├── gemini-automated-issue-triage.yml
│ ├── gemini-scheduled-issue-dedup.yml
│ ├── gemini-scheduled-issue-triage.yml
│ ├── gemini-scheduled-pr-triage.yml
│ ├── gemini-scheduled-stale-issue-closer.yml
│ ├── gemini-scheduled-stale-pr-closer.yml
│ ├── gemini-self-assign-issue.yml
│ ├── issue-opened-labeler.yml
│ ├── label-backlog-child-issues.yml
│ ├── label-workstream-rollup.yml
│ ├── links.yml
│ ├── no-response.yml
│ ├── pr-contribution-guidelines-notifier.yml
│ ├── pr-rate-limiter.yaml
│ ├── release-change-tags.yml
│ ├── release-manual.yml
│ ├── release-nightly.yml
│ ├── release-notes.yml
│ ├── release-patch-0-from-comment.yml
│ ├── release-patch-1-create-pr.yml
│ ├── release-patch-2-trigger.yml
│ ├── release-patch-3-release.yml
│ ├── release-promote.yml
│ ├── release-rollback.yml
│ ├── release-sandbox.yml
│ ├── smoke-test.yml
│ ├── stale.yml
│ ├── test-build-binary.yml
│ ├── trigger_e2e.yml
│ ├── unassign-inactive-assignees.yml
│ └── verify-release.yml
├── .gitignore
├── .husky/
│ └── pre-commit
├── .lycheeignore
├── .npmrc
├── .nvmrc
├── .prettierignore
├── .prettierrc.json
├── .vscode/
│ ├── extensions.json
│ ├── launch.json
│ ├── settings.json
│ └── tasks.json
├── .yamllint.yml
├── CONTRIBUTING.md
├── Dockerfile
├── GEMINI.md
├── LICENSE
├── Makefile
├── README.md
├── ROADMAP.md
├── SECURITY.md
├── docs/
│ ├── admin/
│ │ └── enterprise-controls.md
│ ├── changelogs/
│ │ ├── index.md
│ │ ├── latest.md
│ │ └── preview.md
│ ├── cli/
│ │ ├── checkpointing.md
│ │ ├── cli-reference.md
│ │ ├── creating-skills.md
│ │ ├── custom-commands.md
│ │ ├── enterprise.md
│ │ ├── gemini-ignore.md
│ │ ├── gemini-md.md
│ │ ├── generation-settings.md
│ │ ├── headless.md
│ │ ├── model-routing.md
│ │ ├── model-steering.md
│ │ ├── model.md
│ │ ├── notifications.md
│ │ ├── plan-mode.md
│ │ ├── rewind.md
│ │ ├── sandbox.md
│ │ ├── session-management.md
│ │ ├── settings.md
│ │ ├── skills.md
│ │ ├── system-prompt.md
│ │ ├── telemetry.md
│ │ ├── themes.md
│ │ ├── token-caching.md
│ │ ├── trusted-folders.md
│ │ └── tutorials/
│ │ ├── automation.md
│ │ ├── file-management.md
│ │ ├── mcp-setup.md
│ │ ├── memory-management.md
│ │ ├── plan-mode-steering.md
│ │ ├── session-management.md
│ │ ├── shell-commands.md
│ │ ├── skills-getting-started.md
│ │ ├── task-planning.md
│ │ └── web-tools.md
│ ├── core/
│ │ ├── index.md
│ │ ├── local-model-routing.md
│ │ ├── remote-agents.md
│ │ └── subagents.md
│ ├── examples/
│ │ └── proxy-script.md
│ ├── extensions/
│ │ ├── best-practices.md
│ │ ├── index.md
│ │ ├── reference.md
│ │ ├── releasing.md
│ │ └── writing-extensions.md
│ ├── get-started/
│ │ ├── authentication.md
│ │ ├── examples.md
│ │ ├── gemini-3.md
│ │ ├── index.md
│ │ └── installation.md
│ ├── hooks/
│ │ ├── best-practices.md
│ │ ├── index.md
│ │ ├── reference.md
│ │ └── writing-hooks.md
│ ├── ide-integration/
│ │ ├── ide-companion-spec.md
│ │ └── index.md
│ ├── index.md
│ ├── integration-tests.md
│ ├── issue-and-pr-automation.md
│ ├── local-development.md
│ ├── mermaid/
│ │ ├── context.mmd
│ │ └── render-path.mmd
│ ├── npm.md
│ ├── redirects.json
│ ├── reference/
│ │ ├── commands.md
│ │ ├── configuration.md
│ │ ├── keyboard-shortcuts.md
│ │ ├── memport.md
│ │ ├── policy-engine.md
│ │ └── tools.md
│ ├── release-confidence.md
│ ├── releases.md
│ ├── resources/
│ │ ├── faq.md
│ │ ├── quota-and-pricing.md
│ │ ├── tos-privacy.md
│ │ ├── troubleshooting.md
│ │ └── uninstall.md
│ ├── sidebar.json
│ └── tools/
│ ├── activate-skill.md
│ ├── ask-user.md
│ ├── file-system.md
│ ├── internal-docs.md
│ ├── mcp-server.md
│ ├── memory.md
│ ├── planning.md
│ ├── shell.md
│ ├── todos.md
│ ├── web-fetch.md
│ └── web-search.md
├── esbuild.config.js
├── eslint.config.js
├── evals/
│ ├── README.md
│ ├── answer-vs-act.eval.ts
│ ├── app-test-helper.ts
│ ├── ask_user.eval.ts
│ ├── automated-tool-use.eval.ts
│ ├── concurrency-safety.eval.ts
│ ├── edit-locations-eval.eval.ts
│ ├── frugalReads.eval.ts
│ ├── frugalSearch.eval.ts
│ ├── generalist_agent.eval.ts
│ ├── generalist_delegation.eval.ts
│ ├── gitRepo.eval.ts
│ ├── grep_search_functionality.eval.ts
│ ├── hierarchical_memory.eval.ts
│ ├── interactive-hang.eval.ts
│ ├── model_steering.eval.ts
│ ├── plan_mode.eval.ts
│ ├── save_memory.eval.ts
│ ├── shell-efficiency.eval.ts
│ ├── subagents.eval.ts
│ ├── test-helper.ts
│ ├── tool_output_masking.eval.ts
│ ├── tracker.eval.ts
│ ├── validation_fidelity.eval.ts
│ ├── validation_fidelity_pre_existing_errors.eval.ts
│ └── vitest.config.ts
├── integration-tests/
│ ├── acp-env-auth.test.ts
│ ├── acp-telemetry.test.ts
│ ├── api-resilience.responses
│ ├── api-resilience.test.ts
│ ├── browser-agent.cleanup.responses
│ ├── browser-agent.confirmation.responses
│ ├── browser-agent.interaction.responses
│ ├── browser-agent.navigate-snapshot.responses
│ ├── browser-agent.screenshot.responses
│ ├── browser-agent.sequential.responses
│ ├── browser-agent.test.ts
│ ├── browser-policy.responses
│ ├── browser-policy.test.ts
│ ├── checkpointing.test.ts
│ ├── clipboard-linux.test.ts
│ ├── concurrency-limit.responses
│ ├── concurrency-limit.test.ts
│ ├── context-compress-interactive.compress-empty.responses
│ ├── context-compress-interactive.compress-failure.responses
│ ├── context-compress-interactive.compress.responses
│ ├── context-compress-interactive.test.ts
│ ├── ctrl-c-exit.test.ts
│ ├── deprecation-warnings.test.ts
│ ├── extensions-install.test.ts
│ ├── extensions-reload.test.ts
│ ├── file-system-interactive.test.ts
│ ├── file-system.test.ts
│ ├── flicker-detector.max-height.responses
│ ├── flicker.test.ts
│ ├── globalSetup.ts
│ ├── google_web_search.test.ts
│ ├── hooks-agent-flow-multistep.responses
│ ├── hooks-agent-flow.responses
│ ├── hooks-agent-flow.test.ts
│ ├── hooks-system.after-agent.responses
│ ├── hooks-system.after-model.responses
│ ├── hooks-system.after-tool-context.responses
│ ├── hooks-system.allow-tool.responses
│ ├── hooks-system.before-agent.responses
│ ├── hooks-system.before-model.responses
│ ├── hooks-system.before-tool-selection.responses
│ ├── hooks-system.before-tool-stop.responses
│ ├── hooks-system.block-tool.responses
│ ├── hooks-system.compress-auto.responses
│ ├── hooks-system.disabled-via-command.responses
│ ├── hooks-system.disabled-via-settings.responses
│ ├── hooks-system.error-handling.responses
│ ├── hooks-system.input-modification.responses
│ ├── hooks-system.input-validation.responses
│ ├── hooks-system.multiple-events.responses
│ ├── hooks-system.notification.responses
│ ├── hooks-system.sequential-execution.responses
│ ├── hooks-system.session-clear.responses
│ ├── hooks-system.session-startup.responses
│ ├── hooks-system.tail-tool-call.responses
│ ├── hooks-system.telemetry.responses
│ ├── hooks-system.test.ts
│ ├── json-output.error.responses
│ ├── json-output.france.responses
│ ├── json-output.session-id.responses
│ ├── json-output.test.ts
│ ├── list_directory.test.ts
│ ├── mcp_server_cyclic_schema.test.ts
│ ├── mixed-input-crash.test.ts
│ ├── parallel-tools.responses
│ ├── parallel-tools.test.ts
│ ├── plan-mode.test.ts
│ ├── policy-headless-readonly.responses
│ ├── policy-headless-shell-allowed.responses
│ ├── policy-headless-shell-denied.responses
│ ├── policy-headless.test.ts
│ ├── read_many_files.test.ts
│ ├── replace.test.ts
│ ├── resume_repro.responses
│ ├── resume_repro.test.ts
│ ├── ripgrep-real.test.ts
│ ├── run_shell_command.test.ts
│ ├── simple-mcp-server.test.ts
│ ├── skill-creator-scripts.test.ts
│ ├── skill-creator-vulnerabilities.test.ts
│ ├── stdin-context.test.ts
│ ├── stdout-stderr-output-error.responses
│ ├── stdout-stderr-output.responses
│ ├── stdout-stderr-output.test.ts
│ ├── symlink-install.test.ts
│ ├── telemetry.test.ts
│ ├── test-helper.ts
│ ├── test-mcp-server.ts
│ ├── tsconfig.json
│ ├── user-policy.responses
│ ├── user-policy.test.ts
│ ├── utf-bom-encoding.test.ts
│ ├── vitest.config.ts
│ └── write_file.test.ts
├── package.json
├── packages/
│ ├── a2a-server/
│ │ ├── GEMINI.md
│ │ ├── README.md
│ │ ├── development-extension-rfc.md
│ │ ├── index.ts
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── agent/
│ │ │ │ ├── executor.test.ts
│ │ │ │ ├── executor.ts
│ │ │ │ ├── task-event-driven.test.ts
│ │ │ │ ├── task.test.ts
│ │ │ │ └── task.ts
│ │ │ ├── commands/
│ │ │ │ ├── command-registry.test.ts
│ │ │ │ ├── command-registry.ts
│ │ │ │ ├── extensions.test.ts
│ │ │ │ ├── extensions.ts
│ │ │ │ ├── init.test.ts
│ │ │ │ ├── init.ts
│ │ │ │ ├── memory.test.ts
│ │ │ │ ├── memory.ts
│ │ │ │ ├── restore.test.ts
│ │ │ │ ├── restore.ts
│ │ │ │ └── types.ts
│ │ │ ├── config/
│ │ │ │ ├── config.test.ts
│ │ │ │ ├── config.ts
│ │ │ │ ├── extension.ts
│ │ │ │ ├── settings.test.ts
│ │ │ │ └── settings.ts
│ │ │ ├── http/
│ │ │ │ ├── app.test.ts
│ │ │ │ ├── app.ts
│ │ │ │ ├── endpoints.test.ts
│ │ │ │ ├── requestStorage.ts
│ │ │ │ └── server.ts
│ │ │ ├── index.ts
│ │ │ ├── persistence/
│ │ │ │ ├── gcs.test.ts
│ │ │ │ └── gcs.ts
│ │ │ ├── types.ts
│ │ │ └── utils/
│ │ │ ├── executor_utils.ts
│ │ │ ├── logger.ts
│ │ │ └── testing_utils.ts
│ │ ├── tsconfig.json
│ │ └── vitest.config.ts
│ ├── cli/
│ │ ├── GEMINI.md
│ │ ├── examples/
│ │ │ ├── ask-user-dialog-demo.tsx
│ │ │ └── scrollable-list-demo.tsx
│ │ ├── index.ts
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── __snapshots__/
│ │ │ │ └── nonInteractiveCli.test.ts.snap
│ │ │ ├── acp/
│ │ │ │ ├── acpClient.test.ts
│ │ │ │ ├── acpClient.ts
│ │ │ │ ├── acpErrors.test.ts
│ │ │ │ ├── acpErrors.ts
│ │ │ │ ├── acpResume.test.ts
│ │ │ │ ├── commandHandler.test.ts
│ │ │ │ ├── commandHandler.ts
│ │ │ │ ├── commands/
│ │ │ │ │ ├── commandRegistry.ts
│ │ │ │ │ ├── extensions.ts
│ │ │ │ │ ├── init.ts
│ │ │ │ │ ├── memory.ts
│ │ │ │ │ ├── restore.ts
│ │ │ │ │ └── types.ts
│ │ │ │ ├── fileSystemService.test.ts
│ │ │ │ └── fileSystemService.ts
│ │ │ ├── commands/
│ │ │ │ ├── extensions/
│ │ │ │ │ ├── configure.test.ts
│ │ │ │ │ ├── configure.ts
│ │ │ │ │ ├── disable.test.ts
│ │ │ │ │ ├── disable.ts
│ │ │ │ │ ├── enable.test.ts
│ │ │ │ │ ├── enable.ts
│ │ │ │ │ ├── examples/
│ │ │ │ │ │ ├── custom-commands/
│ │ │ │ │ │ │ ├── .gitignore
│ │ │ │ │ │ │ ├── commands/
│ │ │ │ │ │ │ │ └── fs/
│ │ │ │ │ │ │ │ └── grep-code.toml
│ │ │ │ │ │ │ └── gemini-extension.json
│ │ │ │ │ │ ├── exclude-tools/
│ │ │ │ │ │ │ ├── .gitignore
│ │ │ │ │ │ │ └── gemini-extension.json
│ │ │ │ │ │ ├── hooks/
│ │ │ │ │ │ │ ├── .gitignore
│ │ │ │ │ │ │ ├── gemini-extension.json
│ │ │ │ │ │ │ ├── hooks/
│ │ │ │ │ │ │ │ └── hooks.json
│ │ │ │ │ │ │ └── scripts/
│ │ │ │ │ │ │ └── on-start.js
│ │ │ │ │ │ ├── mcp-server/
│ │ │ │ │ │ │ ├── .gitignore
│ │ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ │ ├── example.js
│ │ │ │ │ │ │ ├── gemini-extension.json
│ │ │ │ │ │ │ └── package.json
│ │ │ │ │ │ ├── policies/
│ │ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ │ ├── gemini-extension.json
│ │ │ │ │ │ │ └── policies/
│ │ │ │ │ │ │ └── policies.toml
│ │ │ │ │ │ ├── skills/
│ │ │ │ │ │ │ ├── .gitignore
│ │ │ │ │ │ │ ├── gemini-extension.json
│ │ │ │ │ │ │ └── skills/
│ │ │ │ │ │ │ └── greeter/
│ │ │ │ │ │ │ └── SKILL.md
│ │ │ │ │ │ └── themes-example/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── gemini-extension.json
│ │ │ │ │ ├── install.test.ts
│ │ │ │ │ ├── install.ts
│ │ │ │ │ ├── link.test.ts
│ │ │ │ │ ├── link.ts
│ │ │ │ │ ├── list.test.ts
│ │ │ │ │ ├── list.ts
│ │ │ │ │ ├── new.test.ts
│ │ │ │ │ ├── new.ts
│ │ │ │ │ ├── uninstall.test.ts
│ │ │ │ │ ├── uninstall.ts
│ │ │ │ │ ├── update.test.ts
│ │ │ │ │ ├── update.ts
│ │ │ │ │ ├── utils.ts
│ │ │ │ │ ├── validate.test.ts
│ │ │ │ │ └── validate.ts
│ │ │ │ ├── extensions.test.tsx
│ │ │ │ ├── extensions.tsx
│ │ │ │ ├── hooks/
│ │ │ │ │ ├── migrate.test.ts
│ │ │ │ │ └── migrate.ts
│ │ │ │ ├── hooks.tsx
│ │ │ │ ├── mcp/
│ │ │ │ │ ├── add.test.ts
│ │ │ │ │ ├── add.ts
│ │ │ │ │ ├── enableDisable.ts
│ │ │ │ │ ├── list.test.ts
│ │ │ │ │ ├── list.ts
│ │ │ │ │ ├── remove.test.ts
│ │ │ │ │ └── remove.ts
│ │ │ │ ├── mcp.test.ts
│ │ │ │ ├── mcp.ts
│ │ │ │ ├── skills/
│ │ │ │ │ ├── disable.test.ts
│ │ │ │ │ ├── disable.ts
│ │ │ │ │ ├── enable.test.ts
│ │ │ │ │ ├── enable.ts
│ │ │ │ │ ├── install.test.ts
│ │ │ │ │ ├── install.ts
│ │ │ │ │ ├── link.test.ts
│ │ │ │ │ ├── link.ts
│ │ │ │ │ ├── list.test.ts
│ │ │ │ │ ├── list.ts
│ │ │ │ │ ├── uninstall.test.ts
│ │ │ │ │ └── uninstall.ts
│ │ │ │ ├── skills.test.tsx
│ │ │ │ ├── skills.tsx
│ │ │ │ ├── utils.test.ts
│ │ │ │ └── utils.ts
│ │ │ ├── config/
│ │ │ │ ├── auth.test.ts
│ │ │ │ ├── auth.ts
│ │ │ │ ├── config.integration.test.ts
│ │ │ │ ├── config.test.ts
│ │ │ │ ├── config.ts
│ │ │ │ ├── extension-manager-agents.test.ts
│ │ │ │ ├── extension-manager-hydration.test.ts
│ │ │ │ ├── extension-manager-permissions.test.ts
│ │ │ │ ├── extension-manager-scope.test.ts
│ │ │ │ ├── extension-manager-skills.test.ts
│ │ │ │ ├── extension-manager-themes.spec.ts
│ │ │ │ ├── extension-manager.test.ts
│ │ │ │ ├── extension-manager.ts
│ │ │ │ ├── extension.test.ts
│ │ │ │ ├── extension.ts
│ │ │ │ ├── extensionRegistryClient.test.ts
│ │ │ │ ├── extensionRegistryClient.ts
│ │ │ │ ├── extensions/
│ │ │ │ │ ├── __snapshots__/
│ │ │ │ │ │ └── consent.test.ts.snap
│ │ │ │ │ ├── consent.test.ts
│ │ │ │ │ ├── consent.ts
│ │ │ │ │ ├── extensionEnablement.test.ts
│ │ │ │ │ ├── extensionEnablement.ts
│ │ │ │ │ ├── extensionSettings.test.ts
│ │ │ │ │ ├── extensionSettings.ts
│ │ │ │ │ ├── extensionUpdates.test.ts
│ │ │ │ │ ├── github.test.ts
│ │ │ │ │ ├── github.ts
│ │ │ │ │ ├── github_fetch.test.ts
│ │ │ │ │ ├── github_fetch.ts
│ │ │ │ │ ├── storage.test.ts
│ │ │ │ │ ├── storage.ts
│ │ │ │ │ ├── update.test.ts
│ │ │ │ │ ├── update.ts
│ │ │ │ │ ├── variableSchema.ts
│ │ │ │ │ ├── variables.test.ts
│ │ │ │ │ └── variables.ts
│ │ │ │ ├── footerItems.test.ts
│ │ │ │ ├── footerItems.ts
│ │ │ │ ├── mcp/
│ │ │ │ │ ├── index.ts
│ │ │ │ │ ├── mcpServerEnablement.test.ts
│ │ │ │ │ └── mcpServerEnablement.ts
│ │ │ │ ├── policy-engine.integration.test.ts
│ │ │ │ ├── policy.test.ts
│ │ │ │ ├── policy.ts
│ │ │ │ ├── sandboxConfig.test.ts
│ │ │ │ ├── sandboxConfig.ts
│ │ │ │ ├── settingPaths.test.ts
│ │ │ │ ├── settingPaths.ts
│ │ │ │ ├── settings-validation.test.ts
│ │ │ │ ├── settings-validation.ts
│ │ │ │ ├── settings.test.ts
│ │ │ │ ├── settings.ts
│ │ │ │ ├── settingsSchema.test.ts
│ │ │ │ ├── settingsSchema.ts
│ │ │ │ ├── settings_repro.test.ts
│ │ │ │ ├── settings_validation_warning.test.ts
│ │ │ │ ├── trustedFolders.test.ts
│ │ │ │ ├── trustedFolders.ts
│ │ │ │ └── workspace-policy-cli.test.ts
│ │ │ ├── core/
│ │ │ │ ├── auth.test.ts
│ │ │ │ ├── auth.ts
│ │ │ │ ├── initializer.test.ts
│ │ │ │ ├── initializer.ts
│ │ │ │ ├── theme.test.ts
│ │ │ │ └── theme.ts
│ │ │ ├── deferred.test.ts
│ │ │ ├── deferred.ts
│ │ │ ├── gemini.test.tsx
│ │ │ ├── gemini.tsx
│ │ │ ├── gemini_cleanup.test.tsx
│ │ │ ├── integration-tests/
│ │ │ │ └── modelSteering.test.tsx
│ │ │ ├── interactiveCli.tsx
│ │ │ ├── nonInteractiveCli.test.ts
│ │ │ ├── nonInteractiveCli.ts
│ │ │ ├── nonInteractiveCliCommands.ts
│ │ │ ├── patches/
│ │ │ │ └── is-in-ci.ts
│ │ │ ├── services/
│ │ │ │ ├── BuiltinCommandLoader.test.ts
│ │ │ │ ├── BuiltinCommandLoader.ts
│ │ │ │ ├── CommandService.test.ts
│ │ │ │ ├── CommandService.ts
│ │ │ │ ├── FileCommandLoader.test.ts
│ │ │ │ ├── FileCommandLoader.ts
│ │ │ │ ├── McpPromptLoader.test.ts
│ │ │ │ ├── McpPromptLoader.ts
│ │ │ │ ├── SkillCommandLoader.test.ts
│ │ │ │ ├── SkillCommandLoader.ts
│ │ │ │ ├── SlashCommandConflictHandler.test.ts
│ │ │ │ ├── SlashCommandConflictHandler.ts
│ │ │ │ ├── SlashCommandResolver.test.ts
│ │ │ │ ├── SlashCommandResolver.ts
│ │ │ │ ├── prompt-processors/
│ │ │ │ │ ├── argumentProcessor.test.ts
│ │ │ │ │ ├── argumentProcessor.ts
│ │ │ │ │ ├── atFileProcessor.test.ts
│ │ │ │ │ ├── atFileProcessor.ts
│ │ │ │ │ ├── injectionParser.test.ts
│ │ │ │ │ ├── injectionParser.ts
│ │ │ │ │ ├── shellProcessor.test.ts
│ │ │ │ │ ├── shellProcessor.ts
│ │ │ │ │ └── types.ts
│ │ │ │ └── types.ts
│ │ │ ├── test-utils/
│ │ │ │ ├── AppRig.test.tsx
│ │ │ │ ├── AppRig.tsx
│ │ │ │ ├── MockShellExecutionService.ts
│ │ │ │ ├── async.ts
│ │ │ │ ├── createExtension.ts
│ │ │ │ ├── customMatchers.ts
│ │ │ │ ├── fixtures/
│ │ │ │ │ ├── simple.responses
│ │ │ │ │ └── steering.responses
│ │ │ │ ├── mockCommandContext.test.ts
│ │ │ │ ├── mockCommandContext.ts
│ │ │ │ ├── mockConfig.ts
│ │ │ │ ├── mockDebugLogger.ts
│ │ │ │ ├── persistentStateFake.ts
│ │ │ │ ├── render.test.tsx
│ │ │ │ ├── render.tsx
│ │ │ │ ├── settings.ts
│ │ │ │ └── svg.ts
│ │ │ ├── ui/
│ │ │ │ ├── App.test.tsx
│ │ │ │ ├── App.tsx
│ │ │ │ ├── AppContainer.test.tsx
│ │ │ │ ├── AppContainer.tsx
│ │ │ │ ├── IdeIntegrationNudge.test.tsx
│ │ │ │ ├── IdeIntegrationNudge.tsx
│ │ │ │ ├── __snapshots__/
│ │ │ │ │ └── App.test.tsx.snap
│ │ │ │ ├── auth/
│ │ │ │ │ ├── ApiAuthDialog.test.tsx
│ │ │ │ │ ├── ApiAuthDialog.tsx
│ │ │ │ │ ├── AuthDialog.test.tsx
│ │ │ │ │ ├── AuthDialog.tsx
│ │ │ │ │ ├── AuthInProgress.test.tsx
│ │ │ │ │ ├── AuthInProgress.tsx
│ │ │ │ │ ├── BannedAccountDialog.test.tsx
│ │ │ │ │ ├── BannedAccountDialog.tsx
│ │ │ │ │ ├── LoginWithGoogleRestartDialog.test.tsx
│ │ │ │ │ ├── LoginWithGoogleRestartDialog.tsx
│ │ │ │ │ ├── __snapshots__/
│ │ │ │ │ │ ├── ApiAuthDialog.test.tsx.snap
│ │ │ │ │ │ ├── AuthDialog.test.tsx.snap
│ │ │ │ │ │ ├── BannedAccountDialog.test.tsx.snap
│ │ │ │ │ │ └── LoginWithGoogleRestartDialog.test.tsx.snap
│ │ │ │ │ ├── useAuth.test.tsx
│ │ │ │ │ └── useAuth.ts
│ │ │ │ ├── colors.ts
│ │ │ │ ├── commands/
│ │ │ │ │ ├── aboutCommand.test.ts
│ │ │ │ │ ├── aboutCommand.ts
│ │ │ │ │ ├── agentsCommand.test.ts
│ │ │ │ │ ├── agentsCommand.ts
│ │ │ │ │ ├── authCommand.test.ts
│ │ │ │ │ ├── authCommand.ts
│ │ │ │ │ ├── bugCommand.test.ts
│ │ │ │ │ ├── bugCommand.ts
│ │ │ │ │ ├── chatCommand.test.ts
│ │ │ │ │ ├── chatCommand.ts
│ │ │ │ │ ├── clearCommand.test.ts
│ │ │ │ │ ├── clearCommand.ts
│ │ │ │ │ ├── commandsCommand.test.ts
│ │ │ │ │ ├── commandsCommand.ts
│ │ │ │ │ ├── compressCommand.test.ts
│ │ │ │ │ ├── compressCommand.ts
│ │ │ │ │ ├── copyCommand.test.ts
│ │ │ │ │ ├── copyCommand.ts
│ │ │ │ │ ├── corgiCommand.test.ts
│ │ │ │ │ ├── corgiCommand.ts
│ │ │ │ │ ├── directoryCommand.test.tsx
│ │ │ │ │ ├── directoryCommand.tsx
│ │ │ │ │ ├── docsCommand.test.ts
│ │ │ │ │ ├── docsCommand.ts
│ │ │ │ │ ├── editorCommand.test.ts
│ │ │ │ │ ├── editorCommand.ts
│ │ │ │ │ ├── extensionsCommand.test.ts
│ │ │ │ │ ├── extensionsCommand.ts
│ │ │ │ │ ├── footerCommand.tsx
│ │ │ │ │ ├── helpCommand.test.ts
│ │ │ │ │ ├── helpCommand.ts
│ │ │ │ │ ├── hooksCommand.test.ts
│ │ │ │ │ ├── hooksCommand.ts
│ │ │ │ │ ├── ideCommand.test.ts
│ │ │ │ │ ├── ideCommand.ts
│ │ │ │ │ ├── initCommand.test.ts
│ │ │ │ │ ├── initCommand.ts
│ │ │ │ │ ├── mcpCommand.test.ts
│ │ │ │ │ ├── mcpCommand.ts
│ │ │ │ │ ├── memoryCommand.test.ts
│ │ │ │ │ ├── memoryCommand.ts
│ │ │ │ │ ├── modelCommand.test.ts
│ │ │ │ │ ├── modelCommand.ts
│ │ │ │ │ ├── oncallCommand.tsx
│ │ │ │ │ ├── permissionsCommand.test.ts
│ │ │ │ │ ├── permissionsCommand.ts
│ │ │ │ │ ├── planCommand.test.ts
│ │ │ │ │ ├── planCommand.ts
│ │ │ │ │ ├── policiesCommand.test.ts
│ │ │ │ │ ├── policiesCommand.ts
│ │ │ │ │ ├── privacyCommand.test.ts
│ │ │ │ │ ├── privacyCommand.ts
│ │ │ │ │ ├── profileCommand.ts
│ │ │ │ │ ├── quitCommand.test.ts
│ │ │ │ │ ├── quitCommand.ts
│ │ │ │ │ ├── restoreCommand.test.ts
│ │ │ │ │ ├── restoreCommand.ts
│ │ │ │ │ ├── resumeCommand.test.ts
│ │ │ │ │ ├── resumeCommand.ts
│ │ │ │ │ ├── rewindCommand.test.tsx
│ │ │ │ │ ├── rewindCommand.tsx
│ │ │ │ │ ├── settingsCommand.test.ts
│ │ │ │ │ ├── settingsCommand.ts
│ │ │ │ │ ├── setupGithubCommand.test.ts
│ │ │ │ │ ├── setupGithubCommand.ts
│ │ │ │ │ ├── shellsCommand.test.ts
│ │ │ │ │ ├── shellsCommand.ts
│ │ │ │ │ ├── shortcutsCommand.ts
│ │ │ │ │ ├── skillsCommand.test.ts
│ │ │ │ │ ├── skillsCommand.ts
│ │ │ │ │ ├── statsCommand.test.ts
│ │ │ │ │ ├── statsCommand.ts
│ │ │ │ │ ├── terminalSetupCommand.test.ts
│ │ │ │ │ ├── terminalSetupCommand.ts
│ │ │ │ │ ├── themeCommand.test.ts
│ │ │ │ │ ├── themeCommand.ts
│ │ │ │ │ ├── toolsCommand.test.ts
│ │ │ │ │ ├── toolsCommand.ts
│ │ │ │ │ ├── types.ts
│ │ │ │ │ ├── upgradeCommand.test.ts
│ │ │ │ │ ├── upgradeCommand.ts
│ │ │ │ │ └── vimCommand.ts
│ │ │ │ ├── components/
│ │ │ │ │ ├── AboutBox.test.tsx
│ │ │ │ │ ├── AboutBox.tsx
│ │ │ │ │ ├── AdminSettingsChangedDialog.test.tsx
│ │ │ │ │ ├── AdminSettingsChangedDialog.tsx
│ │ │ │ │ ├── AgentConfigDialog.test.tsx
│ │ │ │ │ ├── AgentConfigDialog.tsx
│ │ │ │ │ ├── AlternateBufferQuittingDisplay.test.tsx
│ │ │ │ │ ├── AlternateBufferQuittingDisplay.tsx
│ │ │ │ │ ├── AnsiOutput.test.tsx
│ │ │ │ │ ├── AnsiOutput.tsx
│ │ │ │ │ ├── AppHeader.test.tsx
│ │ │ │ │ ├── AppHeader.tsx
│ │ │ │ │ ├── AppHeaderIcon.test.tsx
│ │ │ │ │ ├── ApprovalModeIndicator.test.tsx
│ │ │ │ │ ├── ApprovalModeIndicator.tsx
│ │ │ │ │ ├── AsciiArt.ts
│ │ │ │ │ ├── AskUserDialog.test.tsx
│ │ │ │ │ ├── AskUserDialog.tsx
│ │ │ │ │ ├── BackgroundShellDisplay.test.tsx
│ │ │ │ │ ├── BackgroundShellDisplay.tsx
│ │ │ │ │ ├── Banner.test.tsx
│ │ │ │ │ ├── Banner.tsx
│ │ │ │ │ ├── BubblingRegression.test.tsx
│ │ │ │ │ ├── Checklist.test.tsx
│ │ │ │ │ ├── Checklist.tsx
│ │ │ │ │ ├── ChecklistItem.test.tsx
│ │ │ │ │ ├── ChecklistItem.tsx
│ │ │ │ │ ├── CliSpinner.test.tsx
│ │ │ │ │ ├── CliSpinner.tsx
│ │ │ │ │ ├── ColorsDisplay.test.tsx
│ │ │ │ │ ├── ColorsDisplay.tsx
│ │ │ │ │ ├── Composer.test.tsx
│ │ │ │ │ ├── Composer.tsx
│ │ │ │ │ ├── ConfigExtensionDialog.tsx
│ │ │ │ │ ├── ConfigInitDisplay.test.tsx
│ │ │ │ │ ├── ConfigInitDisplay.tsx
│ │ │ │ │ ├── ConsentPrompt.test.tsx
│ │ │ │ │ ├── ConsentPrompt.tsx
│ │ │ │ │ ├── ConsoleSummaryDisplay.test.tsx
│ │ │ │ │ ├── ConsoleSummaryDisplay.tsx
│ │ │ │ │ ├── ContextSummaryDisplay.test.tsx
│ │ │ │ │ ├── ContextSummaryDisplay.tsx
│ │ │ │ │ ├── ContextUsageDisplay.test.tsx
│ │ │ │ │ ├── ContextUsageDisplay.tsx
│ │ │ │ │ ├── CopyModeWarning.test.tsx
│ │ │ │ │ ├── CopyModeWarning.tsx
│ │ │ │ │ ├── DebugProfiler.test.tsx
│ │ │ │ │ ├── DebugProfiler.tsx
│ │ │ │ │ ├── DetailedMessagesDisplay.test.tsx
│ │ │ │ │ ├── DetailedMessagesDisplay.tsx
│ │ │ │ │ ├── DialogManager.test.tsx
│ │ │ │ │ ├── DialogManager.tsx
│ │ │ │ │ ├── EditorSettingsDialog.test.tsx
│ │ │ │ │ ├── EditorSettingsDialog.tsx
│ │ │ │ │ ├── EmptyWalletDialog.test.tsx
│ │ │ │ │ ├── EmptyWalletDialog.tsx
│ │ │ │ │ ├── ExitPlanModeDialog.test.tsx
│ │ │ │ │ ├── ExitPlanModeDialog.tsx
│ │ │ │ │ ├── ExitWarning.test.tsx
│ │ │ │ │ ├── ExitWarning.tsx
│ │ │ │ │ ├── FolderTrustDialog.test.tsx
│ │ │ │ │ ├── FolderTrustDialog.tsx
│ │ │ │ │ ├── Footer.test.tsx
│ │ │ │ │ ├── Footer.tsx
│ │ │ │ │ ├── FooterConfigDialog.test.tsx
│ │ │ │ │ ├── FooterConfigDialog.tsx
│ │ │ │ │ ├── GeminiRespondingSpinner.test.tsx
│ │ │ │ │ ├── GeminiRespondingSpinner.tsx
│ │ │ │ │ ├── GeminiSpinner.tsx
│ │ │ │ │ ├── GradientRegression.test.tsx
│ │ │ │ │ ├── Header.test.tsx
│ │ │ │ │ ├── Header.tsx
│ │ │ │ │ ├── Help.test.tsx
│ │ │ │ │ ├── Help.tsx
│ │ │ │ │ ├── HistoryItemDisplay.test.tsx
│ │ │ │ │ ├── HistoryItemDisplay.tsx
│ │ │ │ │ ├── HookStatusDisplay.test.tsx
│ │ │ │ │ ├── HookStatusDisplay.tsx
│ │ │ │ │ ├── HooksDialog.test.tsx
│ │ │ │ │ ├── HooksDialog.tsx
│ │ │ │ │ ├── IdeTrustChangeDialog.test.tsx
│ │ │ │ │ ├── IdeTrustChangeDialog.tsx
│ │ │ │ │ ├── InputPrompt.test.tsx
│ │ │ │ │ ├── InputPrompt.tsx
│ │ │ │ │ ├── LoadingIndicator.test.tsx
│ │ │ │ │ ├── LoadingIndicator.tsx
│ │ │ │ │ ├── LogoutConfirmationDialog.test.tsx
│ │ │ │ │ ├── LogoutConfirmationDialog.tsx
│ │ │ │ │ ├── LoopDetectionConfirmation.test.tsx
│ │ │ │ │ ├── LoopDetectionConfirmation.tsx
│ │ │ │ │ ├── MainContent.test.tsx
│ │ │ │ │ ├── MainContent.tsx
│ │ │ │ │ ├── MemoryUsageDisplay.test.tsx
│ │ │ │ │ ├── MemoryUsageDisplay.tsx
│ │ │ │ │ ├── ModelDialog.test.tsx
│ │ │ │ │ ├── ModelDialog.tsx
│ │ │ │ │ ├── ModelStatsDisplay.test.tsx
│ │ │ │ │ ├── ModelStatsDisplay.tsx
│ │ │ │ │ ├── MultiFolderTrustDialog.test.tsx
│ │ │ │ │ ├── MultiFolderTrustDialog.tsx
│ │ │ │ │ ├── NewAgentsNotification.test.tsx
│ │ │ │ │ ├── NewAgentsNotification.tsx
│ │ │ │ │ ├── Notifications.test.tsx
│ │ │ │ │ ├── Notifications.tsx
│ │ │ │ │ ├── OverageMenuDialog.test.tsx
│ │ │ │ │ ├── OverageMenuDialog.tsx
│ │ │ │ │ ├── PermissionsModifyTrustDialog.test.tsx
│ │ │ │ │ ├── PermissionsModifyTrustDialog.tsx
│ │ │ │ │ ├── PolicyUpdateDialog.test.tsx
│ │ │ │ │ ├── PolicyUpdateDialog.tsx
│ │ │ │ │ ├── ProQuotaDialog.test.tsx
│ │ │ │ │ ├── ProQuotaDialog.tsx
│ │ │ │ │ ├── QueuedMessageDisplay.test.tsx
│ │ │ │ │ ├── QueuedMessageDisplay.tsx
│ │ │ │ │ ├── QuittingDisplay.test.tsx
│ │ │ │ │ ├── QuittingDisplay.tsx
│ │ │ │ │ ├── QuotaDisplay.test.tsx
│ │ │ │ │ ├── QuotaDisplay.tsx
│ │ │ │ │ ├── QuotaStatsInfo.tsx
│ │ │ │ │ ├── RawMarkdownIndicator.test.tsx
│ │ │ │ │ ├── RawMarkdownIndicator.tsx
│ │ │ │ │ ├── RewindConfirmation.test.tsx
│ │ │ │ │ ├── RewindConfirmation.tsx
│ │ │ │ │ ├── RewindViewer.test.tsx
│ │ │ │ │ ├── RewindViewer.tsx
│ │ │ │ │ ├── SessionBrowser/
│ │ │ │ │ │ ├── SessionBrowserEmpty.tsx
│ │ │ │ │ │ ├── SessionBrowserError.tsx
│ │ │ │ │ │ ├── SessionBrowserLoading.tsx
│ │ │ │ │ │ ├── SessionBrowserNav.tsx
│ │ │ │ │ │ ├── SessionBrowserSearchNav.test.tsx
│ │ │ │ │ │ ├── SessionBrowserStates.test.tsx
│ │ │ │ │ │ ├── SessionListHeader.tsx
│ │ │ │ │ │ ├── __snapshots__/
│ │ │ │ │ │ │ ├── SessionBrowserSearchNav.test.tsx.snap
│ │ │ │ │ │ │ └── SessionBrowserStates.test.tsx.snap
│ │ │ │ │ │ ├── utils.test.ts
│ │ │ │ │ │ └── utils.ts
│ │ │ │ │ ├── SessionBrowser.test.tsx
│ │ │ │ │ ├── SessionBrowser.tsx
│ │ │ │ │ ├── SessionSummaryDisplay.test.tsx
│ │ │ │ │ ├── SessionSummaryDisplay.tsx
│ │ │ │ │ ├── SettingsDialog.test.tsx
│ │ │ │ │ ├── SettingsDialog.tsx
│ │ │ │ │ ├── ShellInputPrompt.test.tsx
│ │ │ │ │ ├── ShellInputPrompt.tsx
│ │ │ │ │ ├── ShellModeIndicator.test.tsx
│ │ │ │ │ ├── ShellModeIndicator.tsx
│ │ │ │ │ ├── ShortcutsHelp.test.tsx
│ │ │ │ │ ├── ShortcutsHelp.tsx
│ │ │ │ │ ├── ShortcutsHint.tsx
│ │ │ │ │ ├── ShowMoreLines.test.tsx
│ │ │ │ │ ├── ShowMoreLines.tsx
│ │ │ │ │ ├── ShowMoreLinesLayout.test.tsx
│ │ │ │ │ ├── StatsDisplay.test.tsx
│ │ │ │ │ ├── StatsDisplay.tsx
│ │ │ │ │ ├── StatusDisplay.test.tsx
│ │ │ │ │ ├── StatusDisplay.tsx
│ │ │ │ │ ├── StickyHeader.test.tsx
│ │ │ │ │ ├── StickyHeader.tsx
│ │ │ │ │ ├── SuggestionsDisplay.test.tsx
│ │ │ │ │ ├── SuggestionsDisplay.tsx
│ │ │ │ │ ├── Table.test.tsx
│ │ │ │ │ ├── Table.tsx
│ │ │ │ │ ├── ThemeDialog.test.tsx
│ │ │ │ │ ├── ThemeDialog.tsx
│ │ │ │ │ ├── ThemedGradient.test.tsx
│ │ │ │ │ ├── ThemedGradient.tsx
│ │ │ │ │ ├── Tips.test.tsx
│ │ │ │ │ ├── Tips.tsx
│ │ │ │ │ ├── ToastDisplay.test.tsx
│ │ │ │ │ ├── ToastDisplay.tsx
│ │ │ │ │ ├── ToolConfirmationQueue.test.tsx
│ │ │ │ │ ├── ToolConfirmationQueue.tsx
│ │ │ │ │ ├── ToolStatsDisplay.test.tsx
│ │ │ │ │ ├── ToolStatsDisplay.tsx
│ │ │ │ │ ├── UpdateNotification.test.tsx
│ │ │ │ │ ├── UpdateNotification.tsx
│ │ │ │ │ ├── UserIdentity.test.tsx
│ │ │ │ │ ├── UserIdentity.tsx
│ │ │ │ │ ├── ValidationDialog.test.tsx
│ │ │ │ │ ├── ValidationDialog.tsx
│ │ │ │ │ ├── __snapshots__/
│ │ │ │ │ │ ├── AdminSettingsChangedDialog.test.tsx.snap
│ │ │ │ │ │ ├── AlternateBufferQuittingDisplay.test.tsx.snap
│ │ │ │ │ │ ├── AppHeader.test.tsx.snap
│ │ │ │ │ │ ├── AppHeaderIcon.test.tsx.snap
│ │ │ │ │ │ ├── ApprovalModeIndicator.test.tsx.snap
│ │ │ │ │ │ ├── AskUserDialog.test.tsx.snap
│ │ │ │ │ │ ├── BackgroundShellDisplay.test.tsx.snap
│ │ │ │ │ │ ├── Banner.test.tsx.snap
│ │ │ │ │ │ ├── Checklist.test.tsx.snap
│ │ │ │ │ │ ├── ChecklistItem.test.tsx.snap
│ │ │ │ │ │ ├── Composer.test.tsx.snap
│ │ │ │ │ │ ├── ConfigInitDisplay.test.tsx.snap
│ │ │ │ │ │ ├── ContextSummaryDisplay.test.tsx.snap
│ │ │ │ │ │ ├── DetailedMessagesDisplay.test.tsx.snap
│ │ │ │ │ │ ├── EditorSettingsDialog.test.tsx.snap
│ │ │ │ │ │ ├── EmptyWalletDialog.test.tsx.snap
│ │ │ │ │ │ ├── ExitPlanModeDialog.test.tsx.snap
│ │ │ │ │ │ ├── Footer.test.tsx.snap
│ │ │ │ │ │ ├── FooterConfigDialog.test.tsx.snap
│ │ │ │ │ │ ├── HistoryItemDisplay.test.tsx.snap
│ │ │ │ │ │ ├── HookStatusDisplay.test.tsx.snap
│ │ │ │ │ │ ├── HooksDialog.test.tsx.snap
│ │ │ │ │ │ ├── IDEContextDetailDisplay.test.tsx.snap
│ │ │ │ │ │ ├── InputPrompt.test.tsx.snap
│ │ │ │ │ │ ├── LoadingIndicator.test.tsx.snap
│ │ │ │ │ │ ├── LoopDetectionConfirmation.test.tsx.snap
│ │ │ │ │ │ ├── MainContent.test.tsx.snap
│ │ │ │ │ │ ├── ModelStatsDisplay.test.tsx.snap
│ │ │ │ │ │ ├── NewAgentsNotification.test.tsx.snap
│ │ │ │ │ │ ├── Notifications.test.tsx.snap
│ │ │ │ │ │ ├── OverageMenuDialog.test.tsx.snap
│ │ │ │ │ │ ├── PolicyUpdateDialog.test.tsx.snap
│ │ │ │ │ │ ├── PrepareLabel.test.tsx.snap
│ │ │ │ │ │ ├── QuotaDisplay.test.tsx.snap
│ │ │ │ │ │ ├── RewindConfirmation.test.tsx.snap
│ │ │ │ │ │ ├── RewindViewer.test.tsx.snap
│ │ │ │ │ │ ├── SessionBrowser.test.tsx.snap
│ │ │ │ │ │ ├── SessionSummaryDisplay.test.tsx.snap
│ │ │ │ │ │ ├── SettingsDialog.test.tsx.snap
│ │ │ │ │ │ ├── ShortcutsHelp.test.tsx.snap
│ │ │ │ │ │ ├── StatsDisplay.test.tsx.snap
│ │ │ │ │ │ ├── StatusDisplay.test.tsx.snap
│ │ │ │ │ │ ├── SuggestionsDisplay.test.tsx.snap
│ │ │ │ │ │ ├── Table.test.tsx.snap
│ │ │ │ │ │ ├── ThemeDialog.test.tsx.snap
│ │ │ │ │ │ ├── Tips.test.tsx.snap
│ │ │ │ │ │ ├── ToastDisplay.test.tsx.snap
│ │ │ │ │ │ ├── ToolConfirmationQueue.test.tsx.snap
│ │ │ │ │ │ └── ToolStatsDisplay.test.tsx.snap
│ │ │ │ │ ├── messages/
│ │ │ │ │ │ ├── CompressionMessage.test.tsx
│ │ │ │ │ │ ├── CompressionMessage.tsx
│ │ │ │ │ │ ├── DiffRenderer.test.tsx
│ │ │ │ │ │ ├── DiffRenderer.tsx
│ │ │ │ │ │ ├── ErrorMessage.test.tsx
│ │ │ │ │ │ ├── ErrorMessage.tsx
│ │ │ │ │ │ ├── GeminiMessage.test.tsx
│ │ │ │ │ │ ├── GeminiMessage.tsx
│ │ │ │ │ │ ├── GeminiMessageContent.tsx
│ │ │ │ │ │ ├── HintMessage.tsx
│ │ │ │ │ │ ├── InfoMessage.test.tsx
│ │ │ │ │ │ ├── InfoMessage.tsx
│ │ │ │ │ │ ├── ModelMessage.tsx
│ │ │ │ │ │ ├── RedirectionConfirmation.test.tsx
│ │ │ │ │ │ ├── ShellToolMessage.test.tsx
│ │ │ │ │ │ ├── ShellToolMessage.tsx
│ │ │ │ │ │ ├── SubagentGroupDisplay.test.tsx
│ │ │ │ │ │ ├── SubagentGroupDisplay.tsx
│ │ │ │ │ │ ├── SubagentProgressDisplay.test.tsx
│ │ │ │ │ │ ├── SubagentProgressDisplay.tsx
│ │ │ │ │ │ ├── ThinkingMessage.test.tsx
│ │ │ │ │ │ ├── ThinkingMessage.tsx
│ │ │ │ │ │ ├── Todo.test.tsx
│ │ │ │ │ │ ├── Todo.tsx
│ │ │ │ │ │ ├── ToolConfirmationMessage.test.tsx
│ │ │ │ │ │ ├── ToolConfirmationMessage.tsx
│ │ │ │ │ │ ├── ToolGroupMessage.test.tsx
│ │ │ │ │ │ ├── ToolGroupMessage.tsx
│ │ │ │ │ │ ├── ToolMessage.test.tsx
│ │ │ │ │ │ ├── ToolMessage.tsx
│ │ │ │ │ │ ├── ToolMessageFocusHint.test.tsx
│ │ │ │ │ │ ├── ToolMessageRawMarkdown.test.tsx
│ │ │ │ │ │ ├── ToolOverflowConsistencyChecks.test.tsx
│ │ │ │ │ │ ├── ToolResultDisplay.test.tsx
│ │ │ │ │ │ ├── ToolResultDisplay.tsx
│ │ │ │ │ │ ├── ToolResultDisplayOverflow.test.tsx
│ │ │ │ │ │ ├── ToolShared.test.tsx
│ │ │ │ │ │ ├── ToolShared.tsx
│ │ │ │ │ │ ├── ToolStickyHeaderRegression.test.tsx
│ │ │ │ │ │ ├── UserMessage.test.tsx
│ │ │ │ │ │ ├── UserMessage.tsx
│ │ │ │ │ │ ├── UserShellMessage.tsx
│ │ │ │ │ │ ├── WarningMessage.test.tsx
│ │ │ │ │ │ ├── WarningMessage.tsx
│ │ │ │ │ │ └── __snapshots__/
│ │ │ │ │ │ ├── DiffRenderer.test.tsx.snap
│ │ │ │ │ │ ├── ErrorMessage.test.tsx.snap
│ │ │ │ │ │ ├── GeminiMessage.test.tsx.snap
│ │ │ │ │ │ ├── InfoMessage.test.tsx.snap
│ │ │ │ │ │ ├── RedirectionConfirmation.test.tsx.snap
│ │ │ │ │ │ ├── ShellToolMessage.test.tsx.snap
│ │ │ │ │ │ ├── SubagentGroupDisplay.test.tsx.snap
│ │ │ │ │ │ ├── SubagentProgressDisplay.test.tsx.snap
│ │ │ │ │ │ ├── ThinkingMessage.test.tsx.snap
│ │ │ │ │ │ ├── Todo.test.tsx.snap
│ │ │ │ │ │ ├── ToolConfirmationMessage.test.tsx.snap
│ │ │ │ │ │ ├── ToolConfirmationMessageOverflow.test.tsx.snap
│ │ │ │ │ │ ├── ToolGroupMessage.test.tsx.snap
│ │ │ │ │ │ ├── ToolMessage.test.tsx.snap
│ │ │ │ │ │ ├── ToolMessageFocusHint.test.tsx.snap
│ │ │ │ │ │ ├── ToolMessageRawMarkdown.test.tsx.snap
│ │ │ │ │ │ ├── ToolResultDisplay.test.tsx.snap
│ │ │ │ │ │ ├── ToolShared.test.tsx.snap
│ │ │ │ │ │ ├── ToolStickyHeaderRegression.test.tsx.snap
│ │ │ │ │ │ ├── UserMessage.test.tsx.snap
│ │ │ │ │ │ └── WarningMessage.test.tsx.snap
│ │ │ │ │ ├── shared/
│ │ │ │ │ │ ├── BaseSelectionList.test.tsx
│ │ │ │ │ │ ├── BaseSelectionList.tsx
│ │ │ │ │ │ ├── BaseSettingsDialog.test.tsx
│ │ │ │ │ │ ├── BaseSettingsDialog.tsx
│ │ │ │ │ │ ├── DescriptiveRadioButtonSelect.test.tsx
│ │ │ │ │ │ ├── DescriptiveRadioButtonSelect.tsx
│ │ │ │ │ │ ├── DialogFooter.tsx
│ │ │ │ │ │ ├── EnumSelector.test.tsx
│ │ │ │ │ │ ├── EnumSelector.tsx
│ │ │ │ │ │ ├── ExpandableText.test.tsx
│ │ │ │ │ │ ├── ExpandableText.tsx
│ │ │ │ │ │ ├── HalfLinePaddedBox.test.tsx
│ │ │ │ │ │ ├── HalfLinePaddedBox.tsx
│ │ │ │ │ │ ├── HorizontalLine.tsx
│ │ │ │ │ │ ├── MaxSizedBox.test.tsx
│ │ │ │ │ │ ├── MaxSizedBox.tsx
│ │ │ │ │ │ ├── RadioButtonSelect.test.tsx
│ │ │ │ │ │ ├── RadioButtonSelect.tsx
│ │ │ │ │ │ ├── ScopeSelector.tsx
│ │ │ │ │ │ ├── Scrollable.test.tsx
│ │ │ │ │ │ ├── Scrollable.tsx
│ │ │ │ │ │ ├── ScrollableList.test.tsx
│ │ │ │ │ │ ├── ScrollableList.tsx
│ │ │ │ │ │ ├── SearchableList.test.tsx
│ │ │ │ │ │ ├── SearchableList.tsx
│ │ │ │ │ │ ├── SectionHeader.test.tsx
│ │ │ │ │ │ ├── SectionHeader.tsx
│ │ │ │ │ │ ├── SlicingMaxSizedBox.test.tsx
│ │ │ │ │ │ ├── SlicingMaxSizedBox.tsx
│ │ │ │ │ │ ├── TabHeader.test.tsx
│ │ │ │ │ │ ├── TabHeader.tsx
│ │ │ │ │ │ ├── TextInput.test.tsx
│ │ │ │ │ │ ├── TextInput.tsx
│ │ │ │ │ │ ├── VirtualizedList.test.tsx
│ │ │ │ │ │ ├── VirtualizedList.tsx
│ │ │ │ │ │ ├── __snapshots__/
│ │ │ │ │ │ │ ├── BaseSelectionList.test.tsx.snap
│ │ │ │ │ │ │ ├── DescriptiveRadioButtonSelect.test.tsx.snap
│ │ │ │ │ │ │ ├── EnumSelector.test.tsx.snap
│ │ │ │ │ │ │ ├── ExpandablePrompt.test.tsx.snap
│ │ │ │ │ │ │ ├── ExpandableText.test.tsx.snap
│ │ │ │ │ │ │ ├── HalfLinePaddedBox.test.tsx.snap
│ │ │ │ │ │ │ ├── MaxSizedBox.test.tsx.snap
│ │ │ │ │ │ │ ├── Scrollable.test.tsx.snap
│ │ │ │ │ │ │ ├── SearchableList.test.tsx.snap
│ │ │ │ │ │ │ ├── SectionHeader.test.tsx.snap
│ │ │ │ │ │ │ ├── TabHeader.test.tsx.snap
│ │ │ │ │ │ │ └── VirtualizedList.test.tsx.snap
│ │ │ │ │ │ ├── performance.test.ts
│ │ │ │ │ │ ├── text-buffer.test.ts
│ │ │ │ │ │ ├── text-buffer.ts
│ │ │ │ │ │ ├── vim-buffer-actions.test.ts
│ │ │ │ │ │ └── vim-buffer-actions.ts
│ │ │ │ │ ├── triage/
│ │ │ │ │ │ ├── TriageDuplicates.tsx
│ │ │ │ │ │ └── TriageIssues.tsx
│ │ │ │ │ └── views/
│ │ │ │ │ ├── AgentsStatus.tsx
│ │ │ │ │ ├── ChatList.test.tsx
│ │ │ │ │ ├── ChatList.tsx
│ │ │ │ │ ├── ExtensionDetails.test.tsx
│ │ │ │ │ ├── ExtensionDetails.tsx
│ │ │ │ │ ├── ExtensionRegistryView.test.tsx
│ │ │ │ │ ├── ExtensionRegistryView.tsx
│ │ │ │ │ ├── ExtensionsList.test.tsx
│ │ │ │ │ ├── ExtensionsList.tsx
│ │ │ │ │ ├── McpStatus.test.tsx
│ │ │ │ │ ├── McpStatus.tsx
│ │ │ │ │ ├── SkillsList.test.tsx
│ │ │ │ │ ├── SkillsList.tsx
│ │ │ │ │ ├── ToolsList.test.tsx
│ │ │ │ │ ├── ToolsList.tsx
│ │ │ │ │ └── __snapshots__/
│ │ │ │ │ ├── ChatList.test.tsx.snap
│ │ │ │ │ ├── McpStatus.test.tsx.snap
│ │ │ │ │ └── ToolsList.test.tsx.snap
│ │ │ │ ├── constants/
│ │ │ │ │ ├── tips.ts
│ │ │ │ │ └── wittyPhrases.ts
│ │ │ │ ├── constants.ts
│ │ │ │ ├── contexts/
│ │ │ │ │ ├── AppContext.tsx
│ │ │ │ │ ├── AskUserActionsContext.tsx
│ │ │ │ │ ├── ConfigContext.tsx
│ │ │ │ │ ├── KeypressContext.test.tsx
│ │ │ │ │ ├── KeypressContext.tsx
│ │ │ │ │ ├── MouseContext.test.tsx
│ │ │ │ │ ├── MouseContext.tsx
│ │ │ │ │ ├── OverflowContext.tsx
│ │ │ │ │ ├── ScrollProvider.drag.test.tsx
│ │ │ │ │ ├── ScrollProvider.test.tsx
│ │ │ │ │ ├── ScrollProvider.tsx
│ │ │ │ │ ├── SessionContext.test.tsx
│ │ │ │ │ ├── SessionContext.tsx
│ │ │ │ │ ├── SettingsContext.test.tsx
│ │ │ │ │ ├── SettingsContext.tsx
│ │ │ │ │ ├── ShellFocusContext.tsx
│ │ │ │ │ ├── StreamingContext.tsx
│ │ │ │ │ ├── TerminalContext.test.tsx
│ │ │ │ │ ├── TerminalContext.tsx
│ │ │ │ │ ├── ToolActionsContext.test.tsx
│ │ │ │ │ ├── ToolActionsContext.tsx
│ │ │ │ │ ├── UIActionsContext.tsx
│ │ │ │ │ ├── UIStateContext.tsx
│ │ │ │ │ └── VimModeContext.tsx
│ │ │ │ ├── debug.ts
│ │ │ │ ├── editors/
│ │ │ │ │ └── editorSettingsManager.ts
│ │ │ │ ├── hooks/
│ │ │ │ │ ├── __snapshots__/
│ │ │ │ │ │ └── usePhraseCycler.test.tsx.snap
│ │ │ │ │ ├── atCommandProcessor.test.ts
│ │ │ │ │ ├── atCommandProcessor.ts
│ │ │ │ │ ├── atCommandProcessor_agents.test.ts
│ │ │ │ │ ├── creditsFlowHandler.test.ts
│ │ │ │ │ ├── creditsFlowHandler.ts
│ │ │ │ │ ├── shell-completions/
│ │ │ │ │ │ ├── gitProvider.test.ts
│ │ │ │ │ │ ├── gitProvider.ts
│ │ │ │ │ │ ├── index.ts
│ │ │ │ │ │ ├── npmProvider.test.ts
│ │ │ │ │ │ ├── npmProvider.ts
│ │ │ │ │ │ └── types.ts
│ │ │ │ │ ├── shellCommandProcessor.test.tsx
│ │ │ │ │ ├── shellCommandProcessor.ts
│ │ │ │ │ ├── shellReducer.test.ts
│ │ │ │ │ ├── shellReducer.ts
│ │ │ │ │ ├── slashCommandProcessor.test.tsx
│ │ │ │ │ ├── slashCommandProcessor.ts
│ │ │ │ │ ├── toolMapping.test.ts
│ │ │ │ │ ├── toolMapping.ts
│ │ │ │ │ ├── useAlternateBuffer.test.ts
│ │ │ │ │ ├── useAlternateBuffer.ts
│ │ │ │ │ ├── useAnimatedScrollbar.test.tsx
│ │ │ │ │ ├── useAnimatedScrollbar.ts
│ │ │ │ │ ├── useApprovalModeIndicator.test.ts
│ │ │ │ │ ├── useApprovalModeIndicator.ts
│ │ │ │ │ ├── useAtCompletion.test.ts
│ │ │ │ │ ├── useAtCompletion.ts
│ │ │ │ │ ├── useAtCompletion_agents.test.ts
│ │ │ │ │ ├── useBackgroundShellManager.test.tsx
│ │ │ │ │ ├── useBackgroundShellManager.ts
│ │ │ │ │ ├── useBanner.test.ts
│ │ │ │ │ ├── useBanner.ts
│ │ │ │ │ ├── useBatchedScroll.test.ts
│ │ │ │ │ ├── useBatchedScroll.ts
│ │ │ │ │ ├── useCommandCompletion.test.tsx
│ │ │ │ │ ├── useCommandCompletion.tsx
│ │ │ │ │ ├── useCompletion.ts
│ │ │ │ │ ├── useConfirmingTool.ts
│ │ │ │ │ ├── useConsoleMessages.test.tsx
│ │ │ │ │ ├── useConsoleMessages.ts
│ │ │ │ │ ├── useEditorSettings.test.tsx
│ │ │ │ │ ├── useEditorSettings.ts
│ │ │ │ │ ├── useExtensionRegistry.ts
│ │ │ │ │ ├── useExtensionUpdates.test.tsx
│ │ │ │ │ ├── useExtensionUpdates.ts
│ │ │ │ │ ├── useFlickerDetector.test.ts
│ │ │ │ │ ├── useFlickerDetector.ts
│ │ │ │ │ ├── useFocus.test.tsx
│ │ │ │ │ ├── useFocus.ts
│ │ │ │ │ ├── useFolderTrust.test.ts
│ │ │ │ │ ├── useFolderTrust.ts
│ │ │ │ │ ├── useGeminiStream.test.tsx
│ │ │ │ │ ├── useGeminiStream.ts
│ │ │ │ │ ├── useGitBranchName.test.tsx
│ │ │ │ │ ├── useGitBranchName.ts
│ │ │ │ │ ├── useHistoryManager.test.ts
│ │ │ │ │ ├── useHistoryManager.ts
│ │ │ │ │ ├── useHookDisplayState.test.ts
│ │ │ │ │ ├── useHookDisplayState.ts
│ │ │ │ │ ├── useIdeTrustListener.test.tsx
│ │ │ │ │ ├── useIdeTrustListener.ts
│ │ │ │ │ ├── useInactivityTimer.ts
│ │ │ │ │ ├── useIncludeDirsTrust.test.tsx
│ │ │ │ │ ├── useIncludeDirsTrust.tsx
│ │ │ │ │ ├── useInlineEditBuffer.test.ts
│ │ │ │ │ ├── useInlineEditBuffer.ts
│ │ │ │ │ ├── useInputHistory.test.ts
│ │ │ │ │ ├── useInputHistory.ts
│ │ │ │ │ ├── useInputHistoryStore.test.ts
│ │ │ │ │ ├── useInputHistoryStore.ts
│ │ │ │ │ ├── useKeyMatchers.tsx
│ │ │ │ │ ├── useKeypress.test.tsx
│ │ │ │ │ ├── useKeypress.ts
│ │ │ │ │ ├── useKittyKeyboardProtocol.ts
│ │ │ │ │ ├── useLoadingIndicator.test.tsx
│ │ │ │ │ ├── useLoadingIndicator.ts
│ │ │ │ │ ├── useLogger.test.tsx
│ │ │ │ │ ├── useLogger.ts
│ │ │ │ │ ├── useMcpStatus.test.tsx
│ │ │ │ │ ├── useMcpStatus.ts
│ │ │ │ │ ├── useMemoryMonitor.test.tsx
│ │ │ │ │ ├── useMemoryMonitor.ts
│ │ │ │ │ ├── useMessageQueue.test.tsx
│ │ │ │ │ ├── useMessageQueue.ts
│ │ │ │ │ ├── useModelCommand.test.tsx
│ │ │ │ │ ├── useModelCommand.ts
│ │ │ │ │ ├── useMouse.test.ts
│ │ │ │ │ ├── useMouse.ts
│ │ │ │ │ ├── useMouseClick.test.ts
│ │ │ │ │ ├── useMouseClick.ts
│ │ │ │ │ ├── usePermissionsModifyTrust.test.ts
│ │ │ │ │ ├── usePermissionsModifyTrust.ts
│ │ │ │ │ ├── usePhraseCycler.test.tsx
│ │ │ │ │ ├── usePhraseCycler.ts
│ │ │ │ │ ├── usePrivacySettings.test.tsx
│ │ │ │ │ ├── usePrivacySettings.ts
│ │ │ │ │ ├── usePromptCompletion.ts
│ │ │ │ │ ├── useQuotaAndFallback.test.ts
│ │ │ │ │ ├── useQuotaAndFallback.ts
│ │ │ │ │ ├── useRegistrySearch.ts
│ │ │ │ │ ├── useRepeatedKeyPress.ts
│ │ │ │ │ ├── useReverseSearchCompletion.test.tsx
│ │ │ │ │ ├── useReverseSearchCompletion.tsx
│ │ │ │ │ ├── useRewind.test.ts
│ │ │ │ │ ├── useRewind.ts
│ │ │ │ │ ├── useRunEventNotifications.ts
│ │ │ │ │ ├── useSearchBuffer.ts
│ │ │ │ │ ├── useSelectionList.test.tsx
│ │ │ │ │ ├── useSelectionList.ts
│ │ │ │ │ ├── useSessionBrowser.test.ts
│ │ │ │ │ ├── useSessionBrowser.ts
│ │ │ │ │ ├── useSessionResume.test.ts
│ │ │ │ │ ├── useSessionResume.ts
│ │ │ │ │ ├── useSettingsCommand.ts
│ │ │ │ │ ├── useSettingsNavigation.test.ts
│ │ │ │ │ ├── useSettingsNavigation.ts
│ │ │ │ │ ├── useShellCompletion.test.ts
│ │ │ │ │ ├── useShellCompletion.ts
│ │ │ │ │ ├── useShellHistory.test.ts
│ │ │ │ │ ├── useShellHistory.ts
│ │ │ │ │ ├── useShellInactivityStatus.test.ts
│ │ │ │ │ ├── useShellInactivityStatus.ts
│ │ │ │ │ ├── useSlashCompletion.test.ts
│ │ │ │ │ ├── useSlashCompletion.ts
│ │ │ │ │ ├── useSnowfall.test.tsx
│ │ │ │ │ ├── useSnowfall.ts
│ │ │ │ │ ├── useStateAndRef.ts
│ │ │ │ │ ├── useSuspend.test.ts
│ │ │ │ │ ├── useSuspend.ts
│ │ │ │ │ ├── useTabbedNavigation.test.ts
│ │ │ │ │ ├── useTabbedNavigation.ts
│ │ │ │ │ ├── useTerminalSize.ts
│ │ │ │ │ ├── useTerminalTheme.test.tsx
│ │ │ │ │ ├── useTerminalTheme.ts
│ │ │ │ │ ├── useThemeCommand.ts
│ │ │ │ │ ├── useTimedMessage.ts
│ │ │ │ │ ├── useTimer.test.tsx
│ │ │ │ │ ├── useTimer.ts
│ │ │ │ │ ├── useTips.test.ts
│ │ │ │ │ ├── useTips.ts
│ │ │ │ │ ├── useToolScheduler.test.ts
│ │ │ │ │ ├── useToolScheduler.ts
│ │ │ │ │ ├── useTurnActivityMonitor.test.ts
│ │ │ │ │ ├── useTurnActivityMonitor.ts
│ │ │ │ │ ├── useVisibilityToggle.ts
│ │ │ │ │ ├── vim-passthrough.test.tsx
│ │ │ │ │ ├── vim.test.tsx
│ │ │ │ │ └── vim.ts
│ │ │ │ ├── key/
│ │ │ │ │ ├── keyBindings.test.ts
│ │ │ │ │ ├── keyBindings.ts
│ │ │ │ │ ├── keyMatchers.test.ts
│ │ │ │ │ ├── keyMatchers.ts
│ │ │ │ │ ├── keyToAnsi.ts
│ │ │ │ │ ├── keybindingUtils.test.ts
│ │ │ │ │ └── keybindingUtils.ts
│ │ │ │ ├── layouts/
│ │ │ │ │ ├── DefaultAppLayout.test.tsx
│ │ │ │ │ ├── DefaultAppLayout.tsx
│ │ │ │ │ ├── ScreenReaderAppLayout.tsx
│ │ │ │ │ └── __snapshots__/
│ │ │ │ │ └── DefaultAppLayout.test.tsx.snap
│ │ │ │ ├── noninteractive/
│ │ │ │ │ └── nonInteractiveUi.ts
│ │ │ │ ├── privacy/
│ │ │ │ │ ├── CloudFreePrivacyNotice.test.tsx
│ │ │ │ │ ├── CloudFreePrivacyNotice.tsx
│ │ │ │ │ ├── CloudPaidPrivacyNotice.test.tsx
│ │ │ │ │ ├── CloudPaidPrivacyNotice.tsx
│ │ │ │ │ ├── GeminiPrivacyNotice.test.tsx
│ │ │ │ │ ├── GeminiPrivacyNotice.tsx
│ │ │ │ │ ├── PrivacyNotice.test.tsx
│ │ │ │ │ └── PrivacyNotice.tsx
│ │ │ │ ├── semantic-colors.ts
│ │ │ │ ├── state/
│ │ │ │ │ ├── extensions.test.ts
│ │ │ │ │ └── extensions.ts
│ │ │ │ ├── textConstants.ts
│ │ │ │ ├── themes/
│ │ │ │ │ ├── builtin/
│ │ │ │ │ │ ├── dark/
│ │ │ │ │ │ │ ├── ansi-dark.ts
│ │ │ │ │ │ │ ├── atom-one-dark.ts
│ │ │ │ │ │ │ ├── ayu-dark.ts
│ │ │ │ │ │ │ ├── default-dark.ts
│ │ │ │ │ │ │ ├── dracula-dark.ts
│ │ │ │ │ │ │ ├── github-dark.ts
│ │ │ │ │ │ │ ├── holiday-dark.ts
│ │ │ │ │ │ │ ├── shades-of-purple-dark.ts
│ │ │ │ │ │ │ └── solarized-dark.ts
│ │ │ │ │ │ ├── light/
│ │ │ │ │ │ │ ├── ansi-light.ts
│ │ │ │ │ │ │ ├── ayu-light.ts
│ │ │ │ │ │ │ ├── default-light.ts
│ │ │ │ │ │ │ ├── github-light.ts
│ │ │ │ │ │ │ ├── googlecode-light.ts
│ │ │ │ │ │ │ ├── solarized-light.ts
│ │ │ │ │ │ │ └── xcode-light.ts
│ │ │ │ │ │ └── no-color.ts
│ │ │ │ │ ├── color-utils.test.ts
│ │ │ │ │ ├── color-utils.ts
│ │ │ │ │ ├── semantic-tokens.ts
│ │ │ │ │ ├── theme-manager.test.ts
│ │ │ │ │ ├── theme-manager.ts
│ │ │ │ │ ├── theme.test.ts
│ │ │ │ │ └── theme.ts
│ │ │ │ ├── types.ts
│ │ │ │ └── utils/
│ │ │ │ ├── CodeColorizer.test.tsx
│ │ │ │ ├── CodeColorizer.tsx
│ │ │ │ ├── ConsolePatcher.ts
│ │ │ │ ├── InlineMarkdownRenderer.tsx
│ │ │ │ ├── MarkdownDisplay.test.tsx
│ │ │ │ ├── MarkdownDisplay.tsx
│ │ │ │ ├── TableRenderer.test.tsx
│ │ │ │ ├── TableRenderer.tsx
│ │ │ │ ├── __snapshots__/
│ │ │ │ │ ├── CodeColorizer.test.tsx.snap
│ │ │ │ │ ├── MarkdownDisplay.test.tsx.snap
│ │ │ │ │ ├── TableRenderer.test.tsx.snap
│ │ │ │ │ ├── borderStyles.test.tsx.snap
│ │ │ │ │ ├── terminalSetup.test.ts.snap
│ │ │ │ │ └── textOutput.test.ts.snap
│ │ │ │ ├── borderStyles.test.tsx
│ │ │ │ ├── borderStyles.ts
│ │ │ │ ├── clipboardUtils.test.ts
│ │ │ │ ├── clipboardUtils.ts
│ │ │ │ ├── clipboardUtils.windows.test.ts
│ │ │ │ ├── commandUtils.test.ts
│ │ │ │ ├── commandUtils.ts
│ │ │ │ ├── computeStats.test.ts
│ │ │ │ ├── computeStats.ts
│ │ │ │ ├── confirmingTool.ts
│ │ │ │ ├── contextUsage.ts
│ │ │ │ ├── directoryUtils.test.ts
│ │ │ │ ├── directoryUtils.ts
│ │ │ │ ├── displayUtils.test.ts
│ │ │ │ ├── displayUtils.ts
│ │ │ │ ├── editorUtils.ts
│ │ │ │ ├── formatters.test.ts
│ │ │ │ ├── formatters.ts
│ │ │ │ ├── highlight.test.ts
│ │ │ │ ├── highlight.ts
│ │ │ │ ├── historyExportUtils.ts
│ │ │ │ ├── inlineThinkingMode.ts
│ │ │ │ ├── input.test.ts
│ │ │ │ ├── input.ts
│ │ │ │ ├── isNarrowWidth.ts
│ │ │ │ ├── markdownParsingUtils.test.ts
│ │ │ │ ├── markdownParsingUtils.ts
│ │ │ │ ├── markdownUtilities.test.ts
│ │ │ │ ├── markdownUtilities.ts
│ │ │ │ ├── mouse.test.ts
│ │ │ │ ├── mouse.ts
│ │ │ │ ├── pendingAttentionNotification.test.ts
│ │ │ │ ├── pendingAttentionNotification.ts
│ │ │ │ ├── rewindFileOps.test.ts
│ │ │ │ ├── rewindFileOps.ts
│ │ │ │ ├── shortcutsHelp.ts
│ │ │ │ ├── terminalCapabilityManager.test.ts
│ │ │ │ ├── terminalCapabilityManager.ts
│ │ │ │ ├── terminalSetup.test.ts
│ │ │ │ ├── terminalSetup.ts
│ │ │ │ ├── terminalUtils.ts
│ │ │ │ ├── textOutput.test.ts
│ │ │ │ ├── textOutput.ts
│ │ │ │ ├── textUtils.test.ts
│ │ │ │ ├── textUtils.ts
│ │ │ │ ├── toolLayoutUtils.test.ts
│ │ │ │ ├── toolLayoutUtils.ts
│ │ │ │ ├── ui-sizing.test.ts
│ │ │ │ ├── ui-sizing.ts
│ │ │ │ ├── updateCheck.test.ts
│ │ │ │ ├── updateCheck.ts
│ │ │ │ ├── urlSecurityUtils.test.ts
│ │ │ │ └── urlSecurityUtils.ts
│ │ │ ├── utils/
│ │ │ │ ├── activityLogger.test.ts
│ │ │ │ ├── activityLogger.ts
│ │ │ │ ├── agentSettings.test.ts
│ │ │ │ ├── agentSettings.ts
│ │ │ │ ├── agentUtils.test.ts
│ │ │ │ ├── agentUtils.ts
│ │ │ │ ├── cleanup.test.ts
│ │ │ │ ├── cleanup.ts
│ │ │ │ ├── commands.test.ts
│ │ │ │ ├── commands.ts
│ │ │ │ ├── commentJson.test.ts
│ │ │ │ ├── commentJson.ts
│ │ │ │ ├── deepMerge.test.ts
│ │ │ │ ├── deepMerge.ts
│ │ │ │ ├── devtoolsService.test.ts
│ │ │ │ ├── devtoolsService.ts
│ │ │ │ ├── dialogScopeUtils.test.ts
│ │ │ │ ├── dialogScopeUtils.ts
│ │ │ │ ├── envVarResolver.test.ts
│ │ │ │ ├── envVarResolver.ts
│ │ │ │ ├── errors.test.ts
│ │ │ │ ├── errors.ts
│ │ │ │ ├── events.test.ts
│ │ │ │ ├── events.ts
│ │ │ │ ├── featureToggleUtils.test.ts
│ │ │ │ ├── featureToggleUtils.ts
│ │ │ │ ├── gitUtils.test.ts
│ │ │ │ ├── gitUtils.ts
│ │ │ │ ├── handleAutoUpdate.test.ts
│ │ │ │ ├── handleAutoUpdate.ts
│ │ │ │ ├── hookSettings.test.ts
│ │ │ │ ├── hookSettings.ts
│ │ │ │ ├── hookUtils.test.ts
│ │ │ │ ├── hookUtils.ts
│ │ │ │ ├── installationInfo.test.ts
│ │ │ │ ├── installationInfo.ts
│ │ │ │ ├── jsonoutput.test.ts
│ │ │ │ ├── jsonoutput.ts
│ │ │ │ ├── logCleanup.test.ts
│ │ │ │ ├── logCleanup.ts
│ │ │ │ ├── math.test.ts
│ │ │ │ ├── math.ts
│ │ │ │ ├── persistentState.test.ts
│ │ │ │ ├── persistentState.ts
│ │ │ │ ├── processUtils.test.ts
│ │ │ │ ├── processUtils.ts
│ │ │ │ ├── readStdin.test.ts
│ │ │ │ ├── readStdin.ts
│ │ │ │ ├── readStdin_safety.test.ts
│ │ │ │ ├── relaunch.test.ts
│ │ │ │ ├── relaunch.ts
│ │ │ │ ├── resolvePath.test.ts
│ │ │ │ ├── resolvePath.ts
│ │ │ │ ├── sandbox-macos-permissive-open.sb
│ │ │ │ ├── sandbox-macos-permissive-proxied.sb
│ │ │ │ ├── sandbox-macos-restrictive-open.sb
│ │ │ │ ├── sandbox-macos-restrictive-proxied.sb
│ │ │ │ ├── sandbox-macos-strict-open.sb
│ │ │ │ ├── sandbox-macos-strict-proxied.sb
│ │ │ │ ├── sandbox.test.ts
│ │ │ │ ├── sandbox.ts
│ │ │ │ ├── sandboxUtils.test.ts
│ │ │ │ ├── sandboxUtils.ts
│ │ │ │ ├── sessionCleanup.integration.test.ts
│ │ │ │ ├── sessionCleanup.test.ts
│ │ │ │ ├── sessionCleanup.ts
│ │ │ │ ├── sessionUtils.test.ts
│ │ │ │ ├── sessionUtils.ts
│ │ │ │ ├── sessions.test.ts
│ │ │ │ ├── sessions.ts
│ │ │ │ ├── settingsUtils.test.ts
│ │ │ │ ├── settingsUtils.ts
│ │ │ │ ├── skillSettings.test.ts
│ │ │ │ ├── skillSettings.ts
│ │ │ │ ├── skillUtils.test.ts
│ │ │ │ ├── skillUtils.ts
│ │ │ │ ├── spawnWrapper.ts
│ │ │ │ ├── startupWarnings.test.ts
│ │ │ │ ├── startupWarnings.ts
│ │ │ │ ├── terminalNotifications.test.ts
│ │ │ │ ├── terminalNotifications.ts
│ │ │ │ ├── terminalTheme.ts
│ │ │ │ ├── tierUtils.test.ts
│ │ │ │ ├── tierUtils.ts
│ │ │ │ ├── toolOutputCleanup.test.ts
│ │ │ │ ├── updateEventEmitter.test.ts
│ │ │ │ ├── updateEventEmitter.ts
│ │ │ │ ├── userStartupWarnings.test.ts
│ │ │ │ ├── userStartupWarnings.ts
│ │ │ │ ├── windowTitle.test.ts
│ │ │ │ └── windowTitle.ts
│ │ │ ├── validateNonInterActiveAuth.test.ts
│ │ │ └── validateNonInterActiveAuth.ts
│ │ ├── test-setup.ts
│ │ ├── tsconfig.json
│ │ └── vitest.config.ts
│ ├── core/
│ │ ├── GEMINI.md
│ │ ├── index.ts
│ │ ├── package.json
│ │ ├── scripts/
│ │ │ ├── bundle-browser-mcp.mjs
│ │ │ └── compile-windows-sandbox.js
│ │ ├── src/
│ │ │ ├── __mocks__/
│ │ │ │ └── fs/
│ │ │ │ └── promises.ts
│ │ │ ├── agent/
│ │ │ │ ├── content-utils.test.ts
│ │ │ │ ├── content-utils.ts
│ │ │ │ ├── mock.test.ts
│ │ │ │ ├── mock.ts
│ │ │ │ └── types.ts
│ │ │ ├── agents/
│ │ │ │ ├── a2a-client-manager.test.ts
│ │ │ │ ├── a2a-client-manager.ts
│ │ │ │ ├── a2a-errors.test.ts
│ │ │ │ ├── a2a-errors.ts
│ │ │ │ ├── a2aUtils.test.ts
│ │ │ │ ├── a2aUtils.ts
│ │ │ │ ├── acknowledgedAgents.test.ts
│ │ │ │ ├── acknowledgedAgents.ts
│ │ │ │ ├── agent-scheduler.test.ts
│ │ │ │ ├── agent-scheduler.ts
│ │ │ │ ├── agentLoader.test.ts
│ │ │ │ ├── agentLoader.ts
│ │ │ │ ├── auth-provider/
│ │ │ │ │ ├── api-key-provider.test.ts
│ │ │ │ │ ├── api-key-provider.ts
│ │ │ │ │ ├── base-provider.test.ts
│ │ │ │ │ ├── base-provider.ts
│ │ │ │ │ ├── factory.test.ts
│ │ │ │ │ ├── factory.ts
│ │ │ │ │ ├── google-credentials-provider.test.ts
│ │ │ │ │ ├── google-credentials-provider.ts
│ │ │ │ │ ├── http-provider.test.ts
│ │ │ │ │ ├── http-provider.ts
│ │ │ │ │ ├── oauth2-provider.test.ts
│ │ │ │ │ ├── oauth2-provider.ts
│ │ │ │ │ ├── types.ts
│ │ │ │ │ ├── value-resolver.test.ts
│ │ │ │ │ └── value-resolver.ts
│ │ │ │ ├── browser/
│ │ │ │ │ ├── analyzeScreenshot.test.ts
│ │ │ │ │ ├── analyzeScreenshot.ts
│ │ │ │ │ ├── automationOverlay.ts
│ │ │ │ │ ├── browser-tools-manifest.json
│ │ │ │ │ ├── browserAgentDefinition.ts
│ │ │ │ │ ├── browserAgentFactory.test.ts
│ │ │ │ │ ├── browserAgentFactory.ts
│ │ │ │ │ ├── browserAgentInvocation.test.ts
│ │ │ │ │ ├── browserAgentInvocation.ts
│ │ │ │ │ ├── browserManager.test.ts
│ │ │ │ │ ├── browserManager.ts
│ │ │ │ │ ├── inputBlocker.test.ts
│ │ │ │ │ ├── inputBlocker.ts
│ │ │ │ │ ├── mcpToolWrapper.test.ts
│ │ │ │ │ ├── mcpToolWrapper.ts
│ │ │ │ │ ├── mcpToolWrapperConfirmation.test.ts
│ │ │ │ │ └── modelAvailability.ts
│ │ │ │ ├── cli-help-agent.test.ts
│ │ │ │ ├── cli-help-agent.ts
│ │ │ │ ├── codebase-investigator.test.ts
│ │ │ │ ├── codebase-investigator.ts
│ │ │ │ ├── generalist-agent.test.ts
│ │ │ │ ├── generalist-agent.ts
│ │ │ │ ├── local-executor.test.ts
│ │ │ │ ├── local-executor.ts
│ │ │ │ ├── local-invocation.test.ts
│ │ │ │ ├── local-invocation.ts
│ │ │ │ ├── memory-manager-agent.test.ts
│ │ │ │ ├── memory-manager-agent.ts
│ │ │ │ ├── registry.test.ts
│ │ │ │ ├── registry.ts
│ │ │ │ ├── registry_acknowledgement.test.ts
│ │ │ │ ├── remote-invocation.test.ts
│ │ │ │ ├── remote-invocation.ts
│ │ │ │ ├── subagent-tool-wrapper.test.ts
│ │ │ │ ├── subagent-tool-wrapper.ts
│ │ │ │ ├── subagent-tool.test.ts
│ │ │ │ ├── subagent-tool.ts
│ │ │ │ ├── types.ts
│ │ │ │ ├── utils.test.ts
│ │ │ │ └── utils.ts
│ │ │ ├── availability/
│ │ │ │ ├── errorClassification.ts
│ │ │ │ ├── fallbackIntegration.test.ts
│ │ │ │ ├── modelAvailabilityService.test.ts
│ │ │ │ ├── modelAvailabilityService.ts
│ │ │ │ ├── modelPolicy.ts
│ │ │ │ ├── policyCatalog.test.ts
│ │ │ │ ├── policyCatalog.ts
│ │ │ │ ├── policyHelpers.test.ts
│ │ │ │ ├── policyHelpers.ts
│ │ │ │ └── testUtils.ts
│ │ │ ├── billing/
│ │ │ │ ├── billing.test.ts
│ │ │ │ ├── billing.ts
│ │ │ │ └── index.ts
│ │ │ ├── code_assist/
│ │ │ │ ├── admin/
│ │ │ │ │ ├── admin_controls.test.ts
│ │ │ │ │ ├── admin_controls.ts
│ │ │ │ │ ├── mcpUtils.test.ts
│ │ │ │ │ └── mcpUtils.ts
│ │ │ │ ├── codeAssist.test.ts
│ │ │ │ ├── codeAssist.ts
│ │ │ │ ├── converter.test.ts
│ │ │ │ ├── converter.ts
│ │ │ │ ├── experiments/
│ │ │ │ │ ├── client_metadata.test.ts
│ │ │ │ │ ├── client_metadata.ts
│ │ │ │ │ ├── experiments.test.ts
│ │ │ │ │ ├── experiments.ts
│ │ │ │ │ ├── experiments_local.test.ts
│ │ │ │ │ ├── flagNames.ts
│ │ │ │ │ └── types.ts
│ │ │ │ ├── oauth-credential-storage.test.ts
│ │ │ │ ├── oauth-credential-storage.ts
│ │ │ │ ├── oauth2.test.ts
│ │ │ │ ├── oauth2.ts
│ │ │ │ ├── server.test.ts
│ │ │ │ ├── server.ts
│ │ │ │ ├── setup.test.ts
│ │ │ │ ├── setup.ts
│ │ │ │ ├── telemetry.test.ts
│ │ │ │ ├── telemetry.ts
│ │ │ │ └── types.ts
│ │ │ ├── commands/
│ │ │ │ ├── extensions.test.ts
│ │ │ │ ├── extensions.ts
│ │ │ │ ├── init.test.ts
│ │ │ │ ├── init.ts
│ │ │ │ ├── memory.test.ts
│ │ │ │ ├── memory.ts
│ │ │ │ ├── restore.test.ts
│ │ │ │ ├── restore.ts
│ │ │ │ └── types.ts
│ │ │ ├── config/
│ │ │ │ ├── agent-loop-context.ts
│ │ │ │ ├── config.test.ts
│ │ │ │ ├── config.ts
│ │ │ │ ├── constants.ts
│ │ │ │ ├── defaultModelConfigs.ts
│ │ │ │ ├── extensions/
│ │ │ │ │ ├── integrity.test.ts
│ │ │ │ │ ├── integrity.ts
│ │ │ │ │ └── integrityTypes.ts
│ │ │ │ ├── flashFallback.test.ts
│ │ │ │ ├── injectionService.test.ts
│ │ │ │ ├── injectionService.ts
│ │ │ │ ├── memory.test.ts
│ │ │ │ ├── memory.ts
│ │ │ │ ├── models.test.ts
│ │ │ │ ├── models.ts
│ │ │ │ ├── path-validation.test.ts
│ │ │ │ ├── projectRegistry.test.ts
│ │ │ │ ├── projectRegistry.ts
│ │ │ │ ├── sandbox-integration.test.ts
│ │ │ │ ├── storage.test.ts
│ │ │ │ ├── storage.ts
│ │ │ │ ├── storageMigration.test.ts
│ │ │ │ ├── storageMigration.ts
│ │ │ │ └── trackerFeatureFlag.test.ts
│ │ │ ├── confirmation-bus/
│ │ │ │ ├── index.ts
│ │ │ │ ├── message-bus.test.ts
│ │ │ │ ├── message-bus.ts
│ │ │ │ └── types.ts
│ │ │ ├── core/
│ │ │ │ ├── __snapshots__/
│ │ │ │ │ └── prompts.test.ts.snap
│ │ │ │ ├── apiKeyCredentialStorage.test.ts
│ │ │ │ ├── apiKeyCredentialStorage.ts
│ │ │ │ ├── baseLlmClient.test.ts
│ │ │ │ ├── baseLlmClient.ts
│ │ │ │ ├── client.test.ts
│ │ │ │ ├── client.ts
│ │ │ │ ├── contentGenerator.test.ts
│ │ │ │ ├── contentGenerator.ts
│ │ │ │ ├── coreToolHookTriggers.test.ts
│ │ │ │ ├── coreToolHookTriggers.ts
│ │ │ │ ├── coreToolScheduler.test.ts
│ │ │ │ ├── coreToolScheduler.ts
│ │ │ │ ├── fakeContentGenerator.test.ts
│ │ │ │ ├── fakeContentGenerator.ts
│ │ │ │ ├── geminiChat.test.ts
│ │ │ │ ├── geminiChat.ts
│ │ │ │ ├── geminiChat_network_retry.test.ts
│ │ │ │ ├── geminiRequest.ts
│ │ │ │ ├── localLiteRtLmClient.test.ts
│ │ │ │ ├── localLiteRtLmClient.ts
│ │ │ │ ├── logger.test.ts
│ │ │ │ ├── logger.ts
│ │ │ │ ├── loggingContentGenerator.test.ts
│ │ │ │ ├── loggingContentGenerator.ts
│ │ │ │ ├── prompts-substitution.test.ts
│ │ │ │ ├── prompts.test.ts
│ │ │ │ ├── prompts.ts
│ │ │ │ ├── recordingContentGenerator.test.ts
│ │ │ │ ├── recordingContentGenerator.ts
│ │ │ │ ├── tokenLimits.test.ts
│ │ │ │ ├── tokenLimits.ts
│ │ │ │ ├── turn.test.ts
│ │ │ │ └── turn.ts
│ │ │ ├── fallback/
│ │ │ │ ├── handler.test.ts
│ │ │ │ ├── handler.ts
│ │ │ │ └── types.ts
│ │ │ ├── hooks/
│ │ │ │ ├── hookAggregator.test.ts
│ │ │ │ ├── hookAggregator.ts
│ │ │ │ ├── hookEventHandler.test.ts
│ │ │ │ ├── hookEventHandler.ts
│ │ │ │ ├── hookPlanner.test.ts
│ │ │ │ ├── hookPlanner.ts
│ │ │ │ ├── hookRegistry.test.ts
│ │ │ │ ├── hookRegistry.ts
│ │ │ │ ├── hookRunner.test.ts
│ │ │ │ ├── hookRunner.ts
│ │ │ │ ├── hookSystem.test.ts
│ │ │ │ ├── hookSystem.ts
│ │ │ │ ├── hookTranslator.test.ts
│ │ │ │ ├── hookTranslator.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── runtimeHooks.test.ts
│ │ │ │ ├── trustedHooks.test.ts
│ │ │ │ ├── trustedHooks.ts
│ │ │ │ ├── types.test.ts
│ │ │ │ └── types.ts
│ │ │ ├── ide/
│ │ │ │ ├── constants.ts
│ │ │ │ ├── detect-ide.test.ts
│ │ │ │ ├── detect-ide.ts
│ │ │ │ ├── ide-client.test.ts
│ │ │ │ ├── ide-client.ts
│ │ │ │ ├── ide-connection-utils.test.ts
│ │ │ │ ├── ide-connection-utils.ts
│ │ │ │ ├── ide-installer.test.ts
│ │ │ │ ├── ide-installer.ts
│ │ │ │ ├── ideContext.test.ts
│ │ │ │ ├── ideContext.ts
│ │ │ │ ├── process-utils.test.ts
│ │ │ │ ├── process-utils.ts
│ │ │ │ └── types.ts
│ │ │ ├── index.test.ts
│ │ │ ├── index.ts
│ │ │ ├── mcp/
│ │ │ │ ├── auth-provider.ts
│ │ │ │ ├── google-auth-provider.test.ts
│ │ │ │ ├── google-auth-provider.ts
│ │ │ │ ├── mcp-oauth-provider.test.ts
│ │ │ │ ├── mcp-oauth-provider.ts
│ │ │ │ ├── oauth-provider.test.ts
│ │ │ │ ├── oauth-provider.ts
│ │ │ │ ├── oauth-token-storage.test.ts
│ │ │ │ ├── oauth-token-storage.ts
│ │ │ │ ├── oauth-utils.test.ts
│ │ │ │ ├── oauth-utils.ts
│ │ │ │ ├── sa-impersonation-provider.test.ts
│ │ │ │ ├── sa-impersonation-provider.ts
│ │ │ │ └── token-storage/
│ │ │ │ ├── base-token-storage.test.ts
│ │ │ │ ├── base-token-storage.ts
│ │ │ │ ├── hybrid-token-storage.test.ts
│ │ │ │ ├── hybrid-token-storage.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── keychain-token-storage.test.ts
│ │ │ │ ├── keychain-token-storage.ts
│ │ │ │ └── types.ts
│ │ │ ├── mocks/
│ │ │ │ └── msw.ts
│ │ │ ├── output/
│ │ │ │ ├── json-formatter.test.ts
│ │ │ │ ├── json-formatter.ts
│ │ │ │ ├── stream-json-formatter.test.ts
│ │ │ │ ├── stream-json-formatter.ts
│ │ │ │ └── types.ts
│ │ │ ├── policy/
│ │ │ │ ├── config.test.ts
│ │ │ │ ├── config.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── integrity.test.ts
│ │ │ │ ├── integrity.ts
│ │ │ │ ├── memory-manager-policy.test.ts
│ │ │ │ ├── persistence.test.ts
│ │ │ │ ├── policies/
│ │ │ │ │ ├── conseca.toml
│ │ │ │ │ ├── discovered.toml
│ │ │ │ │ ├── memory-manager.toml
│ │ │ │ │ ├── plan.toml
│ │ │ │ │ ├── read-only.toml
│ │ │ │ │ ├── tracker.toml
│ │ │ │ │ ├── write.toml
│ │ │ │ │ └── yolo.toml
│ │ │ │ ├── policy-engine.test.ts
│ │ │ │ ├── policy-engine.ts
│ │ │ │ ├── policy-updater.test.ts
│ │ │ │ ├── shell-safety.test.ts
│ │ │ │ ├── stable-stringify.ts
│ │ │ │ ├── toml-loader.test.ts
│ │ │ │ ├── toml-loader.ts
│ │ │ │ ├── types.ts
│ │ │ │ ├── utils.test.ts
│ │ │ │ ├── utils.ts
│ │ │ │ └── workspace-policy.test.ts
│ │ │ ├── prompts/
│ │ │ │ ├── mcp-prompts.test.ts
│ │ │ │ ├── mcp-prompts.ts
│ │ │ │ ├── prompt-registry.test.ts
│ │ │ │ ├── prompt-registry.ts
│ │ │ │ ├── promptProvider.test.ts
│ │ │ │ ├── promptProvider.ts
│ │ │ │ ├── snippets-memory-manager.test.ts
│ │ │ │ ├── snippets.legacy.ts
│ │ │ │ ├── snippets.ts
│ │ │ │ ├── utils.test.ts
│ │ │ │ └── utils.ts
│ │ │ ├── resources/
│ │ │ │ ├── resource-registry.test.ts
│ │ │ │ └── resource-registry.ts
│ │ │ ├── routing/
│ │ │ │ ├── modelRouterService.test.ts
│ │ │ │ ├── modelRouterService.ts
│ │ │ │ ├── routingStrategy.ts
│ │ │ │ └── strategies/
│ │ │ │ ├── approvalModeStrategy.test.ts
│ │ │ │ ├── approvalModeStrategy.ts
│ │ │ │ ├── classifierStrategy.test.ts
│ │ │ │ ├── classifierStrategy.ts
│ │ │ │ ├── compositeStrategy.test.ts
│ │ │ │ ├── compositeStrategy.ts
│ │ │ │ ├── defaultStrategy.test.ts
│ │ │ │ ├── defaultStrategy.ts
│ │ │ │ ├── fallbackStrategy.test.ts
│ │ │ │ ├── fallbackStrategy.ts
│ │ │ │ ├── gemmaClassifierStrategy.test.ts
│ │ │ │ ├── gemmaClassifierStrategy.ts
│ │ │ │ ├── numericalClassifierStrategy.test.ts
│ │ │ │ ├── numericalClassifierStrategy.ts
│ │ │ │ ├── overrideStrategy.test.ts
│ │ │ │ └── overrideStrategy.ts
│ │ │ ├── safety/
│ │ │ │ ├── built-in.test.ts
│ │ │ │ ├── built-in.ts
│ │ │ │ ├── checker-runner.test.ts
│ │ │ │ ├── checker-runner.ts
│ │ │ │ ├── conseca/
│ │ │ │ │ ├── conseca.test.ts
│ │ │ │ │ ├── conseca.ts
│ │ │ │ │ ├── integration.test.ts
│ │ │ │ │ ├── policy-enforcer.test.ts
│ │ │ │ │ ├── policy-enforcer.ts
│ │ │ │ │ ├── policy-generator.test.ts
│ │ │ │ │ ├── policy-generator.ts
│ │ │ │ │ └── types.ts
│ │ │ │ ├── context-builder.test.ts
│ │ │ │ ├── context-builder.ts
│ │ │ │ ├── protocol.ts
│ │ │ │ ├── registry.test.ts
│ │ │ │ └── registry.ts
│ │ │ ├── sandbox/
│ │ │ │ ├── linux/
│ │ │ │ │ ├── LinuxSandboxManager.test.ts
│ │ │ │ │ └── LinuxSandboxManager.ts
│ │ │ │ └── macos/
│ │ │ │ ├── MacOsSandboxManager.integration.test.ts
│ │ │ │ ├── MacOsSandboxManager.test.ts
│ │ │ │ ├── MacOsSandboxManager.ts
│ │ │ │ ├── baseProfile.ts
│ │ │ │ ├── seatbeltArgsBuilder.test.ts
│ │ │ │ └── seatbeltArgsBuilder.ts
│ │ │ ├── scheduler/
│ │ │ │ ├── confirmation.test.ts
│ │ │ │ ├── confirmation.ts
│ │ │ │ ├── policy.test.ts
│ │ │ │ ├── policy.ts
│ │ │ │ ├── scheduler.test.ts
│ │ │ │ ├── scheduler.ts
│ │ │ │ ├── scheduler_parallel.test.ts
│ │ │ │ ├── scheduler_waiting_callback.test.ts
│ │ │ │ ├── state-manager.test.ts
│ │ │ │ ├── state-manager.ts
│ │ │ │ ├── tool-executor.test.ts
│ │ │ │ ├── tool-executor.ts
│ │ │ │ ├── tool-modifier.test.ts
│ │ │ │ ├── tool-modifier.ts
│ │ │ │ └── types.ts
│ │ │ ├── services/
│ │ │ │ ├── FolderTrustDiscoveryService.test.ts
│ │ │ │ ├── FolderTrustDiscoveryService.ts
│ │ │ │ ├── __snapshots__/
│ │ │ │ │ └── toolOutputMaskingService.test.ts.snap
│ │ │ │ ├── chatCompressionService.test.ts
│ │ │ │ ├── chatCompressionService.ts
│ │ │ │ ├── chatRecordingService.test.ts
│ │ │ │ ├── chatRecordingService.ts
│ │ │ │ ├── contextManager.test.ts
│ │ │ │ ├── contextManager.ts
│ │ │ │ ├── environmentSanitization.test.ts
│ │ │ │ ├── environmentSanitization.ts
│ │ │ │ ├── executionLifecycleService.test.ts
│ │ │ │ ├── executionLifecycleService.ts
│ │ │ │ ├── fileDiscoveryService.test.ts
│ │ │ │ ├── fileDiscoveryService.ts
│ │ │ │ ├── fileKeychain.ts
│ │ │ │ ├── fileSystemService.test.ts
│ │ │ │ ├── fileSystemService.ts
│ │ │ │ ├── gitService.test.ts
│ │ │ │ ├── gitService.ts
│ │ │ │ ├── keychainService.test.ts
│ │ │ │ ├── keychainService.ts
│ │ │ │ ├── keychainTypes.ts
│ │ │ │ ├── loopDetectionService.test.ts
│ │ │ │ ├── loopDetectionService.ts
│ │ │ │ ├── modelConfig.golden.test.ts
│ │ │ │ ├── modelConfig.integration.test.ts
│ │ │ │ ├── modelConfigService.test.ts
│ │ │ │ ├── modelConfigService.ts
│ │ │ │ ├── modelConfigServiceTestUtils.ts
│ │ │ │ ├── sandboxManager.test.ts
│ │ │ │ ├── sandboxManager.ts
│ │ │ │ ├── sandboxManagerFactory.ts
│ │ │ │ ├── sandboxedFileSystemService.test.ts
│ │ │ │ ├── sandboxedFileSystemService.ts
│ │ │ │ ├── scripts/
│ │ │ │ │ └── GeminiSandbox.cs
│ │ │ │ ├── sessionSummaryService.test.ts
│ │ │ │ ├── sessionSummaryService.ts
│ │ │ │ ├── sessionSummaryUtils.test.ts
│ │ │ │ ├── sessionSummaryUtils.ts
│ │ │ │ ├── shellExecutionService.test.ts
│ │ │ │ ├── shellExecutionService.ts
│ │ │ │ ├── test-data/
│ │ │ │ │ ├── resolved-aliases-retry.golden.json
│ │ │ │ │ └── resolved-aliases.golden.json
│ │ │ │ ├── toolOutputMaskingService.test.ts
│ │ │ │ ├── toolOutputMaskingService.ts
│ │ │ │ ├── trackerService.test.ts
│ │ │ │ ├── trackerService.ts
│ │ │ │ ├── trackerTypes.ts
│ │ │ │ ├── windowsSandboxManager.test.ts
│ │ │ │ └── windowsSandboxManager.ts
│ │ │ ├── skills/
│ │ │ │ ├── builtin/
│ │ │ │ │ └── skill-creator/
│ │ │ │ │ ├── SKILL.md
│ │ │ │ │ └── scripts/
│ │ │ │ │ ├── init_skill.cjs
│ │ │ │ │ ├── package_skill.cjs
│ │ │ │ │ └── validate_skill.cjs
│ │ │ │ ├── skillLoader.test.ts
│ │ │ │ ├── skillLoader.ts
│ │ │ │ ├── skillManager.test.ts
│ │ │ │ ├── skillManager.ts
│ │ │ │ └── skillManagerAlias.test.ts
│ │ │ ├── telemetry/
│ │ │ │ ├── activity-detector.test.ts
│ │ │ │ ├── activity-detector.ts
│ │ │ │ ├── activity-monitor.test.ts
│ │ │ │ ├── activity-monitor.ts
│ │ │ │ ├── activity-types.ts
│ │ │ │ ├── billingEvents.test.ts
│ │ │ │ ├── billingEvents.ts
│ │ │ │ ├── clearcut-logger/
│ │ │ │ │ ├── clearcut-logger.test.ts
│ │ │ │ │ ├── clearcut-logger.ts
│ │ │ │ │ └── event-metadata-key.ts
│ │ │ │ ├── config.test.ts
│ │ │ │ ├── config.ts
│ │ │ │ ├── conseca-logger.test.ts
│ │ │ │ ├── conseca-logger.ts
│ │ │ │ ├── constants.ts
│ │ │ │ ├── file-exporters.test.ts
│ │ │ │ ├── file-exporters.ts
│ │ │ │ ├── gcp-exporters.test.ts
│ │ │ │ ├── gcp-exporters.ts
│ │ │ │ ├── high-water-mark-tracker.test.ts
│ │ │ │ ├── high-water-mark-tracker.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── integration.test.circular.ts
│ │ │ │ ├── llmRole.ts
│ │ │ │ ├── loggers.test.circular.ts
│ │ │ │ ├── loggers.test.ts
│ │ │ │ ├── loggers.ts
│ │ │ │ ├── memory-monitor.test.ts
│ │ │ │ ├── memory-monitor.ts
│ │ │ │ ├── metrics.test.ts
│ │ │ │ ├── metrics.ts
│ │ │ │ ├── rate-limiter.test.ts
│ │ │ │ ├── rate-limiter.ts
│ │ │ │ ├── sanitize.test.ts
│ │ │ │ ├── sanitize.ts
│ │ │ │ ├── sdk.test.ts
│ │ │ │ ├── sdk.ts
│ │ │ │ ├── semantic.test.ts
│ │ │ │ ├── semantic.truncation.test.ts
│ │ │ │ ├── semantic.ts
│ │ │ │ ├── startupProfiler.test.ts
│ │ │ │ ├── startupProfiler.ts
│ │ │ │ ├── telemetry-utils.test.ts
│ │ │ │ ├── telemetry-utils.ts
│ │ │ │ ├── telemetry.test.ts
│ │ │ │ ├── telemetryAttributes.ts
│ │ │ │ ├── tool-call-decision.ts
│ │ │ │ ├── trace.test.ts
│ │ │ │ ├── trace.ts
│ │ │ │ ├── types.ts
│ │ │ │ ├── uiTelemetry.test.ts
│ │ │ │ └── uiTelemetry.ts
│ │ │ ├── test-utils/
│ │ │ │ ├── config.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── mock-message-bus.ts
│ │ │ │ ├── mock-tool.ts
│ │ │ │ └── mockWorkspaceContext.ts
│ │ │ ├── tools/
│ │ │ │ ├── __snapshots__/
│ │ │ │ │ ├── read-file.test.ts.snap
│ │ │ │ │ └── shell.test.ts.snap
│ │ │ │ ├── activate-skill.test.ts
│ │ │ │ ├── activate-skill.ts
│ │ │ │ ├── ask-user.test.ts
│ │ │ │ ├── ask-user.ts
│ │ │ │ ├── base-tool-invocation.test.ts
│ │ │ │ ├── confirmation-policy.test.ts
│ │ │ │ ├── constants.ts
│ │ │ │ ├── definitions/
│ │ │ │ │ ├── __snapshots__/
│ │ │ │ │ │ └── coreToolsModelSnapshots.test.ts.snap
│ │ │ │ │ ├── base-declarations.ts
│ │ │ │ │ ├── coreTools.ts
│ │ │ │ │ ├── coreToolsModelSnapshots.test.ts
│ │ │ │ │ ├── dynamic-declaration-helpers.ts
│ │ │ │ │ ├── model-family-sets/
│ │ │ │ │ │ ├── default-legacy.ts
│ │ │ │ │ │ └── gemini-3.ts
│ │ │ │ │ ├── modelFamilyService.ts
│ │ │ │ │ ├── resolver.test.ts
│ │ │ │ │ ├── resolver.ts
│ │ │ │ │ ├── trackerTools.ts
│ │ │ │ │ └── types.ts
│ │ │ │ ├── diff-utils.test.ts
│ │ │ │ ├── diff-utils.ts
│ │ │ │ ├── diffOptions.test.ts
│ │ │ │ ├── diffOptions.ts
│ │ │ │ ├── edit.test.ts
│ │ │ │ ├── edit.ts
│ │ │ │ ├── enter-plan-mode.test.ts
│ │ │ │ ├── enter-plan-mode.ts
│ │ │ │ ├── exit-plan-mode.test.ts
│ │ │ │ ├── exit-plan-mode.ts
│ │ │ │ ├── get-internal-docs.test.ts
│ │ │ │ ├── get-internal-docs.ts
│ │ │ │ ├── glob.test.ts
│ │ │ │ ├── glob.ts
│ │ │ │ ├── grep-utils.ts
│ │ │ │ ├── grep.test.ts
│ │ │ │ ├── grep.ts
│ │ │ │ ├── jit-context.test.ts
│ │ │ │ ├── jit-context.ts
│ │ │ │ ├── line-endings.test.ts
│ │ │ │ ├── ls.test.ts
│ │ │ │ ├── ls.ts
│ │ │ │ ├── mcp-client-manager.test.ts
│ │ │ │ ├── mcp-client-manager.ts
│ │ │ │ ├── mcp-client.test.ts
│ │ │ │ ├── mcp-client.ts
│ │ │ │ ├── mcp-tool.test.ts
│ │ │ │ ├── mcp-tool.ts
│ │ │ │ ├── memoryTool.test.ts
│ │ │ │ ├── memoryTool.ts
│ │ │ │ ├── message-bus-integration.test.ts
│ │ │ │ ├── modifiable-tool.test.ts
│ │ │ │ ├── modifiable-tool.ts
│ │ │ │ ├── omissionPlaceholderDetector.test.ts
│ │ │ │ ├── omissionPlaceholderDetector.ts
│ │ │ │ ├── read-file.test.ts
│ │ │ │ ├── read-file.ts
│ │ │ │ ├── read-many-files.test.ts
│ │ │ │ ├── read-many-files.ts
│ │ │ │ ├── ripGrep.test.ts
│ │ │ │ ├── ripGrep.ts
│ │ │ │ ├── shell.test.ts
│ │ │ │ ├── shell.ts
│ │ │ │ ├── tool-error.ts
│ │ │ │ ├── tool-names.test.ts
│ │ │ │ ├── tool-names.ts
│ │ │ │ ├── tool-registry.test.ts
│ │ │ │ ├── tool-registry.ts
│ │ │ │ ├── tools.test.ts
│ │ │ │ ├── tools.ts
│ │ │ │ ├── trackerTools.test.ts
│ │ │ │ ├── trackerTools.ts
│ │ │ │ ├── web-fetch.test.ts
│ │ │ │ ├── web-fetch.ts
│ │ │ │ ├── web-search.test.ts
│ │ │ │ ├── web-search.ts
│ │ │ │ ├── write-file.test.ts
│ │ │ │ ├── write-file.ts
│ │ │ │ ├── write-todos.test.ts
│ │ │ │ ├── write-todos.ts
│ │ │ │ ├── xcode-mcp-fix-transport.test.ts
│ │ │ │ └── xcode-mcp-fix-transport.ts
│ │ │ ├── utils/
│ │ │ │ ├── __fixtures__/
│ │ │ │ │ └── dummy.wasm
│ │ │ │ ├── apiConversionUtils.test.ts
│ │ │ │ ├── apiConversionUtils.ts
│ │ │ │ ├── approvalModeUtils.test.ts
│ │ │ │ ├── approvalModeUtils.ts
│ │ │ │ ├── authConsent.test.ts
│ │ │ │ ├── authConsent.ts
│ │ │ │ ├── bfsFileSearch.test.ts
│ │ │ │ ├── bfsFileSearch.ts
│ │ │ │ ├── browser.ts
│ │ │ │ ├── browserConsent.test.ts
│ │ │ │ ├── browserConsent.ts
│ │ │ │ ├── cache.test.ts
│ │ │ │ ├── cache.ts
│ │ │ │ ├── channel.test.ts
│ │ │ │ ├── channel.ts
│ │ │ │ ├── checkpointUtils.test.ts
│ │ │ │ ├── checkpointUtils.ts
│ │ │ │ ├── checks.test.ts
│ │ │ │ ├── checks.ts
│ │ │ │ ├── compatibility.test.ts
│ │ │ │ ├── compatibility.ts
│ │ │ │ ├── constants.ts
│ │ │ │ ├── customHeaderUtils.test.ts
│ │ │ │ ├── customHeaderUtils.ts
│ │ │ │ ├── deadlineTimer.test.ts
│ │ │ │ ├── deadlineTimer.ts
│ │ │ │ ├── debugLogger.test.ts
│ │ │ │ ├── debugLogger.ts
│ │ │ │ ├── delay.test.ts
│ │ │ │ ├── delay.ts
│ │ │ │ ├── editCorrector.test.ts
│ │ │ │ ├── editCorrector.ts
│ │ │ │ ├── editor.test.ts
│ │ │ │ ├── editor.ts
│ │ │ │ ├── envExpansion.test.ts
│ │ │ │ ├── envExpansion.ts
│ │ │ │ ├── environmentContext.test.ts
│ │ │ │ ├── environmentContext.ts
│ │ │ │ ├── errorParsing.test.ts
│ │ │ │ ├── errorParsing.ts
│ │ │ │ ├── errorReporting.test.ts
│ │ │ │ ├── errorReporting.ts
│ │ │ │ ├── errors.test.ts
│ │ │ │ ├── errors.ts
│ │ │ │ ├── errors_timeout.test.ts
│ │ │ │ ├── events.test.ts
│ │ │ │ ├── events.ts
│ │ │ │ ├── exitCodes.ts
│ │ │ │ ├── extensionLoader.test.ts
│ │ │ │ ├── extensionLoader.ts
│ │ │ │ ├── fastAckHelper.test.ts
│ │ │ │ ├── fastAckHelper.ts
│ │ │ │ ├── fetch.test.ts
│ │ │ │ ├── fetch.ts
│ │ │ │ ├── fileDiffUtils.test.ts
│ │ │ │ ├── fileDiffUtils.ts
│ │ │ │ ├── fileUtils.test.ts
│ │ │ │ ├── fileUtils.ts
│ │ │ │ ├── filesearch/
│ │ │ │ │ ├── crawlCache.test.ts
│ │ │ │ │ ├── crawlCache.ts
│ │ │ │ │ ├── crawler.test.ts
│ │ │ │ │ ├── crawler.ts
│ │ │ │ │ ├── fileSearch.test.ts
│ │ │ │ │ ├── fileSearch.ts
│ │ │ │ │ ├── ignore.test.ts
│ │ │ │ │ ├── ignore.ts
│ │ │ │ │ ├── result-cache.test.ts
│ │ │ │ │ └── result-cache.ts
│ │ │ │ ├── flashFallback.test.ts
│ │ │ │ ├── formatters.test.ts
│ │ │ │ ├── formatters.ts
│ │ │ │ ├── fsErrorMessages.test.ts
│ │ │ │ ├── fsErrorMessages.ts
│ │ │ │ ├── generateContentResponseUtilities.test.ts
│ │ │ │ ├── generateContentResponseUtilities.ts
│ │ │ │ ├── getFolderStructure.test.ts
│ │ │ │ ├── getFolderStructure.ts
│ │ │ │ ├── getPty.ts
│ │ │ │ ├── gitIgnoreParser.test.ts
│ │ │ │ ├── gitIgnoreParser.ts
│ │ │ │ ├── gitUtils.ts
│ │ │ │ ├── googleErrors.test.ts
│ │ │ │ ├── googleErrors.ts
│ │ │ │ ├── googleQuotaErrors.test.ts
│ │ │ │ ├── googleQuotaErrors.ts
│ │ │ │ ├── headless.test.ts
│ │ │ │ ├── headless.ts
│ │ │ │ ├── httpErrors.ts
│ │ │ │ ├── ignoreFileParser.test.ts
│ │ │ │ ├── ignoreFileParser.ts
│ │ │ │ ├── ignorePatterns.test.ts
│ │ │ │ ├── ignorePatterns.ts
│ │ │ │ ├── installationManager.test.ts
│ │ │ │ ├── installationManager.ts
│ │ │ │ ├── language-detection.test.ts
│ │ │ │ ├── language-detection.ts
│ │ │ │ ├── llm-edit-fixer.test.ts
│ │ │ │ ├── llm-edit-fixer.ts
│ │ │ │ ├── markdownUtils.test.ts
│ │ │ │ ├── markdownUtils.ts
│ │ │ │ ├── memoryDiscovery.test.ts
│ │ │ │ ├── memoryDiscovery.ts
│ │ │ │ ├── memoryImportProcessor.test.ts
│ │ │ │ ├── memoryImportProcessor.ts
│ │ │ │ ├── messageInspectors.ts
│ │ │ │ ├── nextSpeakerChecker.test.ts
│ │ │ │ ├── nextSpeakerChecker.ts
│ │ │ │ ├── oauth-flow.test.ts
│ │ │ │ ├── oauth-flow.ts
│ │ │ │ ├── package.test.ts
│ │ │ │ ├── package.ts
│ │ │ │ ├── partUtils.test.ts
│ │ │ │ ├── partUtils.ts
│ │ │ │ ├── pathCorrector.test.ts
│ │ │ │ ├── pathCorrector.ts
│ │ │ │ ├── pathReader.test.ts
│ │ │ │ ├── pathReader.ts
│ │ │ │ ├── paths.test.ts
│ │ │ │ ├── paths.ts
│ │ │ │ ├── planUtils.test.ts
│ │ │ │ ├── planUtils.ts
│ │ │ │ ├── process-utils.test.ts
│ │ │ │ ├── process-utils.ts
│ │ │ │ ├── promptIdContext.ts
│ │ │ │ ├── quotaErrorDetection.ts
│ │ │ │ ├── retry.test.ts
│ │ │ │ ├── retry.ts
│ │ │ │ ├── safeJsonStringify.test.ts
│ │ │ │ ├── safeJsonStringify.ts
│ │ │ │ ├── schemaValidator.test.ts
│ │ │ │ ├── schemaValidator.ts
│ │ │ │ ├── secure-browser-launcher.test.ts
│ │ │ │ ├── secure-browser-launcher.ts
│ │ │ │ ├── security.test.ts
│ │ │ │ ├── security.ts
│ │ │ │ ├── session.ts
│ │ │ │ ├── sessionUtils.test.ts
│ │ │ │ ├── sessionUtils.ts
│ │ │ │ ├── shell-utils.integration.test.ts
│ │ │ │ ├── shell-utils.test.ts
│ │ │ │ ├── shell-utils.ts
│ │ │ │ ├── stdio.test.ts
│ │ │ │ ├── stdio.ts
│ │ │ │ ├── summarizer.test.ts
│ │ │ │ ├── summarizer.ts
│ │ │ │ ├── surface.ts
│ │ │ │ ├── systemEncoding.test.ts
│ │ │ │ ├── systemEncoding.ts
│ │ │ │ ├── terminal.ts
│ │ │ │ ├── terminalSerializer.test.ts
│ │ │ │ ├── terminalSerializer.ts
│ │ │ │ ├── testUtils.ts
│ │ │ │ ├── textUtils.test.ts
│ │ │ │ ├── textUtils.ts
│ │ │ │ ├── thoughtUtils.test.ts
│ │ │ │ ├── thoughtUtils.ts
│ │ │ │ ├── tokenCalculation.test.ts
│ │ │ │ ├── tokenCalculation.ts
│ │ │ │ ├── tool-utils.test.ts
│ │ │ │ ├── tool-utils.ts
│ │ │ │ ├── toolCallContext.test.ts
│ │ │ │ ├── toolCallContext.ts
│ │ │ │ ├── userAccountManager.test.ts
│ │ │ │ ├── userAccountManager.ts
│ │ │ │ ├── version.test.ts
│ │ │ │ ├── version.ts
│ │ │ │ ├── workspaceContext.test.ts
│ │ │ │ └── workspaceContext.ts
│ │ │ └── voice/
│ │ │ ├── responseFormatter.test.ts
│ │ │ └── responseFormatter.ts
│ │ ├── test-setup.ts
│ │ ├── tsconfig.json
│ │ └── vitest.config.ts
│ ├── devtools/
│ │ ├── GEMINI.md
│ │ ├── client/
│ │ │ ├── index.html
│ │ │ └── src/
│ │ │ ├── App.tsx
│ │ │ ├── hooks.ts
│ │ │ └── main.tsx
│ │ ├── esbuild.client.js
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── index.ts
│ │ │ └── types.ts
│ │ ├── tsconfig.build.json
│ │ └── tsconfig.json
│ ├── sdk/
│ │ ├── GEMINI.md
│ │ ├── README.md
│ │ ├── SDK_DESIGN.md
│ │ ├── examples/
│ │ │ ├── session-context.ts
│ │ │ └── simple.ts
│ │ ├── index.ts
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── agent.integration.test.ts
│ │ │ ├── agent.ts
│ │ │ ├── fs.ts
│ │ │ ├── index.ts
│ │ │ ├── session.ts
│ │ │ ├── shell.ts
│ │ │ ├── skills.integration.test.ts
│ │ │ ├── skills.ts
│ │ │ ├── tool.integration.test.ts
│ │ │ ├── tool.test.ts
│ │ │ ├── tool.ts
│ │ │ └── types.ts
│ │ ├── test-data/
│ │ │ ├── agent-async-instructions.json
│ │ │ ├── agent-dynamic-instructions.json
│ │ │ ├── agent-resume-session.json
│ │ │ ├── agent-static-instructions.json
│ │ │ ├── skill-dir-success.json
│ │ │ ├── skill-root-success.json
│ │ │ ├── skills/
│ │ │ │ └── pirate-skill/
│ │ │ │ └── SKILL.md
│ │ │ ├── tool-catchall-error.json
│ │ │ ├── tool-error-recovery.json
│ │ │ └── tool-success.json
│ │ ├── tsconfig.json
│ │ └── vitest.config.ts
│ ├── test-utils/
│ │ ├── GEMINI.md
│ │ ├── index.ts
│ │ ├── package.json
│ │ ├── src/
│ │ │ ├── file-system-test-helpers.ts
│ │ │ ├── index.ts
│ │ │ ├── mock-utils.ts
│ │ │ └── test-rig.ts
│ │ ├── tsconfig.json
│ │ └── vitest.config.ts
│ └── vscode-ide-companion/
│ ├── .vscode/
│ │ ├── launch.json
│ │ └── tasks.json
│ ├── .vscodeignore
│ ├── GEMINI.md
│ ├── LICENSE
│ ├── NOTICES.txt
│ ├── README.md
│ ├── development.md
│ ├── esbuild.js
│ ├── package.json
│ ├── scripts/
│ │ ├── check-vscode-release.js
│ │ └── generate-notices.js
│ ├── src/
│ │ ├── diff-manager.ts
│ │ ├── extension.test.ts
│ │ ├── extension.ts
│ │ ├── ide-server.test.ts
│ │ ├── ide-server.ts
│ │ ├── open-files-manager.test.ts
│ │ ├── open-files-manager.ts
│ │ └── utils/
│ │ └── logger.ts
│ └── tsconfig.json
├── schemas/
│ └── settings.schema.json
├── scripts/
│ ├── aggregate_evals.js
│ ├── batch_triage.sh
│ ├── build.js
│ ├── build_binary.js
│ ├── build_package.js
│ ├── build_sandbox.js
│ ├── build_vscode_companion.js
│ ├── changed_prompt.js
│ ├── check-build-status.js
│ ├── check-lockfile.js
│ ├── clean.js
│ ├── cleanup-branches.ts
│ ├── close_duplicate_issues.js
│ ├── copy_bundle_assets.js
│ ├── copy_files.js
│ ├── create_alias.sh
│ ├── deflake.js
│ ├── entitlements.plist
│ ├── generate-git-commit-info.js
│ ├── generate-keybindings-doc.ts
│ ├── generate-settings-doc.ts
│ ├── generate-settings-schema.ts
│ ├── get-release-version.js
│ ├── lint.js
│ ├── local_telemetry.js
│ ├── pre-commit.js
│ ├── prepare-github-release.js
│ ├── prepare-npm-release.js
│ ├── prepare-package.js
│ ├── relabel_issues.sh
│ ├── releasing/
│ │ ├── create-patch-pr.js
│ │ ├── patch-comment.js
│ │ ├── patch-create-comment.js
│ │ └── patch-trigger.js
│ ├── review.sh
│ ├── sandbox_command.js
│ ├── send_gemini_request.sh
│ ├── start.js
│ ├── sync_project_dry_run.js
│ ├── telemetry.js
│ ├── telemetry_gcp.js
│ ├── telemetry_genkit.js
│ ├── telemetry_utils.js
│ ├── test-windows-paths.js
│ ├── tests/
│ │ ├── autogen.test.ts
│ │ ├── generate-keybindings-doc.test.ts
│ │ ├── generate-settings-doc.test.ts
│ │ ├── generate-settings-schema.test.ts
│ │ ├── get-release-version.test.js
│ │ ├── patch-create-comment.test.js
│ │ ├── telemetry_gcp.test.ts
│ │ ├── test-setup.ts
│ │ └── vitest.config.ts
│ ├── utils/
│ │ └── autogen.ts
│ └── version.js
├── sea/
│ ├── sea-launch.cjs
│ └── sea-launch.test.js
├── third_party/
│ └── get-ripgrep/
│ ├── LICENSE
│ ├── package.json
│ └── src/
│ ├── downloadRipGrep.js
│ └── index.js
└── tsconfig.json
================================================
FILE CONTENTS
================================================
================================================
FILE: .allstar/branch_protection.yaml
================================================
action: 'log'
================================================
FILE: .editorconfig
================================================
root = true
[*]
charset = utf-8
insert_final_newline = true
end_of_line = lf
indent_style = space
indent_size = 2
max_line_length = 80
[Makefile]
indent_style = tab
indent_size = 8
================================================
FILE: .gcp/Dockerfile.gemini-code-builder
================================================
# Use a common base image like Debian.
# Using 'bookworm-slim' for a balance of size and compatibility.
FROM debian:bookworm-slim
# Set environment variables to prevent interactive prompts during installation
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_VERSION=20.12.2
ENV NODE_VERSION_MAJOR=20
ENV DOCKER_CLI_VERSION=26.1.3
ENV BUILDX_VERSION=v0.14.0
# Install dependencies for adding NodeSource repository, gcloud, and other tools
# - curl: for downloading files
# - gnupg: for managing GPG keys (used by NodeSource & Google Cloud SDK)
# - apt-transport-https: for HTTPS apt repositories
# - ca-certificates: for HTTPS apt repositories
# - rsync: the rsync utility itself
# - git: often useful in build environments
# - python3, python3-pip, python3-venv, python3-crcmod: for gcloud SDK and some of its components
# - lsb-release: for gcloud install script to identify distribution
RUN apt-get update && \
apt-get install -y --no-install-recommends \
curl \
gnupg \
apt-transport-https \
ca-certificates \
rsync \
git \
python3 \
python3-pip \
python3-venv \
python3-crcmod \
lsb-release \
&& rm -rf /var/lib/apt/lists/*
# Install Node.js and npm
# We'll use the official NodeSource repository for a specific version
RUN set -eux; \
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && \
# Pin the NodeSource repository to the Node.js 20.x line; keep this in sync
# with the NODE_VERSION_MAJOR environment variable declared above.
echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" > /etc/apt/sources.list.d/nodesource.list && \
apt-get update && \
apt-get install -y --no-install-recommends nodejs && \
npm install -g npm@latest && \
# Verify installations
node -v && \
npm -v && \
rm -rf /var/lib/apt/lists/*
# Install Docker CLI
# Download the static binary from Docker's official source
RUN set -eux; \
DOCKER_CLI_ARCH=$(dpkg --print-architecture); \
case "${DOCKER_CLI_ARCH}" in \
amd64) DOCKER_CLI_ARCH_SUFFIX="x86_64" ;; \
arm64) DOCKER_CLI_ARCH_SUFFIX="aarch64" ;; \
*) echo "Unsupported architecture: ${DOCKER_CLI_ARCH}"; exit 1 ;; \
esac; \
curl -fsSL "https://download.docker.com/linux/static/stable/${DOCKER_CLI_ARCH_SUFFIX}/docker-${DOCKER_CLI_VERSION}.tgz" -o docker.tgz && \
tar -xzf docker.tgz --strip-components=1 -C /usr/local/bin docker/docker && \
rm docker.tgz && \
# Verify installation
docker --version
# Install Docker Buildx plugin
RUN set -eux; \
BUILDX_ARCH_DEB=$(dpkg --print-architecture); \
case "${BUILDX_ARCH_DEB}" in \
amd64) BUILDX_ARCH_SUFFIX="amd64" ;; \
arm64) BUILDX_ARCH_SUFFIX="arm64" ;; \
*) echo "Unsupported architecture for Buildx: ${BUILDX_ARCH_DEB}"; exit 1 ;; \
esac; \
mkdir -p /usr/local/lib/docker/cli-plugins && \
curl -fsSL "https://github.com/docker/buildx/releases/download/${BUILDX_VERSION}/buildx-${BUILDX_VERSION}.linux-${BUILDX_ARCH_SUFFIX}" -o /usr/local/lib/docker/cli-plugins/docker-buildx && \
chmod +x /usr/local/lib/docker/cli-plugins/docker-buildx && \
# Verify installation
docker buildx version
# Install Google Cloud SDK (gcloud CLI)
RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg && apt-get update -y && apt-get install google-cloud-cli -y
# Set a working directory (optional, but good practice)
WORKDIR /workspace
# You can add a CMD or ENTRYPOINT if you intend to run this image directly,
# but for Cloud Build, it's usually not necessary as Cloud Build steps override it.
# For example:
ENTRYPOINT '/bin/bash'
================================================
FILE: .gcp/release-docker.yml
================================================
steps:
# Step 1: Install root dependencies (includes workspaces)
- name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
id: 'Install Dependencies'
entrypoint: 'npm'
args: ['install']
# Step 2: Authenticate for Docker (so we can push images to the artifact registry)
- name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
id: 'Authenticate docker'
entrypoint: 'npm'
args: ['run', 'auth']
# Step 3: Build workspace packages
- name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
id: 'Build packages'
entrypoint: 'npm'
args: ['run', 'build:packages']
# Step 4: Determine Docker Image Tag
- name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
id: 'Determine Docker Image Tag'
entrypoint: 'bash'
args:
- '-c'
- |-
SHELL_TAG_NAME="$TAG_NAME"
FINAL_TAG="$SHORT_SHA" # Default to SHA
if [[ "$$SHELL_TAG_NAME" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?$ ]]; then
echo "Release detected."
FINAL_TAG="$${SHELL_TAG_NAME#v}"
else
echo "Development release detected. Using commit SHA as tag."
fi
echo "Determined image tag: $$FINAL_TAG"
echo "$$FINAL_TAG" > /workspace/image_tag.txt
# Step 5: Build sandbox container image
- name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
id: 'Build sandbox Docker image'
entrypoint: 'bash'
args:
- '-c'
- |-
export GEMINI_SANDBOX_IMAGE_TAG=$$(cat /workspace/image_tag.txt)
echo "Using Docker image tag for build: $$GEMINI_SANDBOX_IMAGE_TAG"
npm run build:sandbox -- --output-file /workspace/final_image_uri.txt
env:
- 'GEMINI_SANDBOX=$_CONTAINER_TOOL'
# Step 6: Publish sandbox container image
- name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
id: 'Publish sandbox Docker image'
entrypoint: 'bash'
args:
- '-c'
- |-
set -e
FINAL_IMAGE_URI=$$(cat /workspace/final_image_uri.txt)
echo "Pushing sandbox image: $${FINAL_IMAGE_URI}"
$_CONTAINER_TOOL push "$${FINAL_IMAGE_URI}"
env:
- 'GEMINI_SANDBOX=$_CONTAINER_TOOL'
options:
defaultLogsBucketBehavior: 'REGIONAL_USER_OWNED_BUCKET'
dynamicSubstitutions: true
substitutions:
_CONTAINER_TOOL: 'docker'
================================================
FILE: .gemini/config.yaml
================================================
# Config for the Gemini Pull Request Review Bot.
# https://github.com/marketplace/gemini-code-assist
have_fun: false
code_review:
disable: false
comment_severity_threshold: 'HIGH'
max_review_comments: -1
pull_request_opened:
help: false
summary: true
code_review: true
include_drafts: false
ignore_patterns: []
================================================
FILE: .gemini/settings.json
================================================
{
"experimental": {
"plan": true,
"extensionReloading": true,
"modelSteering": true,
"memoryManager": true
},
"general": {
"devtools": true
}
}
================================================
FILE: .geminiignore
================================================
packages/core/src/services/scripts/*.exe
================================================
FILE: .gitattributes
================================================
# Set the default behavior for all files to automatically handle line endings.
# This will ensure that all text files are normalized to use LF (line feed)
# line endings in the repository, which helps prevent cross-platform issues.
* text=auto eol=lf
# Explicitly declare files that must have LF line endings for proper execution
# on Unix-like systems.
*.sh eol=lf
*.bash eol=lf
Makefile eol=lf
# Explicitly declare binary file types to prevent Git from attempting to
# normalize their line endings.
*.png binary
*.jpg binary
*.jpeg binary
*.gif binary
*.ico binary
*.pdf binary
*.woff binary
*.woff2 binary
*.eot binary
*.ttf binary
*.otf binary
================================================
FILE: .github/CODEOWNERS
================================================
# By default, require reviews from the maintainers for all files.
* @google-gemini/gemini-cli-maintainers
# Require reviews from the release approvers for critical files.
# These patterns override the rule above.
/package.json @google-gemini/gemini-cli-askmode-approvers
/package-lock.json @google-gemini/gemini-cli-askmode-approvers
/GEMINI.md @google-gemini/gemini-cli-askmode-approvers
/SECURITY.md @google-gemini/gemini-cli-askmode-approvers
/LICENSE @google-gemini/gemini-cli-askmode-approvers
/.github/workflows/ @google-gemini/gemini-cli-askmode-approvers
/packages/cli/package.json @google-gemini/gemini-cli-askmode-approvers
/packages/core/package.json @google-gemini/gemini-cli-askmode-approvers
# Docs have a dedicated approver group in addition to maintainers
/docs/ @google-gemini/gemini-cli-maintainers @google-gemini/gemini-cli-docs
/README.md @google-gemini/gemini-cli-maintainers @google-gemini/gemini-cli-docs
# Prompt contents, tool definitions, and evals require reviews from prompt approvers
/packages/core/src/prompts/ @google-gemini/gemini-cli-prompt-approvers
/packages/core/src/tools/ @google-gemini/gemini-cli-prompt-approvers
/evals/ @google-gemini/gemini-cli-prompt-approvers
================================================
FILE: .github/ISSUE_TEMPLATE/bug_report.yml
================================================
name: 'Bug Report'
description: 'Report a bug to help us improve Gemini CLI'
body:
- type: 'markdown'
attributes:
value: |-
> [!IMPORTANT]
> Thanks for taking the time to fill out this bug report!
>
> Please search **[existing issues](https://github.com/google-gemini/gemini-cli/issues)** to see if an issue already exists for the bug you encountered.
- type: 'textarea'
id: 'problem'
attributes:
label: 'What happened?'
description: 'A clear and concise description of what the bug is.'
validations:
required: true
- type: 'textarea'
id: 'expected'
attributes:
label: 'What did you expect to happen?'
validations:
required: true
- type: 'textarea'
id: 'info'
attributes:
label: 'Client information'
description: 'Please paste the full text from the `/about` command run from Gemini CLI. Also include which platform (macOS, Windows, Linux). Note that this output contains your email address. Consider removing it before submitting.'
value: |-
Client Information
Run `gemini` to enter the interactive CLI, then run the `/about` command.
```console
> /about
# paste output here
```
validations:
required: true
- type: 'textarea'
id: 'login-info'
attributes:
label: 'Login information'
description: 'Describe how you are logging in (e.g., Google Account, API key).'
- type: 'textarea'
id: 'additional-context'
attributes:
label: 'Anything else we need to know?'
description: 'Add any other context about the problem here.'
================================================
FILE: .github/ISSUE_TEMPLATE/feature_request.yml
================================================
name: 'Feature Request'
description: 'Suggest an idea for this project'
labels:
- 'status/need-triage'
type: 'Feature'
body:
- type: 'markdown'
attributes:
value: |-
> [!IMPORTANT]
> Thanks for taking the time to suggest an enhancement!
>
> Please search **[existing issues](https://github.com/google-gemini/gemini-cli/issues)** to see if a similar feature has already been requested.
- type: 'textarea'
id: 'feature'
attributes:
label: 'What would you like to be added?'
description: 'A clear and concise description of the enhancement.'
validations:
required: true
- type: 'textarea'
id: 'rationale'
attributes:
label: 'Why is this needed?'
description: 'A clear and concise description of why this enhancement is needed.'
validations:
required: true
- type: 'textarea'
id: 'additional-context'
attributes:
label: 'Additional context'
description: 'Add any other context or screenshots about the feature request here.'
================================================
FILE: .github/ISSUE_TEMPLATE/website_issue.yml
================================================
name: 'Website issue'
description: 'Report an issue with the Gemini CLI Website and Gemini CLI Extensions Gallery'
title: 'GeminiCLI.com Feedback: [ISSUE]'
labels:
- 'area/extensions'
- 'area/documentation'
body:
- type: 'markdown'
attributes:
value: |-
> [!IMPORTANT]
> Thanks for taking the time to report an issue with the Gemini CLI Website!
>
> Please search **[existing issues](https://github.com/google-gemini/gemini-cli/issues?q=is%3Aissue+is%3Aopen+label%3Aarea%2Fwebsite)** to see if a similar issue has already been reported.
- type: 'input'
id: 'url'
attributes:
label: 'URL of the page with the issue'
description: 'Please provide the URL where the issue occurs.'
validations:
required: true
- type: 'textarea'
id: 'problem'
attributes:
label: 'What is the problem?'
description: 'A clear and concise description of what the bug or issue is.'
validations:
required: true
- type: 'textarea'
id: 'expected'
attributes:
label: 'What did you expect to happen?'
validations:
required: true
- type: 'textarea'
id: 'additional-context'
attributes:
label: 'Additional context'
description: 'Add any other context or screenshots about the issue here.'
================================================
FILE: .github/actions/calculate-vars/action.yml
================================================
name: 'Calculate vars'
description: 'Calculate commonly used vars in our release process'
inputs:
dry_run:
description: 'Whether or not this is a dry run'
type: 'boolean'
outputs:
is_dry_run:
description: 'Boolean flag indicating if the current run is a dry-run or a production release.'
value: '${{ steps.set_vars.outputs.is_dry_run }}'
runs:
using: 'composite'
steps:
- name: 'Print inputs'
shell: 'bash'
env:
JSON_INPUTS: '${{ toJSON(inputs) }}'
run: 'echo "$JSON_INPUTS"'
- name: 'Set vars for simplified logic'
id: 'set_vars'
shell: 'bash'
env:
DRY_RUN_INPUT: '${{ inputs.dry_run }}'
run: |-
is_dry_run="true"
if [[ "${DRY_RUN_INPUT}" == "" || "${DRY_RUN_INPUT}" == "false" ]]; then
is_dry_run="false"
fi
echo "is_dry_run=${is_dry_run}" >> "${GITHUB_OUTPUT}"
================================================
FILE: .github/actions/create-pull-request/action.yml
================================================
name: 'Create Pull Request'
description: 'Creates a pull request.'
inputs:
branch-name:
description: 'The name of the branch to create the PR from.'
required: true
pr-title:
description: 'The title of the pull request.'
required: true
pr-body:
description: 'The body of the pull request.'
required: true
base-branch:
description: 'The branch to merge into.'
required: true
default: 'main'
github-token:
description: 'The GitHub token to use for creating the pull request.'
required: true
dry-run:
description: 'Whether to run in dry-run mode.'
required: false
default: 'false'
working-directory:
description: 'The working directory to run the commands in.'
required: false
default: '.'
runs:
using: 'composite'
steps:
- name: '📝 Print Inputs'
shell: 'bash'
env:
JSON_INPUTS: '${{ toJSON(inputs) }}'
run: 'echo "$JSON_INPUTS"'
- name: 'Creates a Pull Request'
if: "inputs.dry-run != 'true'"
env:
GH_TOKEN: '${{ inputs.github-token }}'
INPUTS_BRANCH_NAME: '${{ inputs.branch-name }}'
INPUTS_PR_TITLE: '${{ inputs.pr-title }}'
INPUTS_PR_BODY: '${{ inputs.pr-body }}'
INPUTS_BASE_BRANCH: '${{ inputs.base-branch }}'
shell: 'bash'
working-directory: '${{ inputs.working-directory }}'
run: |
set -e
if ! git ls-remote --exit-code --heads origin "${INPUTS_BRANCH_NAME}"; then
echo "::error::Branch '${INPUTS_BRANCH_NAME}' does not exist on the remote repository."
exit 1
fi
PR_URL=$(gh pr create \
--title "${INPUTS_PR_TITLE}" \
--body "${INPUTS_PR_BODY}" \
--base "${INPUTS_BASE_BRANCH}" \
--head "${INPUTS_BRANCH_NAME}" \
--fill)
gh pr merge "$PR_URL" --auto
================================================
FILE: .github/actions/npm-auth-token/action.yml
================================================
name: 'NPM Auth Token'
description: 'Generates an NPM auth token for publishing a specific package'
inputs:
package-name:
description: 'The name of the package to publish'
required: true
github-token:
description: 'The GitHub token.'
required: true
wombat-token-core:
description: 'The npm token for the cli-core package.'
required: true
wombat-token-cli:
description: 'The npm token for the cli package.'
required: true
wombat-token-a2a-server:
description: 'The npm token for the a2a-server package.'
required: true
outputs:
auth-token:
description: 'The generated NPM auth token'
value: '${{ steps.npm_auth_token.outputs.auth-token }}'
runs:
using: 'composite'
steps:
- name: 'Generate NPM Auth Token'
id: 'npm_auth_token'
shell: 'bash'
run: |
AUTH_TOKEN="${INPUTS_GITHUB_TOKEN}"
PACKAGE_NAME="${INPUTS_PACKAGE_NAME}"
PRIVATE_REPO="@google-gemini/"
if [[ "$PACKAGE_NAME" == "$PRIVATE_REPO"* ]]; then
AUTH_TOKEN="${INPUTS_GITHUB_TOKEN}"
elif [[ "$PACKAGE_NAME" == "@google/gemini-cli" ]]; then
AUTH_TOKEN="${INPUTS_WOMBAT_TOKEN_CLI}"
elif [[ "$PACKAGE_NAME" == "@google/gemini-cli-core" ]]; then
AUTH_TOKEN="${INPUTS_WOMBAT_TOKEN_CORE}"
elif [[ "$PACKAGE_NAME" == "@google/gemini-cli-a2a-server" ]]; then
AUTH_TOKEN="${INPUTS_WOMBAT_TOKEN_A2A_SERVER}"
fi
echo "auth-token=$AUTH_TOKEN" >> $GITHUB_OUTPUT
env:
INPUTS_GITHUB_TOKEN: '${{ inputs.github-token }}'
INPUTS_PACKAGE_NAME: '${{ inputs.package-name }}'
INPUTS_WOMBAT_TOKEN_CLI: '${{ inputs.wombat-token-cli }}'
INPUTS_WOMBAT_TOKEN_CORE: '${{ inputs.wombat-token-core }}'
INPUTS_WOMBAT_TOKEN_A2A_SERVER: '${{ inputs.wombat-token-a2a-server }}'
================================================
FILE: .github/actions/post-coverage-comment/action.yml
================================================
name: 'Post Coverage Comment Action'
description: 'Prepares and posts a code coverage comment to a PR.'
inputs:
cli_json_file:
description: 'Path to CLI coverage-summary.json'
required: true
core_json_file:
description: 'Path to Core coverage-summary.json'
required: true
cli_full_text_summary_file:
description: 'Path to CLI full-text-summary.txt'
required: true
core_full_text_summary_file:
description: 'Path to Core full-text-summary.txt'
required: true
node_version:
description: 'Node.js version for context in messages'
required: true
os:
description: 'The os for context in messages'
required: true
github_token:
description: 'GitHub token for posting comments'
required: true
runs:
using: 'composite'
steps:
- name: '📝 Print Inputs'
shell: 'bash'
env:
JSON_INPUTS: '${{ toJSON(inputs) }}'
run: 'echo "$JSON_INPUTS"'
- name: 'Prepare Coverage Comment'
id: 'prep_coverage_comment'
shell: 'bash'
env:
CLI_JSON_FILE: '${{ inputs.cli_json_file }}'
CORE_JSON_FILE: '${{ inputs.core_json_file }}'
CLI_FULL_TEXT_SUMMARY_FILE: '${{ inputs.cli_full_text_summary_file }}'
CORE_FULL_TEXT_SUMMARY_FILE: '${{ inputs.core_full_text_summary_file }}'
COMMENT_FILE: 'coverage-comment.md'
NODE_VERSION: '${{ inputs.node_version }}'
OS: '${{ inputs.os }}'
run: |-
# Extract percentages using jq for the main table
if [ -f "${CLI_JSON_FILE}" ]; then
cli_lines_pct="$(jq -r '.total.lines.pct' "${CLI_JSON_FILE}")"
cli_statements_pct="$(jq -r '.total.statements.pct' "${CLI_JSON_FILE}")"
cli_functions_pct="$(jq -r '.total.functions.pct' "${CLI_JSON_FILE}")"
cli_branches_pct="$(jq -r '.total.branches.pct' "${CLI_JSON_FILE}")"
else
cli_lines_pct="N/A"
cli_statements_pct="N/A"
cli_functions_pct="N/A"
cli_branches_pct="N/A"
echo "CLI coverage-summary.json not found at: ${CLI_JSON_FILE}" >&2 # Error to stderr
fi
if [ -f "${CORE_JSON_FILE}" ]; then
core_lines_pct="$(jq -r '.total.lines.pct' "${CORE_JSON_FILE}")"
core_statements_pct="$(jq -r '.total.statements.pct' "${CORE_JSON_FILE}")"
core_functions_pct="$(jq -r '.total.functions.pct' "${CORE_JSON_FILE}")"
core_branches_pct="$(jq -r '.total.branches.pct' "${CORE_JSON_FILE}")"
else
core_lines_pct="N/A"
core_statements_pct="N/A"
core_functions_pct="N/A"
core_branches_pct="N/A"
echo "Core coverage-summary.json not found at: ${CORE_JSON_FILE}" >&2 # Error to stderr
fi
echo "## Code Coverage Summary" > "${COMMENT_FILE}"
echo "" >> "${COMMENT_FILE}"
echo "| Package | Lines | Statements | Functions | Branches |" >> "${COMMENT_FILE}"
echo "|---|---|---|---|---|" >> "${COMMENT_FILE}"
echo "| CLI | ${cli_lines_pct}% | ${cli_statements_pct}% | ${cli_functions_pct}% | ${cli_branches_pct}% |" >> "${COMMENT_FILE}"
echo "| Core | ${core_lines_pct}% | ${core_statements_pct}% | ${core_functions_pct}% | ${core_branches_pct}% |" >> "${COMMENT_FILE}"
echo "" >> "${COMMENT_FILE}"
# CLI Package - Collapsible Section (with full text summary from file)
echo "" >> "${COMMENT_FILE}"
echo "CLI Package - Full Text Report" >> "${COMMENT_FILE}"
echo "" >> "${COMMENT_FILE}"
echo '```text' >> "${COMMENT_FILE}"
if [ -f "${CLI_FULL_TEXT_SUMMARY_FILE}" ]; then
cat "${CLI_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}"
else
echo "CLI full-text-summary.txt not found at: ${CLI_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}"
fi
echo '```' >> "${COMMENT_FILE}"
echo "" >> "${COMMENT_FILE}"
echo "" >> "${COMMENT_FILE}"
# Core Package - Collapsible Section (with full text summary from file)
echo "" >> "${COMMENT_FILE}"
echo "Core Package - Full Text Report" >> "${COMMENT_FILE}"
echo "" >> "${COMMENT_FILE}"
echo '```text' >> "${COMMENT_FILE}"
if [ -f "${CORE_FULL_TEXT_SUMMARY_FILE}" ]; then
cat "${CORE_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}"
else
echo "Core full-text-summary.txt not found at: ${CORE_FULL_TEXT_SUMMARY_FILE}" >> "${COMMENT_FILE}"
fi
echo '```' >> "${COMMENT_FILE}"
echo "" >> "${COMMENT_FILE}"
echo "" >> "${COMMENT_FILE}"
echo "_For detailed HTML reports, please see the 'coverage-reports-${NODE_VERSION}-${OS}' artifact from the main CI run._" >> "${COMMENT_FILE}"
- name: 'Post Coverage Comment'
uses: 'thollander/actions-comment-pull-request@65f9e5c9a1f2cd378bd74b2e057c9736982a8e74' # ratchet:thollander/actions-comment-pull-request@v3
if: |-
${{ always() }}
with:
file-path: 'coverage-comment.md' # Use the generated file directly
comment-tag: 'code-coverage-summary'
github-token: '${{ inputs.github_token }}'
================================================
FILE: .github/actions/publish-release/action.yml
================================================
name: 'Publish Release'
description: 'Builds, prepares, and publishes the gemini-cli packages to npm and creates a GitHub release.'
inputs:
release-version:
description: 'The version to release (e.g., 0.1.11).'
required: true
npm-tag:
description: 'The npm tag to publish with (e.g., latest, preview, nightly).'
required: true
wombat-token-core:
description: 'The npm token for the cli-core package.'
required: true
wombat-token-cli:
description: 'The npm token for the cli package.'
required: true
wombat-token-a2a-server:
description: 'The npm token for the a2a package.'
required: true
github-token:
description: 'The GitHub token for creating the release.'
required: true
github-release-token:
description: 'The GitHub token used specifically for creating the GitHub release (to trigger other workflows).'
required: false
dry-run:
description: 'Whether to run in dry-run mode.'
type: 'string'
required: true
release-tag:
description: 'The release tag for the release (e.g., v0.1.11).'
required: true
previous-tag:
description: 'The previous tag to use for generating release notes.'
required: true
skip-github-release:
description: 'Whether to skip creating a GitHub release.'
type: 'boolean'
required: false
default: false
working-directory:
description: 'The working directory to run the steps in.'
required: false
default: '.'
force-skip-tests:
description: 'Skip tests and validation'
required: false
default: false
skip-branch-cleanup:
description: 'Whether to skip cleaning up the release branch.'
type: 'boolean'
required: false
default: false
gemini_api_key:
description: 'The API key for running integration tests.'
required: true
npm-registry-publish-url:
description: 'npm registry publish url'
required: true
npm-registry-url:
description: 'npm registry url'
required: true
npm-registry-scope:
description: 'npm registry scope'
required: true
cli-package-name:
description: 'The name of the cli package.'
required: true
core-package-name:
description: 'The name of the core package.'
required: true
a2a-package-name:
description: 'The name of the a2a package.'
required: true
runs:
using: 'composite'
steps:
- name: '📝 Print Inputs'
shell: 'bash'
env:
JSON_INPUTS: '${{ toJSON(inputs) }}'
run: 'echo "$JSON_INPUTS"'
- name: '👤 Configure Git User'
working-directory: '${{ inputs.working-directory }}'
shell: 'bash'
run: |
git config user.name "gemini-cli-robot"
git config user.email "gemini-cli-robot@google.com"
- name: '🌿 Create and switch to a release branch'
working-directory: '${{ inputs.working-directory }}'
id: 'release_branch'
shell: 'bash'
run: |
BRANCH_NAME="release/${INPUTS_RELEASE_TAG}"
git switch -c "${BRANCH_NAME}"
echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}"
env:
INPUTS_RELEASE_TAG: '${{ inputs.release-tag }}'
- name: '⬆️ Update package versions'
working-directory: '${{ inputs.working-directory }}'
shell: 'bash'
run: |
npm run release:version "${INPUTS_RELEASE_VERSION}"
env:
INPUTS_RELEASE_VERSION: '${{ inputs.release-version }}'
- name: '💾 Commit and Conditionally Push package versions'
working-directory: '${{ inputs.working-directory }}'
shell: 'bash'
env:
BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
DRY_RUN: '${{ inputs.dry-run }}'
RELEASE_TAG: '${{ inputs.release-tag }}'
run: |-
set -e
git add package.json package-lock.json packages/*/package.json
git commit -m "chore(release): ${RELEASE_TAG}"
if [[ "${DRY_RUN}" == "false" ]]; then
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}" --follow-tags
else
echo "Dry run enabled. Skipping push."
fi
- name: '🛠️ Build and Prepare Packages'
working-directory: '${{ inputs.working-directory }}'
shell: 'bash'
run: |
npm run build:packages
npm run prepare:package
- name: '🎁 Bundle'
working-directory: '${{ inputs.working-directory }}'
shell: 'bash'
run: |
npm run bundle
# TODO: Refactor this github specific publishing script to be generalized based upon inputs.
- name: '📦 Prepare for GitHub release'
if: "inputs.npm-registry-url == 'https://npm.pkg.github.com/'"
working-directory: '${{ inputs.working-directory }}'
shell: 'bash'
run: |
node ${{ github.workspace }}/scripts/prepare-github-release.js
- name: 'Configure npm for publishing to npm'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
with:
node-version-file: '${{ inputs.working-directory }}/.nvmrc'
registry-url: '${{inputs.npm-registry-publish-url}}'
scope: '${{inputs.npm-registry-scope}}'
- name: 'Get core Token'
uses: './.github/actions/npm-auth-token'
id: 'core-token'
with:
package-name: '${{ inputs.core-package-name }}'
github-token: '${{ inputs.github-token }}'
wombat-token-core: '${{ inputs.wombat-token-core }}'
wombat-token-cli: '${{ inputs.wombat-token-cli }}'
wombat-token-a2a-server: '${{ inputs.wombat-token-a2a-server }}'
- name: '📦 Publish CORE to NPM'
working-directory: '${{ inputs.working-directory }}'
env:
NODE_AUTH_TOKEN: '${{ steps.core-token.outputs.auth-token }}'
INPUTS_DRY_RUN: '${{ inputs.dry-run }}'
INPUTS_CORE_PACKAGE_NAME: '${{ inputs.core-package-name }}'
shell: 'bash'
run: |
npm publish \
--dry-run="${INPUTS_DRY_RUN}" \
--workspace="${INPUTS_CORE_PACKAGE_NAME}" \
--no-tag
npm dist-tag rm ${INPUTS_CORE_PACKAGE_NAME} false --silent
- name: '🔗 Install latest core package'
working-directory: '${{ inputs.working-directory }}'
if: "${{ inputs.dry-run != 'true' }}"
shell: 'bash'
run: |
npm install "${INPUTS_CORE_PACKAGE_NAME}@${INPUTS_RELEASE_VERSION}" \
--workspace="${INPUTS_CLI_PACKAGE_NAME}" \
--workspace="${INPUTS_A2A_PACKAGE_NAME}" \
--save-exact
env:
INPUTS_CORE_PACKAGE_NAME: '${{ inputs.core-package-name }}'
INPUTS_RELEASE_VERSION: '${{ inputs.release-version }}'
INPUTS_CLI_PACKAGE_NAME: '${{ inputs.cli-package-name }}'
INPUTS_A2A_PACKAGE_NAME: '${{ inputs.a2a-package-name }}'
- name: '📦 Prepare bundled CLI for npm release'
if: "inputs.npm-registry-url != 'https://npm.pkg.github.com/' && inputs.npm-tag != 'latest'"
working-directory: '${{ inputs.working-directory }}'
shell: 'bash'
run: |
node ${{ github.workspace }}/scripts/prepare-npm-release.js
- name: 'Get CLI Token'
uses: './.github/actions/npm-auth-token'
id: 'cli-token'
with:
package-name: '${{ inputs.cli-package-name }}'
github-token: '${{ inputs.github-token }}'
wombat-token-core: '${{ inputs.wombat-token-core }}'
wombat-token-cli: '${{ inputs.wombat-token-cli }}'
wombat-token-a2a-server: '${{ inputs.wombat-token-a2a-server }}'
- name: '📦 Publish CLI'
working-directory: '${{ inputs.working-directory }}'
env:
NODE_AUTH_TOKEN: '${{ steps.cli-token.outputs.auth-token }}'
INPUTS_DRY_RUN: '${{ inputs.dry-run }}'
INPUTS_CLI_PACKAGE_NAME: '${{ inputs.cli-package-name }}'
shell: 'bash'
run: |
npm publish \
--dry-run="${INPUTS_DRY_RUN}" \
--workspace="${INPUTS_CLI_PACKAGE_NAME}" \
--no-tag
npm dist-tag rm ${INPUTS_CLI_PACKAGE_NAME} false --silent
- name: 'Get a2a-server Token'
uses: './.github/actions/npm-auth-token'
id: 'a2a-token'
with:
package-name: '${{ inputs.a2a-package-name }}'
github-token: '${{ inputs.github-token }}'
wombat-token-core: '${{ inputs.wombat-token-core }}'
wombat-token-cli: '${{ inputs.wombat-token-cli }}'
wombat-token-a2a-server: '${{ inputs.wombat-token-a2a-server }}'
- name: '📦 Publish a2a'
working-directory: '${{ inputs.working-directory }}'
env:
NODE_AUTH_TOKEN: '${{ steps.a2a-token.outputs.auth-token }}'
INPUTS_DRY_RUN: '${{ inputs.dry-run }}'
INPUTS_A2A_PACKAGE_NAME: '${{ inputs.a2a-package-name }}'
shell: 'bash'
# Tag staging for initial release
run: |
npm publish \
--dry-run="${INPUTS_DRY_RUN}" \
--workspace="${INPUTS_A2A_PACKAGE_NAME}" \
--no-tag
npm dist-tag rm ${INPUTS_A2A_PACKAGE_NAME} false --silent
- name: '🔬 Verify NPM release by version'
uses: './.github/actions/verify-release'
if: "${{ inputs.dry-run != 'true' && inputs.force-skip-tests != 'true' }}"
with:
npm-package: '${{ inputs.cli-package-name }}@${{ inputs.release-version }}'
expected-version: '${{ inputs.release-version }}'
working-directory: '${{ inputs.working-directory }}'
gemini_api_key: '${{ inputs.gemini_api_key }}'
github-token: '${{ inputs.github-token }}'
npm-registry-url: '${{ inputs.npm-registry-url }}'
npm-registry-scope: '${{ inputs.npm-registry-scope }}'
- name: '🏷️ Tag release'
uses: './.github/actions/tag-npm-release'
with:
channel: '${{ inputs.npm-tag }}'
version: '${{ inputs.release-version }}'
dry-run: '${{ inputs.dry-run }}'
github-token: '${{ inputs.github-token }}'
wombat-token-core: '${{ inputs.wombat-token-core }}'
wombat-token-cli: '${{ inputs.wombat-token-cli }}'
wombat-token-a2a-server: '${{ inputs.wombat-token-a2a-server }}'
cli-package-name: '${{ inputs.cli-package-name }}'
core-package-name: '${{ inputs.core-package-name }}'
a2a-package-name: '${{ inputs.a2a-package-name }}'
working-directory: '${{ inputs.working-directory }}'
- name: '🎉 Create GitHub Release'
working-directory: '${{ inputs.working-directory }}'
if: "${{ inputs.dry-run != 'true' && inputs.skip-github-release != 'true' && inputs.npm-tag != 'dev' && inputs.npm-registry-url != 'https://npm.pkg.github.com/' }}"
env:
GITHUB_TOKEN: '${{ inputs.github-release-token || inputs.github-token }}'
INPUTS_RELEASE_TAG: '${{ inputs.release-tag }}'
STEPS_RELEASE_BRANCH_OUTPUTS_BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
INPUTS_PREVIOUS_TAG: '${{ inputs.previous-tag }}'
shell: 'bash'
run: |
gh release create "${INPUTS_RELEASE_TAG}" \
bundle/gemini.js \
--target "${STEPS_RELEASE_BRANCH_OUTPUTS_BRANCH_NAME}" \
--title "Release ${INPUTS_RELEASE_TAG}" \
--notes-start-tag "${INPUTS_PREVIOUS_TAG}" \
--generate-notes \
${{ inputs.npm-tag != 'latest' && '--prerelease' || '' }}
- name: '🧹 Clean up release branch'
working-directory: '${{ inputs.working-directory }}'
if: "${{ inputs.dry-run != 'true' && inputs.skip-branch-cleanup != 'true' }}"
continue-on-error: true
shell: 'bash'
run: |
echo "Cleaning up release branch ${STEPS_RELEASE_BRANCH_OUTPUTS_BRANCH_NAME}..."
git push origin --delete "${STEPS_RELEASE_BRANCH_OUTPUTS_BRANCH_NAME}"
env:
STEPS_RELEASE_BRANCH_OUTPUTS_BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
================================================
FILE: .github/actions/push-docker/action.yml
================================================
name: 'Push to docker'
description: 'Builds packages and pushes a docker image to GHCR'
inputs:
github-actor:
description: 'Github actor'
required: true
github-secret:
description: 'Github secret'
required: true
ref-name:
description: 'Github ref name'
required: true
github-sha:
description: 'Github Commit SHA Hash'
required: true
runs:
using: 'composite'
steps:
- name: '📝 Print Inputs'
shell: 'bash'
env:
JSON_INPUTS: '${{ toJSON(inputs) }}'
run: 'echo "$JSON_INPUTS"'
- name: 'Checkout'
uses: 'actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955' # ratchet:actions/checkout@v4
with:
ref: '${{ inputs.github-sha }}'
fetch-depth: 0
- name: 'Install Dependencies'
shell: 'bash'
run: 'npm install'
- name: 'Set up Docker Buildx'
uses: 'docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435' # ratchet:docker/setup-buildx-action@v3
- name: 'build'
shell: 'bash'
run: 'npm run build'
- name: 'pack @google/gemini-cli'
shell: 'bash'
run: 'npm pack -w @google/gemini-cli --pack-destination ./packages/cli/dist'
- name: 'pack @google/gemini-cli-core'
shell: 'bash'
run: 'npm pack -w @google/gemini-cli-core --pack-destination ./packages/core/dist'
- name: 'Log in to GitHub Container Registry'
uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3
with:
registry: 'ghcr.io'
username: '${{ inputs.github-actor }}'
password: '${{ inputs.github-secret }}'
- name: 'Get branch name'
id: 'branch_name'
shell: 'bash'
run: |
REF_NAME="${INPUTS_REF_NAME}"
echo "name=${REF_NAME%/merge}" >> $GITHUB_OUTPUT
env:
INPUTS_REF_NAME: '${{ inputs.ref-name }}'
- name: 'Build and Push the Docker Image'
uses: 'docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83' # ratchet:docker/build-push-action@v6
with:
context: '.'
file: './Dockerfile'
push: true
provenance: false # avoid pushing 3 images to Artifact Registry
tags: |
ghcr.io/${{ github.repository }}/cli:${{ steps.branch_name.outputs.name }}
ghcr.io/${{ github.repository }}/cli:${{ inputs.github-sha }}
- name: 'Create issue on failure'
if: |-
${{ failure() }}
shell: 'bash'
env:
GITHUB_TOKEN: '${{ inputs.github-secret }}'
DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'
run: |-
gh issue create \
--title "Docker build failed" \
--body "The docker build failed. See the full run for details: ${DETAILS_URL}" \
--label "release-failure"
================================================
FILE: .github/actions/push-sandbox/action.yml
================================================
name: 'Build and push sandbox docker'
description: 'Pushes sandbox docker image to container registry'
inputs:
github-actor:
description: 'Github actor'
required: true
github-secret:
description: 'Github secret'
required: true
dockerhub-username:
description: 'Dockerhub username'
required: true
dockerhub-token:
description: 'Dockerhub PAT w/ R+W'
required: true
github-sha:
description: 'Github Commit SHA Hash'
required: true
github-ref-name:
description: 'Github ref name'
required: true
dry-run:
description: 'Whether this is a dry run.'
required: true
type: 'boolean'
runs:
using: 'composite'
steps:
- name: '📝 Print Inputs'
shell: 'bash'
env:
JSON_INPUTS: '${{ toJSON(inputs) }}'
run: 'echo "$JSON_INPUTS"'
- name: 'Checkout'
uses: 'actions/checkout@v4'
with:
ref: '${{ inputs.github-sha }}'
fetch-depth: 0
- name: 'Install Dependencies'
shell: 'bash'
run: 'npm install'
- name: 'npm build'
shell: 'bash'
run: 'npm run build'
- name: 'Set up QEMU'
uses: 'docker/setup-qemu-action@v3'
- name: 'Set up Docker Buildx'
uses: 'docker/setup-buildx-action@v3'
- name: 'Log in to GitHub Container Registry'
uses: 'docker/login-action@v3'
with:
registry: 'docker.io'
username: '${{ inputs.dockerhub-username }}'
password: '${{ inputs.dockerhub-token }}'
- name: 'determine image tag'
id: 'image_tag'
shell: 'bash'
run: |-
SHELL_TAG_NAME="${INPUTS_GITHUB_REF_NAME}"
FINAL_TAG="${INPUTS_GITHUB_SHA}"
if [[ "$SHELL_TAG_NAME" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?$ ]]; then
echo "Release detected."
FINAL_TAG="${SHELL_TAG_NAME#v}"
else
echo "Development release detected. Using commit SHA as tag."
fi
echo "Determined image tag: $FINAL_TAG"
echo "FINAL_TAG=$FINAL_TAG" >> $GITHUB_OUTPUT
env:
INPUTS_GITHUB_REF_NAME: '${{ inputs.github-ref-name }}'
INPUTS_GITHUB_SHA: '${{ inputs.github-sha }}'
# We build amd64 just so we can verify it.
# We build and push both amd64 and arm64 in the publish step.
- name: 'build'
id: 'docker_build'
shell: 'bash'
env:
GEMINI_SANDBOX_IMAGE_TAG: '${{ steps.image_tag.outputs.FINAL_TAG }}'
GEMINI_SANDBOX: 'docker'
BUILD_SANDBOX_FLAGS: '--platform linux/amd64 --load'
STEPS_IMAGE_TAG_OUTPUTS_FINAL_TAG: '${{ steps.image_tag.outputs.FINAL_TAG }}'
run: |-
npm run build:sandbox -- \
--image "google/gemini-cli-sandbox:${STEPS_IMAGE_TAG_OUTPUTS_FINAL_TAG}" \
--output-file final_image_uri.txt
echo "uri=$(cat final_image_uri.txt)" >> $GITHUB_OUTPUT
- name: 'verify'
shell: 'bash'
run: |-
docker run --rm --entrypoint sh "${{ steps.docker_build.outputs.uri }}" -lc '
set -e
node -e "const fs=require(\"node:fs\"); JSON.parse(fs.readFileSync(\"/usr/local/share/npm-global/lib/node_modules/@google/gemini-cli/package.json\",\"utf8\")); JSON.parse(fs.readFileSync(\"/usr/local/share/npm-global/lib/node_modules/@google/gemini-cli-core/package.json\",\"utf8\"));"
/usr/local/share/npm-global/bin/gemini --version >/dev/null
'
- name: 'publish'
shell: 'bash'
if: "${{ inputs.dry-run != 'true' }}"
env:
GEMINI_SANDBOX_IMAGE_TAG: '${{ steps.image_tag.outputs.FINAL_TAG }}'
GEMINI_SANDBOX: 'docker'
BUILD_SANDBOX_FLAGS: '--platform linux/amd64,linux/arm64 --push'
STEPS_IMAGE_TAG_OUTPUTS_FINAL_TAG: '${{ steps.image_tag.outputs.FINAL_TAG }}'
run: |-
npm run build:sandbox -- \
--image "google/gemini-cli-sandbox:${STEPS_IMAGE_TAG_OUTPUTS_FINAL_TAG}"
- name: 'Create issue on failure'
if: |-
${{ failure() }}
shell: 'bash'
env:
GITHUB_TOKEN: '${{ inputs.github-secret }}'
DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'
run: |-
gh issue create \
--title "Docker build failed" \
--body "The docker build failed. See the full run for details: ${DETAILS_URL}" \
--label "release-failure"
================================================
FILE: .github/actions/run-tests/action.yml
================================================
name: 'Run Tests'
description: 'Runs the preflight checks and integration tests.'
inputs:
gemini_api_key:
description: 'The API key for running integration tests.'
required: true
working-directory:
description: 'The working directory to run the tests in.'
required: false
default: '.'
runs:
using: 'composite'
steps:
- name: '📝 Print Inputs'
shell: 'bash'
env:
JSON_INPUTS: '${{ toJSON(inputs) }}'
run: 'echo "$JSON_INPUTS"'
- name: 'Run Tests'
env:
GEMINI_API_KEY: '${{ inputs.gemini_api_key }}'
working-directory: '${{ inputs.working-directory }}'
run: |-
echo "::group::Build"
npm run build
echo "::endgroup::"
echo "::group::Unit Tests"
npm run test:ci
echo "::endgroup::"
echo "::group::Integration Tests (no sandbox)"
npm run test:integration:sandbox:none
echo "::endgroup::"
echo "::group::Integration Tests (docker sandbox)"
npm run test:integration:sandbox:docker
echo "::endgroup::"
shell: 'bash'
================================================
FILE: .github/actions/setup-npmrc/action.yml
================================================
name: 'Setup NPMRC'
description: 'Sets up NPMRC with all the correct repos for readonly access.'
inputs:
github-token:
description: 'the github token'
required: true
# NOTE(review): no step with id 'npm_auth_token' exists in this composite
# action, so this output always evaluates to an empty string. Either remove
# the output or add the step it references (compare npm-auth-token/action.yml).
outputs:
auth-token:
description: 'The generated NPM auth token'
value: '${{ steps.npm_auth_token.outputs.auth-token }}'
runs:
using: 'composite'
steps:
- name: 'Configure .npmrc'
shell: 'bash'
run: |-
echo "@google-gemini:registry=https://npm.pkg.github.com" > ~/.npmrc
echo "//npm.pkg.github.com/:_authToken=${INPUTS_GITHUB_TOKEN}" >> ~/.npmrc
echo "@google:registry=https://wombat-dressing-room.appspot.com" >> ~/.npmrc
env:
INPUTS_GITHUB_TOKEN: '${{ inputs.github-token }}'
================================================
FILE: .github/actions/tag-npm-release/action.yml
================================================
name: 'Tag an NPM release'
description: 'Tags a specific npm version to a specific channel.'
inputs:
channel:
description: 'NPM Channel tag'
required: true
version:
description: 'version'
required: true
dry-run:
description: 'Whether to run in dry-run mode.'
required: true
github-token:
description: 'The GitHub token for creating the release.'
required: true
wombat-token-core:
description: 'The npm token for the wombat @google/gemini-cli-core'
required: true
wombat-token-cli:
description: 'The npm token for wombat @google/gemini-cli'
required: true
wombat-token-a2a-server:
description: 'The npm token for the @google/gemini-cli-a2a-server package.'
required: true
cli-package-name:
description: 'The name of the cli package.'
required: true
core-package-name:
description: 'The name of the core package.'
required: true
a2a-package-name:
description: 'The name of the a2a package.'
required: true
working-directory:
description: 'The working directory to run the commands in.'
required: false
default: '.'
runs:
using: 'composite'
steps:
- name: '📝 Print Inputs'
shell: 'bash'
env:
JSON_INPUTS: '${{ toJSON(inputs) }}'
run: 'echo "$JSON_INPUTS"'
- name: 'Setup Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
with:
node-version-file: '${{ inputs.working-directory }}/.nvmrc'
- name: 'configure .npmrc'
uses: './.github/actions/setup-npmrc'
with:
github-token: '${{ inputs.github-token }}'
- name: 'Get core Token'
uses: './.github/actions/npm-auth-token'
id: 'core-token'
with:
package-name: '${{ inputs.core-package-name }}'
github-token: '${{ inputs.github-token }}'
wombat-token-core: '${{ inputs.wombat-token-core }}'
wombat-token-cli: '${{ inputs.wombat-token-cli }}'
wombat-token-a2a-server: '${{ inputs.wombat-token-a2a-server }}'
- name: 'Change tag for CORE'
if: |-
${{ inputs.dry-run != 'true' }}
env:
NODE_AUTH_TOKEN: '${{ steps.core-token.outputs.auth-token }}'
INPUTS_CORE_PACKAGE_NAME: '${{ inputs.core-package-name }}'
INPUTS_VERSION: '${{ inputs.version }}'
INPUTS_CHANNEL: '${{ inputs.channel }}'
shell: 'bash'
working-directory: '${{ inputs.working-directory }}'
run: |
npm dist-tag add ${INPUTS_CORE_PACKAGE_NAME}@${INPUTS_VERSION} ${INPUTS_CHANNEL}
- name: 'Get cli Token'
uses: './.github/actions/npm-auth-token'
id: 'cli-token'
with:
package-name: '${{ inputs.cli-package-name }}'
github-token: '${{ inputs.github-token }}'
wombat-token-core: '${{ inputs.wombat-token-core }}'
wombat-token-cli: '${{ inputs.wombat-token-cli }}'
wombat-token-a2a-server: '${{ inputs.wombat-token-a2a-server }}'
- name: 'Change tag for CLI'
if: |-
${{ inputs.dry-run != 'true' }}
env:
NODE_AUTH_TOKEN: '${{ steps.cli-token.outputs.auth-token }}'
INPUTS_CLI_PACKAGE_NAME: '${{ inputs.cli-package-name }}'
INPUTS_VERSION: '${{ inputs.version }}'
INPUTS_CHANNEL: '${{ inputs.channel }}'
shell: 'bash'
working-directory: '${{ inputs.working-directory }}'
run: |
npm dist-tag add ${INPUTS_CLI_PACKAGE_NAME}@${INPUTS_VERSION} ${INPUTS_CHANNEL}
- name: 'Get a2a Token'
uses: './.github/actions/npm-auth-token'
id: 'a2a-token'
with:
package-name: '${{ inputs.a2a-package-name }}'
github-token: '${{ inputs.github-token }}'
wombat-token-core: '${{ inputs.wombat-token-core }}'
wombat-token-cli: '${{ inputs.wombat-token-cli }}'
wombat-token-a2a-server: '${{ inputs.wombat-token-a2a-server }}'
- name: 'Change tag for a2a'
# Use the same guard as the CORE and CLI steps ('!= true') so all three
# dist-tag steps behave identically for any unexpected dry-run value.
if: |-
${{ inputs.dry-run != 'true' }}
env:
NODE_AUTH_TOKEN: '${{ steps.a2a-token.outputs.auth-token }}'
INPUTS_A2A_PACKAGE_NAME: '${{ inputs.a2a-package-name }}'
INPUTS_VERSION: '${{ inputs.version }}'
INPUTS_CHANNEL: '${{ inputs.channel }}'
shell: 'bash'
working-directory: '${{ inputs.working-directory }}'
run: |
npm dist-tag add ${INPUTS_A2A_PACKAGE_NAME}@${INPUTS_VERSION} ${INPUTS_CHANNEL}
- name: 'Log dry run'
if: |-
${{ inputs.dry-run == 'true' }}
shell: 'bash'
working-directory: '${{ inputs.working-directory }}'
run: |
echo "Dry run: Would have added tag '${INPUTS_CHANNEL}' to version '${INPUTS_VERSION}' for ${INPUTS_CLI_PACKAGE_NAME}, ${INPUTS_CORE_PACKAGE_NAME}, and ${INPUTS_A2A_PACKAGE_NAME}."
env:
INPUTS_CHANNEL: '${{ inputs.channel }}'
INPUTS_VERSION: '${{ inputs.version }}'
INPUTS_CLI_PACKAGE_NAME: '${{ inputs.cli-package-name }}'
INPUTS_CORE_PACKAGE_NAME: '${{ inputs.core-package-name }}'
INPUTS_A2A_PACKAGE_NAME: '${{ inputs.a2a-package-name }}'
================================================
FILE: .github/actions/verify-release/action.yml
================================================
name: 'Verify an NPM release'
description: 'Fetches a package from NPM and does some basic smoke tests'
inputs:
npm-package:
description: 'NPM Package'
required: true
default: '@google/gemini-cli@latest'
npm-registry-url:
description: 'NPM Registry URL'
required: true
npm-registry-scope:
description: 'NPM Registry Scope'
required: true
expected-version:
description: 'Expected version'
required: true
gemini_api_key:
description: 'The API key for running integration tests.'
required: true
github-token:
description: 'The GitHub token for running integration tests.'
required: true
working-directory:
description: 'The working directory to run the tests in.'
required: false
default: '.'
runs:
using: 'composite'
steps:
- name: '📝 Print Inputs'
shell: 'bash'
env:
JSON_INPUTS: '${{ toJSON(inputs) }}'
run: 'echo "$JSON_INPUTS"'
- name: 'setup node'
uses: 'actions/setup-node@v4'
with:
node-version: '20'
- name: 'configure .npmrc'
uses: './.github/actions/setup-npmrc'
with:
github-token: '${{ inputs.github-token }}'
- name: 'Clear npm cache'
shell: 'bash'
run: 'npm cache clean --force'
- name: 'Install from NPM'
uses: 'nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08' # ratchet:nick-fields/retry@v3
with:
timeout_seconds: 900
retry_wait_seconds: 30
max_attempts: 10
command: |-
cd ${{ inputs.working-directory }}
npm install --prefer-online --no-cache -g "${{ inputs.npm-package }}"
- name: 'Smoke test - NPM Install'
shell: 'bash'
working-directory: '${{ inputs.working-directory }}'
run: |-
gemini_version=$(gemini --version)
if [ "$gemini_version" != "${INPUTS_EXPECTED_VERSION}" ]; then
echo "❌ NPM Version mismatch: Got $gemini_version from ${INPUTS_NPM_PACKAGE}, expected ${INPUTS_EXPECTED_VERSION}"
exit 1
fi
env:
INPUTS_EXPECTED_VERSION: '${{ inputs.expected-version }}'
INPUTS_NPM_PACKAGE: '${{ inputs.npm-package }}'
- name: 'Clear npm cache'
shell: 'bash'
run: 'npm cache clean --force'
- name: 'Smoke test - NPX Run'
shell: 'bash'
working-directory: '${{ inputs.working-directory }}'
run: |-
gemini_version=$(npx --prefer-online "${INPUTS_NPM_PACKAGE}" --version)
if [ "$gemini_version" != "${INPUTS_EXPECTED_VERSION}" ]; then
echo "❌ NPX Run Version mismatch: Got $gemini_version from ${INPUTS_NPM_PACKAGE}, expected ${INPUTS_EXPECTED_VERSION}"
exit 1
fi
env:
INPUTS_NPM_PACKAGE: '${{ inputs.npm-package }}'
INPUTS_EXPECTED_VERSION: '${{ inputs.expected-version }}'
- name: 'Install dependencies for integration tests'
shell: 'bash'
working-directory: '${{ inputs.working-directory }}'
run: 'npm ci'
- name: '🔬 Run integration tests against NPM release'
working-directory: '${{ inputs.working-directory }}'
env:
GEMINI_API_KEY: '${{ inputs.gemini_api_key }}'
INTEGRATION_TEST_USE_INSTALLED_GEMINI: 'true'
# We must disable CI mode here because it interferes with interactive tests.
# See https://github.com/google-gemini/gemini-cli/issues/10517
CI: 'false'
shell: 'bash'
run: 'npm run test:integration:sandbox:none'
================================================
FILE: .github/dependabot.yml
================================================
version: 2
updates:
- package-ecosystem: 'npm'
directory: '/'
schedule:
interval: 'weekly'
day: 'monday'
open-pull-requests-limit: 10
reviewers:
- 'joshualitt'
groups:
npm-dependencies:
patterns:
- '*'
update-types:
- 'minor'
- 'patch'
- package-ecosystem: 'github-actions'
directory: '/'
schedule:
interval: 'weekly'
day: 'monday'
open-pull-requests-limit: 10
reviewers:
- 'joshualitt'
groups:
actions-dependencies:
patterns:
- '*'
update-types:
- 'minor'
- 'patch'
================================================
FILE: .github/pull_request_template.md
================================================
## Summary
## Details
## Related Issues
## How to Validate
## Pre-Merge Checklist
- [ ] Updated relevant documentation and README (if needed)
- [ ] Added/updated tests (if needed)
- [ ] Noted breaking changes (if any)
- [ ] Validated on required platforms/methods:
- [ ] MacOS
- [ ] npm run
- [ ] npx
- [ ] Docker
- [ ] Podman
- [ ] Seatbelt
- [ ] Windows
- [ ] npm run
- [ ] npx
- [ ] Docker
- [ ] Linux
- [ ] npm run
- [ ] npx
- [ ] Docker
================================================
FILE: .github/scripts/backfill-need-triage.cjs
================================================
/* eslint-disable */
/* global require, console, process */
/**
* Script to backfill the 'status/need-triage' label to all open issues
* that are NOT currently labeled with '🔒 maintainer only' or 'help wanted'.
*/
const { execFileSync } = require('child_process');
const isDryRun = process.argv.includes('--dry-run');
const REPO = 'google-gemini/gemini-cli';
/**
* Executes a GitHub CLI command safely using an argument array to prevent command injection.
* @param {string[]} args
* @returns {string|null}
*/
/**
 * Runs the GitHub CLI with the given argument vector and returns its
 * trimmed stdout, or null if the command could not be run or exited
 * non-zero (the failure is logged to stderr).
 * @param {string[]} args
 * @returns {string|null}
 */
function runGh(args) {
  // execFileSync with an argument array never invokes a shell, so this is
  // safe against command injection. The 10MB maxBuffer accommodates repos
  // with very large issue lists.
  const execOptions = {
    encoding: 'utf8',
    maxBuffer: 10 * 1024 * 1024,
    stdio: ['ignore', 'pipe', 'pipe'],
  };
  try {
    const stdout = execFileSync('gh', args, execOptions);
    return stdout.trim();
  } catch (error) {
    const stderrSuffix = error.stderr ? ` Stderr: ${error.stderr.trim()}` : '';
    console.error(
      `❌ Error running gh ${args.join(' ')}: ${error.message}${stderrSuffix}`,
    );
    return null;
  }
}
/**
 * Entry point: fetches every open issue in REPO that carries none of the
 * triage-exempt labels ('🔒 maintainer only', 'help wanted') and is not
 * already labeled 'status/need-triage', then applies that label to each.
 * With --dry-run, only logs what would be done. Exits non-zero on any
 * failure so CI surfaces partial runs.
 */
async function main() {
  console.log('🔐 GitHub CLI security check...');
  // Bail out early if gh is missing or unauthenticated.
  if (runGh(['auth', 'status']) === null) {
    console.error('❌ GitHub CLI (gh) is not installed or not authenticated.');
    process.exit(1);
  }
  if (isDryRun) {
    console.log('🧪 DRY RUN MODE ENABLED - No changes will be made.\n');
  }
  console.log(`🔍 Fetching and filtering open issues from ${REPO}...`);
  // The paginated /issues endpoint sidesteps the 1000-result search cap; the
  // jq filter drops PRs and anything already labeled maintainer-only,
  // help-wanted, or status/need-triage.
  const jqFilter =
    '.[] | select(.pull_request == null) | select([.labels[].name] as $l | (any($l[]; . == "🔒 maintainer only") | not) and (any($l[]; . == "help wanted") | not) and (any($l[]; . == "status/need-triage") | not)) | {number: .number, title: .title}';
  const rawOutput = runGh([
    'api',
    `repos/${REPO}/issues?state=open&per_page=100`,
    '--paginate',
    '--jq',
    jqFilter,
  ]);
  if (rawOutput === null) {
    process.exit(1);
  }
  // One JSON object per non-blank line; unparseable lines are logged and dropped.
  const issues = rawOutput
    .split('\n')
    .filter((line) => line.trim())
    .flatMap((line) => {
      try {
        const parsed = JSON.parse(line);
        return parsed ? [parsed] : [];
      } catch (_e) {
        console.error(`⚠️ Failed to parse line: ${line}`);
        return [];
      }
    });
  console.log(`✅ Found ${issues.length} issues matching criteria.`);
  if (issues.length === 0) {
    console.log('✨ No issues need backfilling.');
    return;
  }
  let successCount = 0;
  let failCount = 0;
  if (isDryRun) {
    issues.forEach((issue) => {
      console.log(
        `[DRY RUN] Would label issue #${issue.number}: ${issue.title}`,
      );
    });
    successCount = issues.length;
  } else {
    console.log(`🏷️ Applying labels to ${issues.length} issues...`);
    for (const issue of issues) {
      const issueNumber = String(issue.number);
      console.log(`🏷️ Labeling issue #${issueNumber}: ${issue.title}`);
      const editResult = runGh([
        'issue',
        'edit',
        issueNumber,
        '--add-label',
        'status/need-triage',
        '--repo',
        REPO,
      ]);
      if (editResult === null) {
        failCount++;
      } else {
        successCount++;
      }
    }
  }
  console.log(`\n📊 Summary:`);
  console.log(` - Success: ${successCount}`);
  console.log(` - Failed: ${failCount}`);
  if (failCount > 0) {
    console.error(`\n❌ Backfill completed with ${failCount} errors.`);
    process.exit(1);
  } else {
    console.log(`\n🎉 ${isDryRun ? 'Dry run' : 'Backfill'} complete!`);
  }
}
main().catch((error) => {
  console.error('❌ Unexpected error:', error);
  process.exit(1);
});
================================================
FILE: .github/scripts/backfill-pr-notification.cjs
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/* eslint-disable */
/* global require, console, process */
/**
* Script to backfill a process change notification comment to all open PRs
* not created by members of the 'gemini-cli-maintainers' team.
*
* Skip PRs that are already associated with an issue.
*/
const { execFileSync } = require('child_process');
const isDryRun = process.argv.includes('--dry-run');
const REPO = 'google-gemini/gemini-cli';
const ORG = 'google-gemini';
const TEAM_SLUG = 'gemini-cli-maintainers';
const DISCUSSION_URL =
'https://github.com/google-gemini/gemini-cli/discussions/16706';
/**
* Executes a GitHub CLI command safely using an argument array.
*/
/**
 * Runs the GitHub CLI with an argument array (no shell, so no injection
 * risk) and returns its trimmed stdout, or null on any failure.
 * @param {string[]} args
 * @param {{silent?: boolean}} [options] - silent suppresses error logging
 *   (useful where a non-zero exit is expected, e.g. membership 404s).
 * @returns {string|null}
 */
function runGh(args, options = {}) {
  try {
    const stdout = execFileSync('gh', args, {
      encoding: 'utf8',
      maxBuffer: 10 * 1024 * 1024,
      stdio: ['ignore', 'pipe', 'pipe'],
    });
    return stdout.trim();
  } catch (error) {
    if (!options.silent) {
      const stderrSuffix = error.stderr ? ` Stderr: ${error.stderr.trim()}` : '';
      console.error(
        `❌ Error running gh ${args.join(' ')}: ${error.message}${stderrSuffix}`,
      );
    }
    return null;
  }
}
/**
* Checks if a user is a member of the maintainers team.
*/
// Memoizes team-membership lookups per username for the life of the run.
const membershipCache = new Map();
/**
 * Returns true if the user belongs to the maintainers team.
 * The GitHub API responds 404 for non-members, which runGh surfaces as
 * null; silent mode keeps those expected 404s out of the error log.
 * @param {string} username
 * @returns {boolean}
 */
function isMaintainer(username) {
  const cached = membershipCache.get(username);
  if (cached !== undefined) {
    return cached;
  }
  const response = runGh(
    ['api', `orgs/${ORG}/teams/${TEAM_SLUG}/memberships/${username}`],
    { silent: true },
  );
  const isMember = response !== null;
  membershipCache.set(username, isMember);
  return isMember;
}
/**
 * Entry point: notifies authors of open PRs that (a) are not linked to any
 * issue and (b) were not authored by a maintainer, about the upcoming
 * "PRs must reference an issue" policy. Idempotent: PRs whose comments
 * already contain DISCUSSION_URL are skipped. Supports --dry-run.
 * Exits non-zero if any comment fails to post.
 */
async function main() {
  console.log('🔐 GitHub CLI security check...');
  if (runGh(['auth', 'status']) === null) {
    console.error('❌ GitHub CLI (gh) is not authenticated.');
    process.exit(1);
  }
  if (isDryRun) {
    console.log('🧪 DRY RUN MODE ENABLED\n');
  }
  console.log(`📥 Fetching open PRs from ${REPO}...`);
  // Fetch number, author, and closingIssuesReferences to check if linked to an issue
  const prsJson = runGh([
    'pr',
    'list',
    '--repo',
    REPO,
    '--state',
    'open',
    '--limit',
    '1000',
    '--json',
    'number,author,closingIssuesReferences',
  ]);
  if (prsJson === null) process.exit(1);
  const prs = JSON.parse(prsJson);
  console.log(`📊 Found ${prs.length} open PRs. Filtering...`);
  let targetPrs = [];
  for (const pr of prs) {
    const author = pr.author.login;
    const issueCount = pr.closingIssuesReferences
      ? pr.closingIssuesReferences.length
      : 0;
    if (issueCount > 0) {
      // Skip if already linked to an issue
      continue;
    }
    if (!isMaintainer(author)) {
      targetPrs.push(pr);
    }
  }
  console.log(
    `✅ Found ${targetPrs.length} PRs from non-maintainers without associated issues.`,
  );
  // FIX: this must be a template literal (backticks) so ${DISCUSSION_URL} is
  // interpolated. The previous double-quoted string posted the literal text
  // "${DISCUSSION_URL}" in the comment instead of the discussion link.
  // {AUTHOR} is intentionally NOT a template expression — it is a plain
  // placeholder substituted per-PR below.
  const commentBody =
    `\nHi @{AUTHOR}, thank you so much for your contribution to Gemini CLI! We really appreciate the time and effort you've put into this.\n\nWe're making some updates to our contribution process to improve how we track and review changes. Please take a moment to review our recent discussion post: [Improving Our Contribution Process & Introducing New Guidelines](${DISCUSSION_URL}).\n\nKey Update: Starting **January 26, 2026**, the Gemini CLI project will require all pull requests to be associated with an existing issue. Any pull requests not linked to an issue by that date will be automatically closed.\n\nThank you for your understanding and for being a part of our community!\n `.trim();
  let successCount = 0;
  let skipCount = 0;
  let failCount = 0;
  for (const pr of targetPrs) {
    const prNumber = String(pr.number);
    const author = pr.author.login;
    // Check if we already commented (idempotency)
    // We use silent: true here because view might fail if PR is deleted mid-run
    const existingComments = runGh(
      [
        'pr',
        'view',
        prNumber,
        '--repo',
        REPO,
        '--json',
        'comments',
        '--jq',
        `.comments[].body | contains("${DISCUSSION_URL}")`,
      ],
      { silent: true },
    );
    if (existingComments && existingComments.includes('true')) {
      console.log(
        `⏭️ PR #${prNumber} already has the notification. Skipping.`,
      );
      skipCount++;
      continue;
    }
    if (isDryRun) {
      console.log(`[DRY RUN] Would notify @${author} on PR #${prNumber}`);
      successCount++;
    } else {
      console.log(`💬 Notifying @${author} on PR #${prNumber}...`);
      const personalizedComment = commentBody.replace('{AUTHOR}', author);
      const result = runGh([
        'pr',
        'comment',
        prNumber,
        '--repo',
        REPO,
        '--body',
        personalizedComment,
      ]);
      if (result !== null) {
        successCount++;
      } else {
        failCount++;
      }
    }
  }
  console.log(`\n📊 Summary:`);
  console.log(` - Notified: ${successCount}`);
  console.log(` - Skipped: ${skipCount}`);
  console.log(` - Failed: ${failCount}`);
  if (failCount > 0) process.exit(1);
}
main().catch((e) => {
  console.error(e);
  process.exit(1);
});
================================================
FILE: .github/scripts/pr-triage.sh
================================================
#!/usr/bin/env bash
# @license
# Copyright 2026 Google LLC
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
# Initialize a comma-separated string to hold PR numbers that need a comment
PRS_NEEDING_COMMENT=""
# Global cache for issue labels (compatible with Bash 3.2)
# Stores "|ISSUE_NUM:LABELS|" segments
ISSUE_LABELS_CACHE_FLAT="|"
# Function to get labels from an issue (with caching)
get_issue_labels() {
  # Prints a comma-separated list of the issue's "inheritable" labels —
  # area/*, priority/*, 'help wanted', '🔒 maintainer only' — or nothing if
  # the issue cannot be fetched. Results (including failures) are memoized in
  # the Bash-3.2-compatible flat cache ISSUE_LABELS_CACHE_FLAT, whose format
  # is "|NUM:labels|NUM:labels|", so each issue is queried at most once.
  local ISSUE_NUM="${1}"
  # Nothing to do for missing or placeholder ("null") issue numbers.
  if [[ -z "${ISSUE_NUM}" || "${ISSUE_NUM}" == "null" || "${ISSUE_NUM}" == "" ]]; then
    return
  fi
  # Check cache
  case "${ISSUE_LABELS_CACHE_FLAT}" in
    *"|${ISSUE_NUM}:"*)
      # Cache hit: strip everything up to this issue's segment, then keep
      # only the text before the next "|" delimiter.
      local suffix="${ISSUE_LABELS_CACHE_FLAT#*|"${ISSUE_NUM}":}"
      echo "${suffix%%|*}"
      return
      ;;
    *)
      # Cache miss, proceed to fetch
      ;;
  esac
  # Progress goes to stderr so stdout stays a clean return channel.
  echo " 📥 Fetching labels from issue #${ISSUE_NUM}" >&2
  local gh_output
  if ! gh_output=$(gh issue view "${ISSUE_NUM}" --repo "${GITHUB_REPOSITORY}" --json labels -q '.labels[].name' 2>/dev/null); then
    # Fetch failed: warn and cache an empty result so we don't retry.
    echo " ⚠️ Could not fetch issue #${ISSUE_NUM}" >&2
    ISSUE_LABELS_CACHE_FLAT="${ISSUE_LABELS_CACHE_FLAT}${ISSUE_NUM}:|"
    return
  fi
  local labels
  # Keep only the label families we propagate to PRs; join with commas and
  # drop the trailing comma. "|| echo" guards against grep's non-zero exit
  # when no label matches.
  labels=$(echo "${gh_output}" | grep -x -E '(area|priority)/.*|help wanted|🔒 maintainer only' | tr '\n' ',' | sed 's/,$//' || echo "")
  # Save to flat cache
  ISSUE_LABELS_CACHE_FLAT="${ISSUE_LABELS_CACHE_FLAT}${ISSUE_NUM}:${labels}|"
  echo "${labels}"
}
# Function to process a single PR with pre-fetched data
process_pr_optimized() {
  # Synchronizes one PR's labels from pre-fetched data.
  # Args: PR number, draft flag ("true"/"false"), linked issue number (may be
  # empty or "null"), and the PR's current labels (comma-separated).
  # Side effects: may run `gh pr edit` once, and may append the PR number to
  # the global PRS_NEEDING_COMMENT list.
  local PR_NUMBER="${1}"
  local IS_DRAFT="${2}"
  local ISSUE_NUMBER="${3}"
  local CURRENT_LABELS="${4}" # Comma-separated labels
  echo "🔄 Processing PR #${PR_NUMBER}"
  local LABELS_TO_ADD=""
  local LABELS_TO_REMOVE=""
  if [[ -z "${ISSUE_NUMBER}" || "${ISSUE_NUMBER}" == "null" || "${ISSUE_NUMBER}" == "" ]]; then
    # No linked issue.
    if [[ "${IS_DRAFT}" == "true" ]]; then
      # Drafts are exempt from the need-issue requirement.
      echo " 📝 PR #${PR_NUMBER} is a draft and has no linked issue"
      if [[ ",${CURRENT_LABELS}," == *",status/need-issue,"* ]]; then
        echo " ➖ Removing status/need-issue label"
        LABELS_TO_REMOVE="status/need-issue"
      fi
    else
      echo " ⚠️ No linked issue found for PR #${PR_NUMBER}"
      # Flag the PR (if not already flagged) and queue it for a comment.
      if [[ ",${CURRENT_LABELS}," != *",status/need-issue,"* ]]; then
        echo " ➕ Adding status/need-issue label"
        LABELS_TO_ADD="status/need-issue"
      fi
      if [[ -z "${PRS_NEEDING_COMMENT}" ]]; then
        PRS_NEEDING_COMMENT="${PR_NUMBER}"
      else
        PRS_NEEDING_COMMENT="${PRS_NEEDING_COMMENT},${PR_NUMBER}"
      fi
    fi
  else
    echo " 🔗 Found linked issue #${ISSUE_NUMBER}"
    # Linked issue present: drop need-issue and inherit the issue's labels.
    if [[ ",${CURRENT_LABELS}," == *",status/need-issue,"* ]]; then
      echo " ➖ Removing status/need-issue label"
      LABELS_TO_REMOVE="status/need-issue"
    fi
    local ISSUE_LABELS
    ISSUE_LABELS=$(get_issue_labels "${ISSUE_NUMBER}")
    if [[ -n "${ISSUE_LABELS}" ]]; then
      # Copy over each issue label the PR doesn't already carry. The
      # comma-wrapping trick (",a,b," matching ",x,") gives exact-element
      # membership tests on the comma-separated lists.
      local IFS_OLD="${IFS}"
      IFS=','
      for label in ${ISSUE_LABELS}; do
        if [[ -n "${label}" ]] && [[ ",${CURRENT_LABELS}," != *",${label},"* ]]; then
          if [[ -z "${LABELS_TO_ADD}" ]]; then
            LABELS_TO_ADD="${label}"
          else
            LABELS_TO_ADD="${LABELS_TO_ADD},${label}"
          fi
        fi
      done
      IFS="${IFS_OLD}"
    fi
    if [[ -z "${LABELS_TO_ADD}" && -z "${LABELS_TO_REMOVE}" ]]; then
      echo " ✅ Labels already synchronized"
    fi
  fi
  # Apply all accumulated changes in one gh call; "|| true" keeps a single
  # failing edit from aborting the whole triage run (set -e is active).
  if [[ -n "${LABELS_TO_ADD}" || -n "${LABELS_TO_REMOVE}" ]]; then
    local EDIT_CMD=("gh" "pr" "edit" "${PR_NUMBER}" "--repo" "${GITHUB_REPOSITORY}")
    if [[ -n "${LABELS_TO_ADD}" ]]; then
      echo " ➕ Syncing labels to add: ${LABELS_TO_ADD}"
      EDIT_CMD+=("--add-label" "${LABELS_TO_ADD}")
    fi
    if [[ -n "${LABELS_TO_REMOVE}" ]]; then
      echo " ➖ Syncing labels to remove: ${LABELS_TO_REMOVE}"
      EDIT_CMD+=("--remove-label" "${LABELS_TO_REMOVE}")
    fi
    ("${EDIT_CMD[@]}" || true)
  fi
}
# --- Entry point -------------------------------------------------------------
# Requires GitHub Actions context: $GITHUB_REPOSITORY (owner/repo) and
# $GITHUB_OUTPUT (file for step outputs).
if [[ -z "${GITHUB_REPOSITORY:-}" ]]; then
  echo "‼️ Missing \$GITHUB_REPOSITORY - this must be run from GitHub Actions"
  exit 1
fi
if [[ -z "${GITHUB_OUTPUT:-}" ]]; then
  echo "‼️ Missing \$GITHUB_OUTPUT - this must be run from GitHub Actions"
  exit 1
fi
# jq program that reduces a PR object to the fields we need. The linked issue
# comes from the first closingIssuesReferences entry, falling back to the
# first "#123"-style reference in the PR body.
# FIX: the capture group must be *named* "num" — `.num` is read from the
# object produced by jq's capture(); the previous "(?[0-9]+)" was an invalid
# group (the "<num>" name had been lost), so body-based issue detection broke.
JQ_EXTRACT_FIELDS='{
  number: .number,
  isDraft: .isDraft,
  issue: (.closingIssuesReferences[0].number // (.body // "" | capture("(^|[^a-zA-Z0-9])#(?<num>[0-9]+)([^a-zA-Z0-9]|$)")? | .num) // "null"),
  labels: [.labels[].name] | join(",")
}'
# Renders the extracted fields as one tab-separated line per PR.
JQ_TSV_FORMAT='"\((.number | tostring))\t\(.isDraft)\t\((.issue // null) | tostring)\t\(.labels)"'
if [[ -n "${PR_NUMBER:-}" ]]; then
  # Single-PR mode (e.g. triggered by a PR event).
  echo "🔄 Processing single PR #${PR_NUMBER}"
  PR_DATA=$(gh pr view "${PR_NUMBER}" --repo "${GITHUB_REPOSITORY}" --json number,closingIssuesReferences,isDraft,body,labels 2>/dev/null) || {
    echo "❌ Failed to fetch data for PR #${PR_NUMBER}"
    exit 1
  }
  line=$(echo "${PR_DATA}" | jq -r "${JQ_EXTRACT_FIELDS} | ${JQ_TSV_FORMAT}")
  IFS=$'\t' read -r pr_num is_draft issue_num current_labels <<< "${line}"
  process_pr_optimized "${pr_num}" "${is_draft}" "${issue_num}" "${current_labels}"
else
  # Batch mode: triage every open PR.
  echo "📥 Getting all open pull requests..."
  PR_DATA_ALL=$(gh pr list --repo "${GITHUB_REPOSITORY}" --state open --limit 1000 --json number,closingIssuesReferences,isDraft,body,labels 2>/dev/null) || {
    echo "❌ Failed to fetch PR list"
    exit 1
  }
  PR_COUNT=$(echo "${PR_DATA_ALL}" | jq '. | length')
  echo "📊 Found ${PR_COUNT} open PRs to process"
  # Use a temporary file to avoid masking exit codes in process substitution
  tmp_file=$(mktemp)
  echo "${PR_DATA_ALL}" | jq -r ".[] | ${JQ_EXTRACT_FIELDS} | ${JQ_TSV_FORMAT}" > "${tmp_file}"
  while read -r line; do
    [[ -z "${line}" ]] && continue
    IFS=$'\t' read -r pr_num is_draft issue_num current_labels <<< "${line}"
    process_pr_optimized "${pr_num}" "${is_draft}" "${issue_num}" "${current_labels}"
  done < "${tmp_file}"
  rm -f "${tmp_file}"
fi
# Publish the list of PRs needing a comment as a JSON array step output.
if [[ -z "${PRS_NEEDING_COMMENT}" ]]; then
  echo "prs_needing_comment=[]" >> "${GITHUB_OUTPUT}"
else
  echo "prs_needing_comment=[${PRS_NEEDING_COMMENT}]" >> "${GITHUB_OUTPUT}"
fi
echo "✅ PR triage completed"
================================================
FILE: .github/scripts/sync-maintainer-labels.cjs
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
const { Octokit } = require('@octokit/rest');
/**
* Sync Maintainer Labels (Recursive with strict parent-child relationship detection)
* - Uses Native Sub-issues.
* - Uses Markdown Task Lists (- [ ] #123).
* - Filters for OPEN issues only.
* - Skips DUPLICATES.
* - Skips Pull Requests.
* - ONLY labels issues in the PUBLIC (gemini-cli) repo.
*/
const REPO_OWNER = 'google-gemini';
const PUBLIC_REPO = 'gemini-cli';
const PRIVATE_REPO = 'maintainers-gemini-cli';
const ALLOWED_REPOS = [PUBLIC_REPO, PRIVATE_REPO];
const ROOT_ISSUES = [
{ owner: REPO_OWNER, repo: PUBLIC_REPO, number: 15374 },
{ owner: REPO_OWNER, repo: PUBLIC_REPO, number: 15456 },
{ owner: REPO_OWNER, repo: PUBLIC_REPO, number: 15324 },
];
const TARGET_LABEL = '🔒 maintainer only';
const isDryRun =
process.argv.includes('--dry-run') || process.env.DRY_RUN === 'true';
const octokit = new Octokit({
auth: process.env.GITHUB_TOKEN,
});
/**
* Extracts child issue references from markdown Task Lists ONLY.
* e.g. - [ ] #123 or - [x] google-gemini/gemini-cli#123
*/
/**
 * Extracts child issue references from markdown Task List items ONLY
 * (e.g. "- [ ] #123" or "- [x] google-gemini/gemini-cli#123"), deduplicated
 * by owner/repo#number and restricted to ALLOWED_REPOS.
 * @param {string} text - markdown to scan (issue body and/or comments)
 * @param {string} contextOwner - owner assumed for bare "#123" refs
 * @param {string} contextRepo - repo assumed for bare "#123" refs
 * @returns {{owner: string, repo: string, number: number}[]}
 */
function extractTaskListLinks(text, contextOwner, contextRepo) {
  if (!text) return [];
  const found = new Map();
  const record = (owner, repo, number) => {
    // Only follow references into repos we know about.
    if (!ALLOWED_REPOS.includes(repo)) return;
    found.set(`${owner}/${repo}#${number}`, {
      owner,
      repo,
      number: parseInt(number, 10),
    });
  };
  // 1. Full issue URLs inside task-list items.
  for (const m of text.matchAll(
    /-\s+\[[ x]\].*https:\/\/github\.com\/([a-zA-Z0-9._-]+)\/([a-zA-Z0-9._-]+)\/issues\/(\d+)\b/g,
  )) {
    record(m[1], m[2], m[3]);
  }
  // 2. Cross-repo refs inside task-list items: owner/repo#123.
  for (const m of text.matchAll(
    /-\s+\[[ x]\].*([a-zA-Z0-9._-]+)\/([a-zA-Z0-9._-]+)#(\d+)\b/g,
  )) {
    record(m[1], m[2], m[3]);
  }
  // 3. Short refs inside task-list items: #123 (resolved against context).
  for (const m of text.matchAll(/-\s+\[[ x]\].*#(\d+)\b/g)) {
    record(contextOwner, contextRepo, m[1]);
  }
  return [...found.values()];
}
/**
* Fetches issue data via GraphQL with full pagination for sub-issues, comments, and labels.
*/
/**
 * Fetches one issue via GraphQL, following pagination for sub-issues and
 * labels beyond the first 100 (comments are capped at the first 100 — see
 * note near the bottom).
 * @param {string} owner
 * @param {string} repo
 * @param {number} number
 * @returns {Promise<{state: string, title: string, body: string,
 *   labels: string[], subIssues: object[], comments: string[]}|null>}
 *   The issue snapshot, or null if the issue does not exist (NOT_FOUND).
 * @throws Rethrows any GraphQL error other than NOT_FOUND.
 */
async function fetchIssueData(owner, repo, number) {
  const query = `
    query($owner:String!, $repo:String!, $number:Int!) {
      repository(owner:$owner, name:$repo) {
        issue(number:$number) {
          state
          title
          body
          labels(first: 100) {
            nodes { name }
            pageInfo { hasNextPage endCursor }
          }
          subIssues(first: 100) {
            nodes {
              number
              repository {
                name
                owner { login }
              }
            }
            pageInfo { hasNextPage endCursor }
          }
          comments(first: 100) {
            nodes {
              body
            }
          }
        }
      }
    }
  `;
  try {
    const response = await octokit.graphql(query, { owner, repo, number });
    const data = response.repository.issue;
    if (!data) return null;
    // Flatten the GraphQL connection shapes into plain arrays.
    const issue = {
      state: data.state,
      title: data.title,
      body: data.body || '',
      labels: data.labels.nodes.map((n) => n.name),
      subIssues: [...data.subIssues.nodes],
      comments: data.comments.nodes.map((n) => n.body),
    };
    // Paginate subIssues if there are more than 100
    if (data.subIssues.pageInfo.hasNextPage) {
      const moreSubIssues = await paginateConnection(
        owner,
        repo,
        number,
        'subIssues',
        'number repository { name owner { login } }',
        data.subIssues.pageInfo.endCursor,
      );
      issue.subIssues.push(...moreSubIssues);
    }
    // Paginate labels if there are more than 100 (unlikely but for completeness)
    if (data.labels.pageInfo.hasNextPage) {
      const moreLabels = await paginateConnection(
        owner,
        repo,
        number,
        'labels',
        'name',
        data.labels.pageInfo.endCursor,
        (n) => n.name,
      );
      issue.labels.push(...moreLabels);
    }
    // Note: Comments are handled via Task Lists in body + first 100 comments.
    // If an issue has > 100 comments with task lists, we'd need to paginate those too.
    // Given the 1,100+ issue discovery count, 100 comments is usually sufficient,
    // but we can add it for absolute completeness.
    // (Skipping for now to avoid excessive API churn unless clearly needed).
    return issue;
  } catch (error) {
    // NOT_FOUND (deleted/transferred issue) is an expected outcome -> null.
    if (error.errors && error.errors.some((e) => e.type === 'NOT_FOUND')) {
      return null;
    }
    throw error;
  }
}
/**
* Helper to paginate any GraphQL connection.
*/
/**
 * Drains the remaining pages of a GraphQL connection on an issue, starting
 * from a cursor, returning the (optionally transformed) extra nodes.
 * @param {string} owner
 * @param {string} repo
 * @param {number} number - issue number
 * @param {string} connectionName - e.g. 'subIssues' or 'labels'
 * @param {string} nodeFields - GraphQL selection for each node
 * @param {string} initialCursor - endCursor of the page already fetched
 * @param {(n: object) => *} [transformNode] - per-node mapper (identity by default)
 * @returns {Promise<Array>}
 */
async function paginateConnection(
  owner,
  repo,
  number,
  connectionName,
  nodeFields,
  initialCursor,
  transformNode = (n) => n,
) {
  // The query is constant across iterations, so build it once up front.
  const query = `
    query($owner:String!, $repo:String!, $number:Int!, $cursor:String) {
      repository(owner:$owner, name:$repo) {
        issue(number:$number) {
          ${connectionName}(first: 100, after: $cursor) {
            nodes { ${nodeFields} }
            pageInfo { hasNextPage endCursor }
          }
        }
      }
    }
  `;
  const collected = [];
  let cursor = initialCursor;
  let morePages = true;
  while (morePages) {
    const response = await octokit.graphql(query, {
      owner,
      repo,
      number,
      cursor,
    });
    const page = response.repository.issue[connectionName];
    for (const node of page.nodes) {
      collected.push(transformNode(node));
    }
    morePages = page.pageInfo.hasNextPage;
    cursor = page.pageInfo.endCursor;
  }
  return collected;
}
/**
* Validates if an issue should be processed (Open, not a duplicate, not a PR)
*/
/**
 * Validates whether an issue should be processed: it must exist, be OPEN,
 * and not be labeled as a duplicate (case-insensitive).
 * @param {{state: string, labels: string[]}|null} issueData
 * @returns {boolean}
 */
function shouldProcess(issueData) {
  if (!issueData || issueData.state !== 'OPEN') {
    return false;
  }
  const lowered = issueData.labels.map((l) => l.toLowerCase());
  return !lowered.includes('duplicate') && !lowered.includes('kind/duplicate');
}
/**
 * Breadth-first traversal starting from the given root issues, following
 * native sub-issues plus markdown task-list references found in issue
 * bodies and (first 100) comments. Closed/duplicate issues are skipped and
 * not recursed into. Traversal crosses into any ALLOWED repo, but only
 * issues living in the PUBLIC repo — excluding the roots themselves — are
 * returned for labeling.
 * @param {{owner: string, repo: string, number: number}[]} roots
 * @returns {Promise<{owner: string, repo: string, number: number,
 *   title: string, labels: string[]}[]>}
 */
async function getAllDescendants(roots) {
  const allDescendants = new Map();
  const visited = new Set();
  const queue = [...roots];
  // Mark roots visited up front so cycles back to a root are ignored.
  for (const root of roots) {
    visited.add(`${root.owner}/${root.repo}#${root.number}`);
  }
  console.log(`Starting discovery from ${roots.length} roots...`);
  while (queue.length > 0) {
    const current = queue.shift();
    const currentKey = `${current.owner}/${current.repo}#${current.number}`;
    try {
      const issueData = await fetchIssueData(
        current.owner,
        current.repo,
        current.number,
      );
      // Skip (and do not recurse into) missing/closed/duplicate issues.
      if (!shouldProcess(issueData)) {
        continue;
      }
      // ONLY add to labeling list if it's in the PUBLIC repository
      if (current.repo === PUBLIC_REPO) {
        // Don't label the roots themselves
        if (
          !ROOT_ISSUES.some(
            (r) => r.number === current.number && r.repo === current.repo,
          )
        ) {
          allDescendants.set(currentKey, {
            ...current,
            title: issueData.title,
            labels: issueData.labels,
          });
        }
      }
      // Children gathered from both sources, deduplicated by key.
      const children = new Map();
      // 1. Process Native Sub-issues
      if (issueData.subIssues) {
        for (const node of issueData.subIssues) {
          const childOwner = node.repository.owner.login;
          const childRepo = node.repository.name;
          const childNumber = node.number;
          const key = `${childOwner}/${childRepo}#${childNumber}`;
          children.set(key, {
            owner: childOwner,
            repo: childRepo,
            number: childNumber,
          });
        }
      }
      // 2. Process Markdown Task Lists in Body and Comments
      let combinedText = issueData.body || '';
      if (issueData.comments) {
        for (const commentBody of issueData.comments) {
          combinedText += '\n' + (commentBody || '');
        }
      }
      const taskListLinks = extractTaskListLinks(
        combinedText,
        current.owner,
        current.repo,
      );
      for (const link of taskListLinks) {
        const key = `${link.owner}/${link.repo}#${link.number}`;
        children.set(key, link);
      }
      // Queue children (regardless of which repo they are in, for recursion)
      for (const [key, child] of children) {
        if (!visited.has(key)) {
          visited.add(key);
          queue.push(child);
        }
      }
    } catch (error) {
      // One bad issue shouldn't abort the whole discovery run.
      console.error(`Error processing ${currentKey}: ${error.message}`);
    }
  }
  return Array.from(allDescendants.values());
}
/**
 * Entry point: discovers every public-repo descendant of the ROOT_ISSUES
 * and ensures each one carries TARGET_LABEL and does not carry
 * 'status/need-triage'. Honors dry-run mode (logs intended changes only).
 * Per-issue errors are logged but do not abort the run.
 */
async function run() {
  if (isDryRun) {
    console.log('=== DRY RUN MODE: No labels will be applied ===');
  }
  const descendants = await getAllDescendants(ROOT_ISSUES);
  console.log(
    `\nFound ${descendants.length} total unique open descendant issues in ${PUBLIC_REPO}.`,
  );
  for (const issueInfo of descendants) {
    const issueKey = `${issueInfo.owner}/${issueInfo.repo}#${issueInfo.number}`;
    try {
      // Data is already available from the discovery phase
      const hasLabel = issueInfo.labels.some((l) => l === TARGET_LABEL);
      if (!hasLabel) {
        if (isDryRun) {
          console.log(
            `[DRY RUN] Would label ${issueKey}: "${issueInfo.title}"`,
          );
        } else {
          console.log(`Labeling ${issueKey}: "${issueInfo.title}"...`);
          await octokit.rest.issues.addLabels({
            owner: issueInfo.owner,
            repo: issueInfo.repo,
            issue_number: issueInfo.number,
            labels: [TARGET_LABEL],
          });
        }
      }
      // Remove status/need-triage from maintainer-only issues since they
      // don't need community triage. We always attempt removal rather than
      // checking the (potentially stale) label snapshot, because the
      // issue-opened-labeler workflow runs concurrently and may add the
      // label after our snapshot was taken.
      if (isDryRun) {
        console.log(
          `[DRY RUN] Would remove status/need-triage from ${issueKey}`,
        );
      } else {
        try {
          await octokit.rest.issues.removeLabel({
            owner: issueInfo.owner,
            repo: issueInfo.repo,
            issue_number: issueInfo.number,
            name: 'status/need-triage',
          });
          console.log(`Removed status/need-triage from ${issueKey}`);
        } catch (removeError) {
          // 404 means the label wasn't present — that's fine.
          if (removeError.status === 404) {
            console.log(
              `status/need-triage not present on ${issueKey}, skipping.`,
            );
          } else {
            throw removeError;
          }
        }
      }
    } catch (error) {
      // Log and continue: a failure on one issue shouldn't stop the rest.
      console.error(`Error processing label for ${issueKey}: ${error.message}`);
    }
  }
}
run().catch((error) => {
  console.error(error);
  process.exit(1);
});
================================================
FILE: .github/workflows/chained_e2e.yml
================================================
name: 'Testing: E2E (Chained)'
on:
push:
branches:
- 'main'
merge_group:
workflow_run:
workflows: ['Trigger E2E']
types: ['completed']
workflow_dispatch:
inputs:
head_sha:
description: 'SHA of the commit to test'
required: true
repo_name:
description: 'Repository name (e.g., owner/repo)'
required: true
concurrency:
group: '${{ github.workflow }}-${{ github.head_ref || github.event.workflow_run.head_branch || github.ref }}'
cancel-in-progress: |-
${{ github.event_name != 'push' && github.event_name != 'merge_group' }}
permissions:
contents: 'read'
statuses: 'write'
jobs:
merge_queue_skipper:
name: 'Merge Queue Skipper'
permissions: 'read-all'
runs-on: 'gemini-cli-ubuntu-16-core'
if: "github.repository == 'google-gemini/gemini-cli'"
outputs:
skip: '${{ steps.merge-queue-e2e-skipper.outputs.skip-check }}'
steps:
- id: 'merge-queue-e2e-skipper'
uses: 'cariad-tech/merge-queue-ci-skipper@1032489e59437862c90a08a2c92809c903883772' # ratchet:cariad-tech/merge-queue-ci-skipper@main
with:
secret: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
continue-on-error: true
download_repo_name:
runs-on: 'gemini-cli-ubuntu-16-core'
if: "github.repository == 'google-gemini/gemini-cli' && (github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_run')"
outputs:
repo_name: '${{ steps.output-repo-name.outputs.repo_name }}'
head_sha: '${{ steps.output-repo-name.outputs.head_sha }}'
steps:
- name: 'Mock Repo Artifact'
if: "${{ github.event_name == 'workflow_dispatch' }}"
env:
REPO_NAME: '${{ github.event.inputs.repo_name }}'
run: |
mkdir -p ./pr
echo "${REPO_NAME}" > ./pr/repo_name
- uses: 'actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02' # ratchet:actions/upload-artifact@v4
with:
name: 'repo_name'
path: 'pr/'
- name: 'Download the repo_name artifact'
uses: 'actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0' # ratchet:actions/download-artifact@v5
env:
RUN_ID: "${{ github.event_name == 'workflow_run' && github.event.workflow_run.id || github.run_id }}"
with:
github-token: '${{ secrets.GITHUB_TOKEN }}'
name: 'repo_name'
run-id: '${{ env.RUN_ID }}'
path: '${{ runner.temp }}/artifacts'
- name: 'Output Repo Name and SHA'
id: 'output-repo-name'
uses: 'actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd' # ratchet:actions/github-script@v8
with:
github-token: '${{ secrets.GITHUB_TOKEN }}'
script: |
const fs = require('fs');
const path = require('path');
const temp = '${{ runner.temp }}/artifacts';
const repoPath = path.join(temp, 'repo_name');
if (fs.existsSync(repoPath)) {
const repo_name = String(fs.readFileSync(repoPath)).trim();
core.setOutput('repo_name', repo_name);
}
const shaPath = path.join(temp, 'head_sha');
if (fs.existsSync(shaPath)) {
const head_sha = String(fs.readFileSync(shaPath)).trim();
core.setOutput('head_sha', head_sha);
}
parse_run_context:
name: 'Parse run context'
runs-on: 'gemini-cli-ubuntu-16-core'
needs: 'download_repo_name'
if: "github.repository == 'google-gemini/gemini-cli' && always()"
outputs:
repository: '${{ steps.set_context.outputs.REPO }}'
sha: '${{ steps.set_context.outputs.SHA }}'
steps:
- id: 'set_context'
name: 'Set dynamic repository and SHA'
env:
REPO: '${{ needs.download_repo_name.outputs.repo_name || github.repository }}'
SHA: '${{ needs.download_repo_name.outputs.head_sha || github.event.inputs.head_sha || github.event.workflow_run.head_sha || github.sha }}'
shell: 'bash'
run: |
echo "REPO=$REPO" >> "$GITHUB_OUTPUT"
echo "SHA=$SHA" >> "$GITHUB_OUTPUT"
set_pending_status:
runs-on: 'gemini-cli-ubuntu-16-core'
permissions: 'write-all'
needs:
- 'parse_run_context'
if: "github.repository == 'google-gemini/gemini-cli' && always()"
steps:
- name: 'Set pending status'
uses: 'myrotvorets/set-commit-status-action@16037e056d73b2d3c88e37e393ff369047f70886' # ratchet:myrotvorets/set-commit-status-action@master
if: "github.repository == 'google-gemini/gemini-cli' && always()"
with:
allowForks: 'true'
repo: '${{ github.repository }}'
sha: '${{ needs.parse_run_context.outputs.sha }}'
token: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
status: 'pending'
context: 'E2E (Chained)'
e2e_linux:
name: 'E2E Test (Linux) - ${{ matrix.sandbox }}'
needs:
- 'merge_queue_skipper'
- 'parse_run_context'
runs-on: 'gemini-cli-ubuntu-16-core'
if: |
github.repository == 'google-gemini/gemini-cli' && always() && (needs.merge_queue_skipper.result !='success' || needs.merge_queue_skipper.outputs.skip != 'true')
strategy:
fail-fast: false
matrix:
sandbox:
- 'sandbox:none'
- 'sandbox:docker'
node-version:
- '20.x'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955' # ratchet:actions/checkout@v5
with:
ref: '${{ needs.parse_run_context.outputs.sha }}'
repository: '${{ needs.parse_run_context.outputs.repository }}'
- name: 'Set up Node.js ${{ matrix.node-version }}'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '${{ matrix.node-version }}'
- name: 'Install dependencies'
run: 'npm ci'
- name: 'Build project'
run: 'npm run build'
- name: 'Set up Docker'
if: "${{matrix.sandbox == 'sandbox:docker'}}"
uses: 'docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435' # ratchet:docker/setup-buildx-action@v3
- name: 'Run E2E tests'
env:
GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
KEEP_OUTPUT: 'true'
VERBOSE: 'true'
BUILD_SANDBOX_FLAGS: '--cache-from type=gha --cache-to type=gha,mode=max'
shell: 'bash'
run: |
if [[ "${{ matrix.sandbox }}" == "sandbox:docker" ]]; then
npm run test:integration:sandbox:docker
else
npm run test:integration:sandbox:none
fi
e2e_mac:
name: 'E2E Test (macOS)'
needs:
- 'merge_queue_skipper'
- 'parse_run_context'
runs-on: 'macos-latest'
if: |
github.repository == 'google-gemini/gemini-cli' && always() && (needs.merge_queue_skipper.result !='success' || needs.merge_queue_skipper.outputs.skip != 'true')
steps:
- name: 'Checkout'
uses: 'actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955' # ratchet:actions/checkout@v5
with:
ref: '${{ needs.parse_run_context.outputs.sha }}'
repository: '${{ needs.parse_run_context.outputs.repository }}'
- name: 'Set up Node.js 20.x'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '20.x'
- name: 'Install dependencies'
run: 'npm ci'
- name: 'Build project'
run: 'npm run build'
- name: 'Fix rollup optional dependencies on macOS'
if: "${{runner.os == 'macOS'}}"
run: |
npm cache clean --force
- name: 'Run E2E tests (non-Windows)'
if: "${{runner.os != 'Windows'}}"
env:
GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
KEEP_OUTPUT: 'true'
SANDBOX: 'sandbox:none'
VERBOSE: 'true'
run: 'npm run test:integration:sandbox:none'
e2e_windows:
name: 'Slow E2E - Win'
needs:
- 'merge_queue_skipper'
- 'parse_run_context'
if: |
github.repository == 'google-gemini/gemini-cli' && always() && (needs.merge_queue_skipper.result !='success' || needs.merge_queue_skipper.outputs.skip != 'true')
runs-on: 'gemini-cli-windows-16-core'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955' # ratchet:actions/checkout@v5
with:
ref: '${{ needs.parse_run_context.outputs.sha }}'
repository: '${{ needs.parse_run_context.outputs.repository }}'
- name: 'Set up Node.js 20.x'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '20.x'
cache: 'npm'
- name: 'Configure Windows Defender exclusions'
run: |
Add-MpPreference -ExclusionPath $env:GITHUB_WORKSPACE -Force
Add-MpPreference -ExclusionPath "$env:GITHUB_WORKSPACE\node_modules" -Force
Add-MpPreference -ExclusionPath "$env:GITHUB_WORKSPACE\packages" -Force
Add-MpPreference -ExclusionPath "$env:TEMP" -Force
shell: 'pwsh'
- name: 'Configure npm for Windows performance'
run: |
npm config set progress false
npm config set audit false
npm config set fund false
npm config set loglevel error
npm config set maxsockets 32
npm config set registry https://registry.npmjs.org/
shell: 'pwsh'
- name: 'Install dependencies'
run: 'npm ci'
shell: 'pwsh'
- name: 'Build project'
run: 'npm run build'
shell: 'pwsh'
- name: 'Ensure Chrome is available'
shell: 'pwsh'
run: |
$chromePaths = @(
"${env:ProgramFiles}\Google\Chrome\Application\chrome.exe",
"${env:ProgramFiles(x86)}\Google\Chrome\Application\chrome.exe"
)
$chromeExists = $chromePaths | Where-Object { Test-Path $_ } | Select-Object -First 1
if (-not $chromeExists) {
Write-Host 'Chrome not found, installing via Chocolatey...'
choco install googlechrome -y --no-progress --ignore-checksums
}
$installed = $chromePaths | Where-Object { Test-Path $_ } | Select-Object -First 1
if ($installed) {
Write-Host "Chrome found at: $installed"
& $installed --version
} else {
Write-Error 'Chrome installation failed'
exit 1
}
- name: 'Run E2E tests'
env:
GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
KEEP_OUTPUT: 'true'
SANDBOX: 'sandbox:none'
VERBOSE: 'true'
NODE_OPTIONS: '--max-old-space-size=32768 --max-semi-space-size=256'
UV_THREADPOOL_SIZE: '32'
NODE_ENV: 'test'
shell: 'pwsh'
run: 'npm run test:integration:sandbox:none'
evals:
name: 'Evals (ALWAYS_PASSING)'
needs:
- 'merge_queue_skipper'
- 'parse_run_context'
runs-on: 'gemini-cli-ubuntu-16-core'
if: |
github.repository == 'google-gemini/gemini-cli' && always() && (needs.merge_queue_skipper.result != 'success' || needs.merge_queue_skipper.outputs.skip != 'true')
steps:
- name: 'Checkout'
uses: 'actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955' # ratchet:actions/checkout@v5
with:
ref: '${{ needs.parse_run_context.outputs.sha }}'
repository: '${{ needs.parse_run_context.outputs.repository }}'
fetch-depth: 0
- name: 'Set up Node.js 20.x'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '20.x'
- name: 'Install dependencies'
run: 'npm ci'
- name: 'Build project'
run: 'npm run build'
- name: 'Check if evals should run'
id: 'check_evals'
run: |
SHOULD_RUN=$(node scripts/changed_prompt.js)
echo "should_run=$SHOULD_RUN" >> "$GITHUB_OUTPUT"
- name: 'Run Evals (Required to pass)'
if: "${{ steps.check_evals.outputs.should_run == 'true' }}"
env:
GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
run: 'npm run test:always_passing_evals'
e2e:
name: 'E2E'
if: |
github.repository == 'google-gemini/gemini-cli' && always() && (needs.merge_queue_skipper.result != 'success' || needs.merge_queue_skipper.outputs.skip != 'true')
needs:
- 'e2e_linux'
- 'e2e_mac'
- 'e2e_windows'
- 'evals'
- 'merge_queue_skipper'
runs-on: 'gemini-cli-ubuntu-16-core'
steps:
- name: 'Check E2E test results'
run: |
if [[ ${NEEDS_E2E_LINUX_RESULT} != 'success' || \
${NEEDS_E2E_MAC_RESULT} != 'success' || \
${NEEDS_E2E_WINDOWS_RESULT} != 'success' || \
${NEEDS_EVALS_RESULT} != 'success' ]]; then
echo "One or more E2E jobs failed."
exit 1
fi
echo "All required E2E jobs passed!"
env:
NEEDS_E2E_LINUX_RESULT: '${{ needs.e2e_linux.result }}'
NEEDS_E2E_MAC_RESULT: '${{ needs.e2e_mac.result }}'
NEEDS_E2E_WINDOWS_RESULT: '${{ needs.e2e_windows.result }}'
NEEDS_EVALS_RESULT: '${{ needs.evals.result }}'
set_workflow_status:
runs-on: 'gemini-cli-ubuntu-16-core'
permissions: 'write-all'
if: "github.repository == 'google-gemini/gemini-cli' && always()"
needs:
- 'parse_run_context'
- 'e2e'
steps:
- name: 'Set workflow status'
uses: 'myrotvorets/set-commit-status-action@16037e056d73b2d3c88e37e393ff369047f70886' # ratchet:myrotvorets/set-commit-status-action@master
if: "github.repository == 'google-gemini/gemini-cli' && always()"
with:
allowForks: 'true'
repo: '${{ github.repository }}'
sha: '${{ needs.parse_run_context.outputs.sha }}'
token: '${{ secrets.GITHUB_TOKEN }}'
status: '${{ needs.e2e.result }}'
context: 'E2E (Chained)'
================================================
FILE: .github/workflows/ci.yml
================================================
name: 'Testing: CI'
on:
push:
branches:
- 'main'
- 'release/**'
pull_request:
branches:
- 'main'
- 'release/**'
merge_group:
workflow_dispatch:
inputs:
branch_ref:
description: 'Branch to run on'
required: true
default: 'main'
type: 'string'
concurrency:
group: '${{ github.workflow }}-${{ github.head_ref || github.ref }}'
cancel-in-progress: |-
${{ github.ref != 'refs/heads/main' && !startsWith(github.ref, 'refs/heads/release/') }}
permissions:
checks: 'write'
contents: 'read'
statuses: 'write'
defaults:
run:
shell: 'bash'
jobs:
merge_queue_skipper:
permissions: 'read-all'
name: 'Merge Queue Skipper'
runs-on: 'gemini-cli-ubuntu-16-core'
if: "github.repository == 'google-gemini/gemini-cli'"
outputs:
skip: '${{ steps.merge-queue-ci-skipper.outputs.skip-check }}'
steps:
- id: 'merge-queue-ci-skipper'
uses: 'cariad-tech/merge-queue-ci-skipper@1032489e59437862c90a08a2c92809c903883772' # ratchet:cariad-tech/merge-queue-ci-skipper@main
with:
secret: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
lint:
name: 'Lint'
runs-on: 'gemini-cli-ubuntu-16-core'
needs: 'merge_queue_skipper'
if: "github.repository == 'google-gemini/gemini-cli' && needs.merge_queue_skipper.outputs.skip == 'false'"
env:
GEMINI_LINT_TEMP_DIR: '${{ github.workspace }}/.gemini-linters'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
ref: '${{ github.event.inputs.branch_ref || github.ref }}'
fetch-depth: 0
- name: 'Set up Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4.4.0
with:
node-version-file: '.nvmrc'
cache: 'npm'
- name: 'Cache Linters'
# TODO(review): pin to a commit SHA with a ratchet comment, like every other action in this workflow
uses: 'actions/cache@v4'
with:
path: '${{ env.GEMINI_LINT_TEMP_DIR }}'
key: "${{ runner.os }}-${{ runner.arch }}-linters-${{ hashFiles('scripts/lint.js') }}"
- name: 'Install dependencies'
run: 'npm ci'
- name: 'Cache ESLint'
uses: 'actions/cache@v4'
with:
path: '.eslintcache'
key: "${{ runner.os }}-eslint-${{ hashFiles('package-lock.json', 'eslint.config.js') }}"
- name: 'Validate NOTICES.txt'
run: 'git diff --exit-code packages/vscode-ide-companion/NOTICES.txt'
- name: 'Check lockfile'
run: 'npm run check:lockfile'
- name: 'Install linters'
run: 'node scripts/lint.js --setup'
- name: 'Run ESLint'
run: 'node scripts/lint.js --eslint'
- name: 'Run actionlint'
run: 'node scripts/lint.js --actionlint'
- name: 'Run shellcheck'
run: 'node scripts/lint.js --shellcheck'
- name: 'Run yamllint'
run: 'node scripts/lint.js --yamllint'
- name: 'Run Prettier'
run: 'node scripts/lint.js --prettier'
- name: 'Build docs prerequisites'
run: 'npm run predocs:settings'
- name: 'Verify settings docs'
run: 'npm run docs:settings -- --check'
- name: 'Run sensitive keyword linter'
run: 'node scripts/lint.js --sensitive-keywords'
link_checker:
name: 'Link Checker'
runs-on: 'ubuntu-latest'
if: "github.repository == 'google-gemini/gemini-cli'"
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
- name: 'Link Checker'
uses: 'lycheeverse/lychee-action@885c65f3dc543b57c898c8099f4e08c8afd178a2' # ratchet:lycheeverse/lychee-action@v2.6.1
with:
args: '--verbose --accept 200,503 ./**/*.md'
fail: true
test_linux:
name: 'Test (Linux) - ${{ matrix.node-version }}, ${{ matrix.shard }}'
runs-on: 'gemini-cli-ubuntu-16-core'
needs:
- 'merge_queue_skipper'
if: "github.repository == 'google-gemini/gemini-cli' && needs.merge_queue_skipper.outputs.skip == 'false'"
permissions:
contents: 'read'
checks: 'write'
pull-requests: 'write'
strategy:
matrix:
node-version:
- '20.x'
- '22.x'
- '24.x'
shard:
- 'cli'
- 'others'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
- name: 'Set up Node.js ${{ matrix.node-version }}'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '${{ matrix.node-version }}'
cache: 'npm'
- name: 'Build project'
run: 'npm run build'
- name: 'Install dependencies for testing'
run: 'npm ci'
- name: 'Run tests and generate reports'
env:
NO_COLOR: true
run: |
if [[ "${{ matrix.shard }}" == "cli" ]]; then
npm run test:ci --workspace @google/gemini-cli
else
# Explicitly list non-cli packages to ensure they are sharded correctly
npm run test:ci --workspace @google/gemini-cli-core --workspace @google/gemini-cli-a2a-server --workspace gemini-cli-vscode-ide-companion --workspace @google/gemini-cli-test-utils --if-present -- --coverage.enabled=false
npm run test:scripts
fi
- name: 'Bundle'
run: 'npm run bundle'
- name: 'Smoke test bundle'
run: 'node ./bundle/gemini.js --version'
- name: 'Smoke test npx installation'
run: |
# 1. Package the project into a tarball
TARBALL=$(npm pack | tail -n 1)
# 2. Move to a fresh directory for isolation
mkdir -p ../smoke-test-dir
mv "$TARBALL" ../smoke-test-dir/
cd ../smoke-test-dir
# 3. Run npx from the tarball
npx "./$TARBALL" --version
- name: 'Wait for file system sync'
run: 'sleep 2'
- name: 'Publish Test Report (for non-forks)'
if: |-
${{ always() && (github.event.pull_request.head.repo.full_name == github.repository) }}
uses: 'dorny/test-reporter@dc3a92680fcc15842eef52e8c4606ea7ce6bd3f3' # ratchet:dorny/test-reporter@v2
with:
name: 'Test Results (Node ${{ runner.os }}, ${{ matrix.node-version }}, ${{ matrix.shard }})'
path: 'packages/*/junit.xml'
reporter: 'java-junit'
fail-on-error: 'false'
- name: 'Upload Test Results Artifact (for forks)'
if: |-
${{ always() && (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository) }}
uses: 'actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02' # ratchet:actions/upload-artifact@v4
with:
name: 'test-results-fork-${{ runner.os }}-${{ matrix.node-version }}-${{ matrix.shard }}'
path: 'packages/*/junit.xml'
test_mac:
name: 'Test (Mac) - ${{ matrix.node-version }}, ${{ matrix.shard }}'
runs-on: 'macos-latest'
needs:
- 'merge_queue_skipper'
if: "github.repository == 'google-gemini/gemini-cli' && needs.merge_queue_skipper.outputs.skip == 'false'"
permissions:
contents: 'read'
checks: 'write'
pull-requests: 'write'
continue-on-error: true
strategy:
matrix:
node-version:
- '20.x'
- '22.x'
- '24.x'
shard:
- 'cli'
- 'others'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
- name: 'Set up Node.js ${{ matrix.node-version }}'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '${{ matrix.node-version }}'
cache: 'npm'
- name: 'Build project'
run: 'npm run build'
- name: 'Install dependencies for testing'
run: 'npm ci'
- name: 'Run tests and generate reports'
env:
NO_COLOR: true
run: |
if [[ "${{ matrix.shard }}" == "cli" ]]; then
npm run test:ci --workspace @google/gemini-cli -- --coverage.enabled=false
else
# Explicitly list non-cli packages to ensure they are sharded correctly
npm run test:ci --workspace @google/gemini-cli-core --workspace @google/gemini-cli-a2a-server --workspace gemini-cli-vscode-ide-companion --workspace @google/gemini-cli-test-utils --if-present -- --coverage.enabled=false
npm run test:scripts
fi
- name: 'Bundle'
run: 'npm run bundle'
- name: 'Smoke test bundle'
run: 'node ./bundle/gemini.js --version'
- name: 'Smoke test npx installation'
run: |
# 1. Package the project into a tarball
TARBALL=$(npm pack | tail -n 1)
# 2. Move to a fresh directory for isolation
mkdir -p ../smoke-test-dir
mv "$TARBALL" ../smoke-test-dir/
cd ../smoke-test-dir
# 3. Run npx from the tarball
npx "./$TARBALL" --version
- name: 'Wait for file system sync'
run: 'sleep 2'
- name: 'Publish Test Report (for non-forks)'
if: |-
${{ always() && (github.event.pull_request.head.repo.full_name == github.repository) }}
uses: 'dorny/test-reporter@dc3a92680fcc15842eef52e8c4606ea7ce6bd3f3' # ratchet:dorny/test-reporter@v2
with:
name: 'Test Results (Node ${{ runner.os }}, ${{ matrix.node-version }}, ${{ matrix.shard }})'
path: 'packages/*/junit.xml'
reporter: 'java-junit'
fail-on-error: 'false'
- name: 'Upload Test Results Artifact (for forks)'
if: |-
${{ always() && (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository) }}
uses: 'actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02' # ratchet:actions/upload-artifact@v4
with:
name: 'test-results-fork-${{ runner.os }}-${{ matrix.node-version }}-${{ matrix.shard }}'
path: 'packages/*/junit.xml'
- name: 'Upload coverage reports'
if: |-
${{ always() }}
uses: 'actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02' # ratchet:actions/upload-artifact@v4
with:
name: 'coverage-reports-${{ runner.os }}-${{ matrix.node-version }}-${{ matrix.shard }}'
path: 'packages/*/coverage'
codeql:
name: 'CodeQL'
runs-on: 'gemini-cli-ubuntu-16-core'
needs: 'merge_queue_skipper'
if: "github.repository == 'google-gemini/gemini-cli' && needs.merge_queue_skipper.outputs.skip == 'false'"
permissions:
actions: 'read'
contents: 'read'
security-events: 'write'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
ref: '${{ github.event.inputs.branch_ref || github.ref }}'
- name: 'Initialize CodeQL'
uses: 'github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2' # ratchet:github/codeql-action/init@v3
with:
languages: 'javascript'
- name: 'Perform CodeQL Analysis'
uses: 'github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2' # ratchet:github/codeql-action/analyze@v3
# Check for changes in bundle size.
bundle_size:
name: 'Check Bundle Size'
needs: 'merge_queue_skipper'
if: "github.repository == 'google-gemini/gemini-cli' && github.event_name == 'pull_request' && needs.merge_queue_skipper.outputs.skip == 'false'"
runs-on: 'gemini-cli-ubuntu-16-core'
permissions:
contents: 'read' # For checkout
pull-requests: 'write' # For commenting
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
ref: '${{ github.event.inputs.branch_ref || github.ref }}'
fetch-depth: 1
- uses: 'preactjs/compressed-size-action@946a292cd35bd1088e0d7eb92b69d1a8d5b5d76a'
with:
repo-token: '${{ secrets.GITHUB_TOKEN }}'
pattern: './bundle/**/*.{js,sb}'
minimum-change-threshold: '1000'
compression: 'none'
clean-script: 'clean'
test_windows:
name: 'Slow Test - Win - ${{ matrix.shard }}'
runs-on: 'gemini-cli-windows-16-core'
needs: 'merge_queue_skipper'
if: "github.repository == 'google-gemini/gemini-cli' && needs.merge_queue_skipper.outputs.skip == 'false'"
timeout-minutes: 60
strategy:
matrix:
shard:
- 'cli'
- 'others'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
with:
ref: '${{ github.event.inputs.branch_ref || github.ref }}'
- name: 'Set up Node.js 20.x'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '20.x'
cache: 'npm'
- name: 'Configure Windows Defender exclusions'
run: |
Add-MpPreference -ExclusionPath $env:GITHUB_WORKSPACE -Force
Add-MpPreference -ExclusionPath "$env:GITHUB_WORKSPACE\node_modules" -Force
Add-MpPreference -ExclusionPath "$env:GITHUB_WORKSPACE\packages" -Force
Add-MpPreference -ExclusionPath "$env:TEMP" -Force
shell: 'pwsh'
- name: 'Configure npm for Windows performance'
run: |
npm config set progress false
npm config set audit false
npm config set fund false
npm config set loglevel error
npm config set maxsockets 32
npm config set registry https://registry.npmjs.org/
shell: 'pwsh'
- name: 'Install dependencies'
run: 'npm ci'
shell: 'pwsh'
- name: 'Build project'
run: 'npm run build'
shell: 'pwsh'
env:
NODE_OPTIONS: '--max-old-space-size=32768 --max-semi-space-size=256'
UV_THREADPOOL_SIZE: '32'
NODE_ENV: 'production'
- name: 'Run tests and generate reports'
env:
GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
NO_COLOR: true
NODE_OPTIONS: '--max-old-space-size=32768 --max-semi-space-size=256'
UV_THREADPOOL_SIZE: '32'
NODE_ENV: 'test'
run: |
if ("${{ matrix.shard }}" -eq "cli") {
npm run test:ci --workspace @google/gemini-cli -- --coverage.enabled=false
} else {
# Explicitly list non-cli packages to ensure they are sharded correctly
npm run test:ci --workspace @google/gemini-cli-core --workspace @google/gemini-cli-a2a-server --workspace gemini-cli-vscode-ide-companion --workspace @google/gemini-cli-test-utils --if-present -- --coverage.enabled=false
npm run test:scripts
}
shell: 'pwsh'
- name: 'Bundle'
run: 'npm run bundle'
shell: 'pwsh'
- name: 'Smoke test bundle'
run: 'node ./bundle/gemini.js --version'
shell: 'pwsh'
- name: 'Smoke test npx installation'
run: |
# 1. Package the project into a tarball
$PACK_OUTPUT = npm pack
$TARBALL = $PACK_OUTPUT[-1]
# 2. Move to a fresh directory for isolation
New-Item -ItemType Directory -Force -Path ../smoke-test-dir
Move-Item $TARBALL ../smoke-test-dir/
Set-Location ../smoke-test-dir
# 3. Run npx from the tarball
npx "./$TARBALL" --version
shell: 'pwsh'
ci:
name: 'CI'
if: "github.repository == 'google-gemini/gemini-cli' && always()"
needs:
- 'lint'
- 'link_checker'
- 'test_linux'
- 'test_mac'
- 'test_windows'
- 'codeql'
- 'bundle_size'
runs-on: 'gemini-cli-ubuntu-16-core'
steps:
- name: 'Check all job results'
run: |
if [[ (${NEEDS_LINT_RESULT} != 'success' && ${NEEDS_LINT_RESULT} != 'skipped') || \
(${NEEDS_LINK_CHECKER_RESULT} != 'success' && ${NEEDS_LINK_CHECKER_RESULT} != 'skipped') || \
(${NEEDS_TEST_LINUX_RESULT} != 'success' && ${NEEDS_TEST_LINUX_RESULT} != 'skipped') || \
(${NEEDS_TEST_MAC_RESULT} != 'success' && ${NEEDS_TEST_MAC_RESULT} != 'skipped') || \
(${NEEDS_TEST_WINDOWS_RESULT} != 'success' && ${NEEDS_TEST_WINDOWS_RESULT} != 'skipped') || \
(${NEEDS_CODEQL_RESULT} != 'success' && ${NEEDS_CODEQL_RESULT} != 'skipped') || \
(${NEEDS_BUNDLE_SIZE_RESULT} != 'success' && ${NEEDS_BUNDLE_SIZE_RESULT} != 'skipped') ]]; then
echo "One or more CI jobs failed."
exit 1
fi
echo "All CI jobs passed!"
env:
NEEDS_LINT_RESULT: '${{ needs.lint.result }}'
NEEDS_LINK_CHECKER_RESULT: '${{ needs.link_checker.result }}'
NEEDS_TEST_LINUX_RESULT: '${{ needs.test_linux.result }}'
NEEDS_TEST_MAC_RESULT: '${{ needs.test_mac.result }}'
NEEDS_TEST_WINDOWS_RESULT: '${{ needs.test_windows.result }}'
NEEDS_CODEQL_RESULT: '${{ needs.codeql.result }}'
NEEDS_BUNDLE_SIZE_RESULT: '${{ needs.bundle_size.result }}'
================================================
FILE: .github/workflows/community-report.yml
================================================
name: 'Generate Weekly Community Report 📊'
on:
schedule:
- cron: '0 12 * * 1' # Run at 12:00 UTC on Monday
workflow_dispatch:
inputs:
days:
description: 'Number of days to look back for the report'
required: true
default: '7'
jobs:
generate-report:
name: 'Generate Report 📝'
if: |-
${{ github.repository == 'google-gemini/gemini-cli' }}
runs-on: 'ubuntu-latest'
permissions:
issues: 'write'
pull-requests: 'read'
discussions: 'read'
contents: 'read'
id-token: 'write'
steps:
- name: 'Generate GitHub App Token 🔑'
id: 'generate_token'
uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2
with:
app-id: '${{ secrets.APP_ID }}'
private-key: '${{ secrets.PRIVATE_KEY }}'
permission-issues: 'write'
permission-pull-requests: 'read'
permission-discussions: 'read'
permission-contents: 'read'
- name: 'Generate Report 📜'
id: 'report'
env:
GH_TOKEN: '${{ steps.generate_token.outputs.token }}'
REPO: '${{ github.repository }}'
DAYS: '${{ github.event.inputs.days || 7 }}'
run: |-
set -e
START_DATE="$(date -u -d "$DAYS days ago" +'%Y-%m-%d')"
END_DATE="$(date -u +'%Y-%m-%d')"
echo "⏳ Generating report for contributions from ${START_DATE} to ${END_DATE}..."
declare -A author_is_googler
check_googler_status() {
local author="$1"
if [[ "${author}" == *"[bot]" ]]; then
author_is_googler[${author}]=1
return 1
fi
if [[ -v "author_is_googler[${author}]" ]]; then
return "${author_is_googler[${author}]}"
fi
if gh api "orgs/googlers/members/${author}" --silent 2>/dev/null; then
echo "🧑‍💻 ${author} is a Googler."
author_is_googler[${author}]=0
else
echo "🌍 ${author} is a community contributor."
author_is_googler[${author}]=1
fi
return "${author_is_googler[${author}]}"
}
googler_issues=0
non_googler_issues=0
googler_prs=0
non_googler_prs=0
echo "🔎 Fetching issues and pull requests..."
ITEMS_JSON="$(gh search issues --repo "${REPO}" "created:>${START_DATE}" --json author,isPullRequest --limit 1000)"
for row in $(echo "${ITEMS_JSON}" | jq -r '.[] | @base64'); do
_jq() {
echo "${row}" | base64 --decode | jq -r "${1}"
}
author="$(_jq '.author.login')"
is_pr="$(_jq '.isPullRequest')"
if [[ -z "${author}" || "${author}" == "null" ]]; then
continue
fi
if check_googler_status "${author}"; then
if [[ "${is_pr}" == "true" ]]; then
((googler_prs++))
else
((googler_issues++))
fi
else
if [[ "${is_pr}" == "true" ]]; then
((non_googler_prs++))
else
((non_googler_issues++))
fi
fi
done
googler_discussions=0
non_googler_discussions=0
echo "🗣️ Fetching discussions..."
DISCUSSION_QUERY='''
query($q: String!) {
search(query: $q, type: DISCUSSION, first: 100) {
nodes {
... on Discussion {
author {
login
}
}
}
}
}'''
DISCUSSIONS_JSON="$(gh api graphql -f q="repo:${REPO} created:>${START_DATE}" -f query="${DISCUSSION_QUERY}")"
for row in $(echo "${DISCUSSIONS_JSON}" | jq -r '.data.search.nodes[] | @base64'); do
_jq() {
echo "${row}" | base64 --decode | jq -r "${1}"
}
author="$(_jq '.author.login')"
if [[ -z "${author}" || "${author}" == "null" ]]; then
continue
fi
if check_googler_status "${author}"; then
((googler_discussions++))
else
((non_googler_discussions++))
fi
done
echo "✍️ Generating report content..."
TOTAL_ISSUES=$((googler_issues + non_googler_issues))
TOTAL_PRS=$((googler_prs + non_googler_prs))
TOTAL_DISCUSSIONS=$((googler_discussions + non_googler_discussions))
REPORT_BODY="$(cat <<EOF
## 📊 Weekly Community Report (${START_DATE} to ${END_DATE})

| Contribution | Googler | Community | Total |
| --- | --- | --- | --- |
| Issues | ${googler_issues} | ${non_googler_issues} | ${TOTAL_ISSUES} |
| Pull Requests | ${googler_prs} | ${non_googler_prs} | ${TOTAL_PRS} |
| Discussions | ${googler_discussions} | ${non_googler_discussions} | ${TOTAL_DISCUSSIONS} |
EOF
)"
echo "report_body<<EOF" >> "${GITHUB_OUTPUT}"
echo "${REPORT_BODY}" >> "${GITHUB_OUTPUT}"
echo "EOF" >> "${GITHUB_OUTPUT}"
echo "📊 Community Contribution Report:"
echo "${REPORT_BODY}"
- name: '🤖 Get Insights from Report'
if: |-
${{ steps.report.outputs.report_body != '' }}
uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}'
REPOSITORY: '${{ github.repository }}'
with:
gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
settings: |-
{
"coreTools": [
"run_shell_command(gh issue list)",
"run_shell_command(gh pr list)",
"run_shell_command(gh search issues)",
"run_shell_command(gh search prs)"
]
}
prompt: |-
You are a helpful assistant that analyzes community contribution reports.
Based on the following report, please provide a brief summary and highlight any interesting trends or potential areas for improvement.
Report:
${{ steps.report.outputs.report_body }}
================================================
FILE: .github/workflows/deflake.yml
================================================
name: 'Deflake E2E'
on:
workflow_dispatch:
inputs:
branch_ref:
description: 'Branch to run on'
required: true
default: 'main'
type: 'string'
test_name_pattern:
description: 'The test name pattern to use'
required: false
type: 'string'
runs:
description: 'The number of runs'
required: false
default: 5
type: 'number'
concurrency:
group: '${{ github.workflow }}-${{ github.head_ref || github.ref }}'
cancel-in-progress: |-
${{ github.ref != 'refs/heads/main' && !startsWith(github.ref, 'refs/heads/release/') }}
jobs:
deflake_e2e_linux:
name: 'E2E Test (Linux) - ${{ matrix.sandbox }}'
runs-on: 'gemini-cli-ubuntu-16-core'
if: "github.repository == 'google-gemini/gemini-cli'"
strategy:
fail-fast: false
matrix:
sandbox:
- 'sandbox:none'
- 'sandbox:docker'
node-version:
- '20.x'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955' # ratchet:actions/checkout@v5
with:
ref: '${{ github.event.inputs.branch_ref || github.ref }}'
repository: '${{ github.repository }}'
- name: 'Set up Node.js ${{ matrix.node-version }}'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '${{ matrix.node-version }}'
- name: 'Install dependencies'
run: 'npm ci'
- name: 'Build project'
run: 'npm run build'
- name: 'Set up Docker'
if: "matrix.sandbox == 'sandbox:docker'"
uses: 'docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435' # ratchet:docker/setup-buildx-action@v3
- name: 'Run E2E tests'
env:
GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
IS_DOCKER: "${{ matrix.sandbox == 'sandbox:docker' }}"
KEEP_OUTPUT: 'true'
RUNS: '${{ github.event.inputs.runs }}'
TEST_NAME_PATTERN: '${{ github.event.inputs.test_name_pattern }}'
VERBOSE: 'true'
shell: 'bash'
run: |
if [[ "${IS_DOCKER}" == "true" ]]; then
npm run deflake:test:integration:sandbox:docker -- --runs="${RUNS}" -- --testNamePattern "'${TEST_NAME_PATTERN}'"
else
npm run deflake:test:integration:sandbox:none -- --runs="${RUNS}" -- --testNamePattern "'${TEST_NAME_PATTERN}'"
fi
deflake_e2e_mac:
name: 'E2E Test (macOS)'
runs-on: 'macos-latest'
if: "github.repository == 'google-gemini/gemini-cli'"
steps:
- name: 'Checkout'
uses: 'actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955' # ratchet:actions/checkout@v5
with:
ref: '${{ github.event.inputs.branch_ref || github.ref }}'
repository: '${{ github.repository }}'
- name: 'Set up Node.js 20.x'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '20.x'
- name: 'Install dependencies'
run: 'npm ci'
- name: 'Build project'
run: 'npm run build'
- name: 'Fix rollup optional dependencies on macOS'
if: "runner.os == 'macOS'"
run: |
npm cache clean --force
- name: 'Run E2E tests (non-Windows)'
if: "runner.os != 'Windows'"
env:
GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
KEEP_OUTPUT: 'true'
RUNS: '${{ github.event.inputs.runs }}'
SANDBOX: 'sandbox:none'
TEST_NAME_PATTERN: '${{ github.event.inputs.test_name_pattern }}'
VERBOSE: 'true'
run: |
npm run deflake:test:integration:sandbox:none -- --runs="${RUNS}" -- --testNamePattern "'${TEST_NAME_PATTERN}'"
deflake_e2e_windows:
name: 'Slow E2E - Win'
runs-on: 'gemini-cli-windows-16-core'
if: "github.repository == 'google-gemini/gemini-cli'"
steps:
- name: 'Checkout'
uses: 'actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955' # ratchet:actions/checkout@v5
with:
ref: '${{ github.event.inputs.branch_ref || github.ref }}'
repository: '${{ github.repository }}'
- name: 'Set up Node.js 20.x'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
with:
node-version: '20.x'
cache: 'npm'
- name: 'Configure Windows Defender exclusions'
run: |
Add-MpPreference -ExclusionPath $env:GITHUB_WORKSPACE -Force
Add-MpPreference -ExclusionPath "$env:GITHUB_WORKSPACE\node_modules" -Force
Add-MpPreference -ExclusionPath "$env:GITHUB_WORKSPACE\packages" -Force
Add-MpPreference -ExclusionPath "$env:TEMP" -Force
shell: 'pwsh'
- name: 'Configure npm for Windows performance'
run: |
npm config set progress false
npm config set audit false
npm config set fund false
npm config set loglevel error
npm config set maxsockets 32
npm config set registry https://registry.npmjs.org/
shell: 'pwsh'
- name: 'Install dependencies'
run: 'npm ci'
shell: 'pwsh'
- name: 'Build project'
run: 'npm run build'
shell: 'pwsh'
- name: 'Run E2E tests'
env:
GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
KEEP_OUTPUT: 'true'
SANDBOX: 'sandbox:none'
VERBOSE: 'true'
NODE_OPTIONS: '--max-old-space-size=32768 --max-semi-space-size=256'
UV_THREADPOOL_SIZE: '32'
NODE_ENV: 'test'
RUNS: '${{ github.event.inputs.runs }}'
TEST_NAME_PATTERN: '${{ github.event.inputs.test_name_pattern }}'
shell: 'pwsh'
run: |
npm run deflake:test:integration:sandbox:none -- --runs="$env:RUNS" -- --testNamePattern "'$env:TEST_NAME_PATTERN'"
================================================
FILE: .github/workflows/docs-page-action.yml
================================================
name: 'Deploy GitHub Pages'
on:
push:
tags: 'v*'
workflow_dispatch:
permissions:
contents: 'read'
pages: 'write'
id-token: 'write'
# Allow only one concurrent deployment, skipping runs queued between the run
# in-progress and latest queued. However, do NOT cancel in-progress runs as we
# want to allow these production deployments to complete.
concurrency:
group: '${{ github.workflow }}'
cancel-in-progress: false
jobs:
build:
if: "github.repository == 'google-gemini/gemini-cli' && !contains(github.ref_name, 'nightly')"
runs-on: 'ubuntu-latest'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
- name: 'Setup Pages'
uses: 'actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b' # ratchet:actions/configure-pages@v5
- name: 'Build with Jekyll'
uses: 'actions/jekyll-build-pages@44a6e6beabd48582f863aeeb6cb2151cc1716697' # ratchet:actions/jekyll-build-pages@v1
with:
source: './'
destination: './_site'
- name: 'Upload artifact'
uses: 'actions/upload-pages-artifact@56afc609e74202658d3ffba0e8f6dda462b719fa' # ratchet:actions/upload-pages-artifact@v3
deploy:
if: "github.repository == 'google-gemini/gemini-cli'"
environment:
name: 'github-pages'
url: '${{ steps.deployment.outputs.page_url }}'
runs-on: 'ubuntu-latest'
needs: 'build'
steps:
- name: 'Deploy to GitHub Pages'
id: 'deployment'
uses: 'actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e' # ratchet:actions/deploy-pages@v4
================================================
FILE: .github/workflows/docs-rebuild.yml
================================================
name: 'Trigger Docs Rebuild'
on:
push:
branches:
- 'main'
paths:
- 'docs/**'
jobs:
trigger-rebuild:
if: "github.repository == 'google-gemini/gemini-cli'"
runs-on: 'ubuntu-latest'
steps:
- name: 'Trigger rebuild'
run: |
curl -X POST \
-H "Content-Type: application/json" \
-d '{}' \
"${{ secrets.DOCS_REBUILD_URL }}"
================================================
FILE: .github/workflows/eval.yml
================================================
name: 'Eval'
on:
workflow_dispatch:
defaults:
run:
shell: 'bash'
permissions:
contents: 'read'
id-token: 'write'
packages: 'read'
jobs:
eval:
name: 'Eval'
if: >-
github.repository == 'google-gemini/gemini-cli'
runs-on: 'ubuntu-latest'
container:
image: 'ghcr.io/google-gemini/gemini-cli-swe-agent-eval@sha256:cd5edc4afd2245c1f575e791c0859b3c084a86bb3bd9a6762296da5162b35a8f'
credentials:
username: '${{ github.actor }}'
password: '${{ secrets.GITHUB_TOKEN }}'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
DEFAULT_VERTEXAI_PROJECT: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
GOOGLE_CLOUD_PROJECT: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
GEMINI_API_KEY: '${{ secrets.EVAL_GEMINI_API_KEY }}'
GCLI_LOCAL_FILE_TELEMETRY: 'True'
EVAL_GCS_BUCKET: '${{ vars.EVAL_GCS_ARTIFACTS_BUCKET }}'
steps:
- name: 'Authenticate to Google Cloud'
id: 'auth'
uses: 'google-github-actions/auth@c200f3691d83b41bf9bbd8638997a462592937ed' # ratchet:exclude pin@v2.1.7
with:
project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
token_format: 'access_token'
access_token_scopes: 'https://www.googleapis.com/auth/cloud-platform'
- name: 'Run evaluation'
working-directory: '/app'
run: |
poetry run exp_run --experiment-mode=on-demand --branch-or-commit="${GITHUB_REF_NAME}" --model-name=gemini-2.5-pro --dataset=swebench_verified --concurrency=15
poetry run python agent_prototypes/scripts/parse_gcli_logs_experiment.py --experiment_dir=experiments/adhoc/gcli_temp_exp --gcs-bucket="${EVAL_GCS_BUCKET}" --gcs-path=gh_action_artifacts
================================================
FILE: .github/workflows/evals-nightly.yml
================================================
# Nightly eval suite: runs evals across a matrix of Gemini models (three
# attempts per model to smooth over flakiness), then aggregates all results
# into a single job summary.
name: 'Evals: Nightly'
on:
  schedule:
    - cron: '0 1 * * *' # Runs at 1 AM every day
  workflow_dispatch:
    inputs:
      run_all:
        description: 'Run all evaluations (including usually passing)'
        type: 'boolean'
        default: true
      test_name_pattern:
        description: 'Test name pattern or file name'
        required: false
        type: 'string'
permissions:
  contents: 'read'
  checks: 'write'
  actions: 'read'
jobs:
  evals:
    name: 'Evals (USUALLY_PASSING) nightly run'
    runs-on: 'gemini-cli-ubuntu-16-core'
    # Upstream repository only — do not burn API quota in forks.
    if: "github.repository == 'google-gemini/gemini-cli'"
    strategy:
      fail-fast: false
      matrix:
        model:
          - 'gemini-3.1-pro-preview-customtools'
          - 'gemini-3-pro-preview'
          - 'gemini-3-flash-preview'
          - 'gemini-2.5-pro'
          - 'gemini-2.5-flash'
          - 'gemini-2.5-flash-lite'
        # Three independent attempts per model; each uploads its own artifact.
        run_attempt: [1, 2, 3]
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
      - name: 'Set up Node.js'
        uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
        with:
          node-version-file: '.nvmrc'
          cache: 'npm'
      - name: 'Install dependencies'
        run: 'npm ci'
      - name: 'Build project'
        run: 'npm run build'
      - name: 'Create logs directory'
        run: 'mkdir -p evals/logs'
      - name: 'Run Evals'
        # Eval failures must not fail the job; logs are still uploaded and
        # aggregated below.
        continue-on-error: true
        env:
          GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
          GEMINI_MODEL: '${{ matrix.model }}'
          # Empty (scheduled runs) or 'true' both evaluate to true here, so
          # everything runs unless run_all is explicitly set to false.
          RUN_EVALS: "${{ github.event.inputs.run_all != 'false' }}"
          TEST_NAME_PATTERN: '${{ github.event.inputs.test_name_pattern }}'
        run: |
          CMD="npm run test:all_evals"
          PATTERN="${TEST_NAME_PATTERN}"
          if [[ -n "$PATTERN" ]]; then
            # A pattern that looks like a file (extension or path separator)
            # is passed as a positional file filter; otherwise it is treated
            # as a test-name filter (-t).
            if [[ "$PATTERN" == *.ts || "$PATTERN" == *.js || "$PATTERN" == */* ]]; then
              $CMD -- "$PATTERN"
            else
              $CMD -- -t "$PATTERN"
            fi
          else
            $CMD
          fi
      - name: 'Upload Logs'
        if: 'always()'
        uses: 'actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02' # ratchet:actions/upload-artifact@v4
        with:
          name: 'eval-logs-${{ matrix.model }}-${{ matrix.run_attempt }}'
          path: 'evals/logs'
          retention-days: 7
  aggregate-results:
    name: 'Aggregate Results'
    needs: ['evals']
    # always() so a summary is produced even when some eval jobs failed.
    if: "github.repository == 'google-gemini/gemini-cli' && always()"
    runs-on: 'gemini-cli-ubuntu-16-core'
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
      - name: 'Download Logs'
        uses: 'actions/download-artifact@cc203385981b70ca67e1cc392babf9cc229d5806' # ratchet:actions/download-artifact@v4
        with:
          path: 'artifacts'
      - name: 'Generate Summary'
        env:
          GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
        run: 'node scripts/aggregate_evals.js artifacts >> "$GITHUB_STEP_SUMMARY"'
================================================
FILE: .github/workflows/gemini-automated-issue-dedup.yml
================================================
# Finds likely duplicates of newly opened/reopened issues (or on demand via a
# maintainer's '@gemini-cli /deduplicate' comment, or manual dispatch) and
# labels them 'status/possible-duplicate'.
name: '🏷️ Gemini Automated Issue Deduplication'
on:
  issues:
    types:
      - 'opened'
      - 'reopened'
  issue_comment:
    types:
      - 'created'
  workflow_dispatch:
    inputs:
      issue_number:
        description: 'issue number to dedup'
        required: true
        type: 'number'
# One run per issue; a newer trigger for the same issue cancels the older one.
# NOTE(review): github.event.issue.number is empty for workflow_dispatch, so
# all manual runs share a single concurrency group — confirm that is intended.
concurrency:
  group: '${{ github.workflow }}-${{ github.event.issue.number }}'
  cancel-in-progress: true
defaults:
  run:
    shell: 'bash'
jobs:
  # Job 1: ask Gemini (with the deduplication MCP server) for duplicates of
  # the triggering issue, exported as a CSV job output for job 2.
  find-duplicates:
    # Upstream repo only, gated on the TRIAGE_DEDUPLICATE_ISSUES flag; comment
    # triggers are restricted to owners/members/collaborators.
    if: |-
      github.repository == 'google-gemini/gemini-cli' &&
      vars.TRIAGE_DEDUPLICATE_ISSUES != '' &&
      (github.event_name == 'issues' ||
      github.event_name == 'workflow_dispatch' ||
      (github.event_name == 'issue_comment' &&
      contains(github.event.comment.body, '@gemini-cli /deduplicate') &&
      (github.event.comment.author_association == 'OWNER' ||
      github.event.comment.author_association == 'MEMBER' ||
      github.event.comment.author_association == 'COLLABORATOR')))
    permissions:
      contents: 'read'
      id-token: 'write' # Required for WIF, see https://docs.github.com/en/actions/how-tos/secure-your-work/security-harden-deployments/oidc-in-google-cloud-platform#adding-permissions-settings
      issues: 'read'
      statuses: 'read'
      packages: 'read'
    timeout-minutes: 20
    runs-on: 'ubuntu-latest'
    outputs:
      # DUPLICATE_ISSUES_CSV is written to $GITHUB_ENV by the Gemini prompt.
      duplicate_issues_csv: '${{ env.DUPLICATE_ISSUES_CSV }}'
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
      - name: 'Log in to GitHub Container Registry'
        uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3
        with:
          registry: 'ghcr.io'
          username: '${{ github.actor }}'
          password: '${{ secrets.GITHUB_TOKEN }}'
- name: 'Find Duplicate Issues'
uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
id: 'gemini_issue_deduplication'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
ISSUE_TITLE: '${{ github.event.issue.title }}'
ISSUE_BODY: '${{ github.event.issue.body }}'
ISSUE_NUMBER: '${{ github.event.issue.number }}'
REPOSITORY: '${{ github.repository }}'
FIRESTORE_PROJECT: '${{ vars.FIRESTORE_PROJECT }}'
with:
gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
settings: |-
{
"mcpServers": {
"issue_deduplication": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"--network", "host",
"-e", "GITHUB_TOKEN",
"-e", "GEMINI_API_KEY",
"-e", "DATABASE_TYPE",
"-e", "FIRESTORE_DATABASE_ID",
"-e", "GCP_PROJECT",
"-e", "GOOGLE_APPLICATION_CREDENTIALS=/app/gcp-credentials.json",
"-v", "${GOOGLE_APPLICATION_CREDENTIALS}:/app/gcp-credentials.json",
"ghcr.io/google-gemini/gemini-cli-issue-triage@sha256:e3de1523f6c83aabb3c54b76d08940a2bf42febcb789dd2da6f95169641f94d3"
],
"env": {
"GITHUB_TOKEN": "${GITHUB_TOKEN}",
"GEMINI_API_KEY": "${{ secrets.GEMINI_API_KEY }}",
"DATABASE_TYPE":"firestore",
"GCP_PROJECT": "${FIRESTORE_PROJECT}",
"FIRESTORE_DATABASE_ID": "(default)",
"GOOGLE_APPLICATION_CREDENTIALS": "${GOOGLE_APPLICATION_CREDENTIALS}"
},
"timeout": 600000
}
},
"maxSessionTurns": 25,
"coreTools": [
"run_shell_command(echo)",
"run_shell_command(gh issue view)"
],
"telemetry": {
"enabled": true,
"target": "gcp"
}
}
prompt: |-
## Role
You are an issue de-duplication assistant. Your goal is to find
duplicate issues for a given issue.
## Steps
1. **Find Potential Duplicates:**
            - The repository is ${{ github.repository }} and the issue number is ${{ github.event.issue.number || github.event.inputs.issue_number }}.
- Use the `duplicates` tool with the `repo` and `issue_number` to find potential duplicates for the current issue. Do not use the `threshold` parameter.
- If no duplicates are found, you are done.
- Print the JSON output from the `duplicates` tool to the logs.
2. **Refine Duplicates List (if necessary):**
- If the `duplicates` tool returns between 1 and 14 results, you must refine the list.
- For each potential duplicate issue, run `gh issue view --json title,body,comments` to fetch its content.
- Also fetch the content of the original issue: `gh issue view "${ISSUE_NUMBER}" --json title,body,comments`.
- Carefully analyze the content (title, body, comments) of the original issue and all potential duplicates.
- It is very important if the comments on either issue mention that they are not duplicates of each other, to treat them as not duplicates.
- Based on your analysis, create a final list containing only the issues you are highly confident are actual duplicates.
- If your final list is empty, you are done.
- Print to the logs if you omitted any potential duplicates based on your analysis.
- If the `duplicates` tool returned 15+ results, use the top 15 matches (based on descending similarity score value) to perform this step.
3. **Output final duplicates list as CSV:**
- Convert the list of appropriate duplicate issue numbers into a comma-separated list (CSV). If there are no appropriate duplicates, use the empty string.
- Use the "echo" shell command to append the CSV of issue numbers into the filepath referenced by the environment variable "${GITHUB_ENV}":
echo "DUPLICATE_ISSUES_CSV=[DUPLICATE_ISSUES_AS_CSV]" >> "${GITHUB_ENV}"
## Guidelines
- Only use the `duplicates` and `run_shell_command` tools.
- The `run_shell_command` tool can be used with `gh issue view`.
- Do not download or read media files like images, videos, or links. The `--json` flag for `gh issue view` will prevent this.
- Do not modify the issue content or status.
- Do not add comments or labels.
- Reference all shell variables as "${VAR}" (with quotes and braces).
  # Job 2: post/update a sticky comment listing the duplicates found by job 1
  # and apply the 'status/possible-duplicate' label.
  add-comment-and-label:
    needs: 'find-duplicates'
    # Same gating as find-duplicates, plus: only run when job 1 actually
    # produced a non-empty CSV of duplicate issue numbers.
    if: |-
      github.repository == 'google-gemini/gemini-cli' &&
      vars.TRIAGE_DEDUPLICATE_ISSUES != '' &&
      needs.find-duplicates.outputs.duplicate_issues_csv != '' &&
      (
        github.event_name == 'issues' ||
        github.event_name == 'workflow_dispatch' ||
        (
          github.event_name == 'issue_comment' &&
          contains(github.event.comment.body, '@gemini-cli /deduplicate') &&
          (
            github.event.comment.author_association == 'OWNER' ||
            github.event.comment.author_association == 'MEMBER' ||
            github.event.comment.author_association == 'COLLABORATOR'
          )
        )
      )
    permissions:
      issues: 'write'
    timeout-minutes: 5
    runs-on: 'ubuntu-latest'
    steps:
      # Prefer a GitHub App token for the comment/label API calls; the step
      # below falls back to GITHUB_TOKEN when the App is not configured.
      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2
        with:
          app-id: '${{ secrets.APP_ID }}'
          private-key: '${{ secrets.PRIVATE_KEY }}'
          permission-issues: 'write'
- name: 'Comment and Label Duplicate Issue'
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
env:
DUPLICATES_OUTPUT: '${{ needs.find-duplicates.outputs.duplicate_issues_csv }}'
with:
github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
script: |-
const rawCsv = process.env.DUPLICATES_OUTPUT;
core.info(`Raw duplicates CSV: ${rawCsv}`);
const duplicateIssues = rawCsv.split(',').map(s => s.trim()).filter(s => s);
if (duplicateIssues.length === 0) {
core.info('No duplicate issues found. Nothing to do.');
return;
}
const issueNumber = ${{ github.event.issue.number }};
function formatCommentBody(issues, updated = false) {
const header = updated
? 'Found possible duplicate issues (updated):'
: 'Found possible duplicate issues:';
const issuesList = issues.map(num => `- #${num}`).join('\n');
const footer = 'If you believe this is not a duplicate, please remove the `status/possible-duplicate` label.';
const magicComment = '';
return `${header}\n\n${issuesList}\n\n${footer}\n${magicComment}`;
}
const newCommentBody = formatCommentBody(duplicateIssues);
const newUpdatedCommentBody = formatCommentBody(duplicateIssues, true);
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
});
const magicComment = '';
const existingComment = comments.find(comment =>
comment.user.type === 'Bot' && comment.body.includes(magicComment)
);
let commentMade = false;
if (existingComment) {
// To check if lists are same, just compare the formatted bodies without headers.
const existingBodyForCompare = existingComment.body.substring(existingComment.body.indexOf('- #'));
const newBodyForCompare = newCommentBody.substring(newCommentBody.indexOf('- #'));
if (existingBodyForCompare.trim() !== newBodyForCompare.trim()) {
core.info(`Updating existing comment ${existingComment.id}`);
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existingComment.id,
body: newUpdatedCommentBody,
});
commentMade = true;
} else {
core.info('Existing comment is up-to-date. Nothing to do.');
}
} else {
core.info('Creating new comment.');
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
body: newCommentBody,
});
commentMade = true;
}
if (commentMade) {
core.info('Adding "status/possible-duplicate" label.');
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
labels: ['status/possible-duplicate'],
});
}
================================================
FILE: .github/workflows/gemini-automated-issue-triage.yml
================================================
# Uses Gemini to apply exactly one 'area/*' label to new/reopened issues.
# Also runnable on demand (workflow_dispatch), from other workflows
# (workflow_call), or via a maintainer's '@gemini-cli /triage' comment.
name: '🏷️ Gemini Automated Issue Triage'
on:
  issues:
    types:
      - 'opened'
      - 'reopened'
  issue_comment:
    types:
      - 'created'
  workflow_dispatch:
    inputs:
      issue_number:
        description: 'issue number to triage'
        required: true
        type: 'number'
  workflow_call:
    inputs:
      issue_number:
        description: 'issue number to triage'
        required: false
        type: 'string'
# One run per issue, whichever trigger supplied the issue number.
concurrency:
  group: '${{ github.workflow }}-${{ github.event.issue.number || github.event.inputs.issue_number || inputs.issue_number }}'
  cancel-in-progress: true
defaults:
  run:
    shell: 'bash'
permissions:
  contents: 'read'
  id-token: 'write'
  issues: 'write'
  statuses: 'write'
  packages: 'read'
  actions: 'write' # Required for cancelling a workflow run
jobs:
  triage-issue:
    # Run for issue events, maintainer '@gemini-cli /triage' comments, or
    # manual dispatch — but skip issues that already carry an 'area/' label.
    if: |-
      (github.repository == 'google-gemini/gemini-cli' || github.repository == 'google-gemini/maintainers-gemini-cli') &&
      (
        github.event_name == 'workflow_dispatch' ||
        (
          (github.event_name == 'issues' || github.event_name == 'issue_comment') &&
          (github.event_name != 'issue_comment' || (
            contains(github.event.comment.body, '@gemini-cli /triage') &&
            (github.event.comment.author_association == 'OWNER' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'COLLABORATOR')
          ))
        )
      ) &&
      !contains(github.event.issue.labels.*.name, 'area/')
    timeout-minutes: 5
    runs-on: 'ubuntu-latest'
    steps:
      # Manual runs have no issue payload in the event, so fetch the issue
      # via the API and expose title/body/labels as step outputs.
      - name: 'Get issue data for manual trigger'
        id: 'get_issue_data'
        if: |-
          github.event_name == 'workflow_dispatch'
        uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
        with:
          github-token: '${{ secrets.GITHUB_TOKEN }}'
          script: |
            const issueNumber = ${{ github.event.inputs.issue_number || inputs.issue_number }};
            const { data: issue } = await github.rest.issues.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: issueNumber,
            });
            core.setOutput('title', issue.title);
            core.setOutput('body', issue.body);
            core.setOutput('labels', issue.labels.map(label => label.name).join(','));
            return issue;
      # The job-level 'area/' guard only sees the event payload; re-check the
      # freshly fetched labels for manual runs and bail out if already triaged.
      - name: 'Manual Trigger Pre-flight Checks'
        if: |-
          github.event_name == 'workflow_dispatch'
        env:
          ISSUE_NUMBER_INPUT: '${{ github.event.inputs.issue_number || inputs.issue_number }}'
          LABELS: '${{ steps.get_issue_data.outputs.labels }}'
        run: |
          if echo "${LABELS}" | grep -q 'area/'; then
            echo "Issue #${ISSUE_NUMBER_INPUT} already has 'area/' label. Stopping workflow."
            exit 1
          fi
          echo "Manual triage checks passed."
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
      # Mint a GitHub App token when the App is configured; later steps fall
      # back to GITHUB_TOKEN if this step was skipped.
      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        env:
          APP_ID: '${{ secrets.APP_ID }}'
        if: |-
          ${{ env.APP_ID != '' }}
        uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2
        with:
          app-id: '${{ secrets.APP_ID }}'
          private-key: '${{ secrets.PRIVATE_KEY }}'
          permission-issues: 'write'
      # Offer the model only the allow-listed area labels that actually exist
      # in the repository.
      - name: 'Get Repository Labels'
        id: 'get_labels'
        uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
        with:
          github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          script: |-
            const { data: labels } = await github.rest.issues.listLabelsForRepo({
              owner: context.repo.owner,
              repo: context.repo.repo,
            });
            const allowedLabels = [
              'area/agent',
              'area/enterprise',
              'area/non-interactive',
              'area/core',
              'area/security',
              'area/platform',
              'area/extensions',
              'area/documentation',
              'area/unknown'
            ];
            const labelNames = labels.map(label => label.name).filter(name => allowedLabels.includes(name));
            core.setOutput('available_labels', labelNames.join(','));
            core.info(`Found ${labelNames.length} labels: ${labelNames.join(', ')}`);
            return labelNames;
      # Analyze the issue with Gemini. The issue text is untrusted input, so
      # no auth token is passed and the tool surface is restricted to echo.
      - name: 'Run Gemini Issue Analysis'
        uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
        id: 'gemini_issue_analysis'
        env:
          GITHUB_TOKEN: '' # Do not pass any auth token here since this runs on untrusted inputs
          # Manual runs use the fetched issue data; event runs use the payload.
          ISSUE_TITLE: >-
            ${{ github.event_name == 'workflow_dispatch' && steps.get_issue_data.outputs.title || github.event.issue.title }}
          ISSUE_BODY: >-
            ${{ github.event_name == 'workflow_dispatch' && steps.get_issue_data.outputs.body || github.event.issue.body }}
          ISSUE_NUMBER: >-
            ${{ github.event_name == 'workflow_dispatch' && (github.event.inputs.issue_number || inputs.issue_number) || github.event.issue.number }}
          REPOSITORY: '${{ github.repository }}'
          AVAILABLE_LABELS: '${{ steps.get_labels.outputs.available_labels }}'
        with:
          gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
          gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
          gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
          gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
          use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
          use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
          settings: |-
            {
              "maxSessionTurns": 25,
              "telemetry": {
                "enabled": true,
                "target": "gcp"
              },
              "coreTools": [
                "run_shell_command(echo)"
              ]
            }
prompt: |-
## Role
You are an issue triage assistant. Your role is to analyze a GitHub issue and determine the single most appropriate area/ label based on the definitions provided.
## Steps
1. Review the issue title and body: ${{ env.ISSUE_TITLE }} and ${{ env.ISSUE_BODY }}.
2. Review the available labels: ${{ env.AVAILABLE_LABELS }}.
3. Select exactly one area/ label that best matches the issue based on Reference 1: Area Definitions.
4. Fallback Logic:
- If you cannot confidently determine the correct area/ label from the definitions, you must use area/unknown.
5. Output your selected label in JSON format and nothing else. Example:
{"labels_to_set": ["area/core"]}
## Guidelines
- Your output must contain exactly one area/ label.
- Triage only the current issue based on its title and body.
- Output only valid JSON format.
- Do not include any explanation or additional text, just the JSON.
Reference 1: Area Definitions
area/agent
- Description: Issues related to the "brain" of the CLI. This includes the core agent logic, model quality, tool/function calling, and memory.
- Example Issues:
"I am not getting a reasonable or expected response."
"The model is not calling the tool I expected."
"The web search tool is not working as expected."
"Feature request for a new built-in tool (e.g., read file, write file)."
"The generated code is poor quality or incorrect."
"The model seems stuck in a loop."
"The response from the model is malformed (e.g., broken JSON, bad formatting)."
"Concerns about unnecessary token consumption."
"Issues with how memory or chat history is managed."
"Issues with sub-agents."
"Model is switching from one to another unexpectedly."
area/enterprise
- Description: Issues specific to enterprise-level features, including telemetry, policy, and licenses.
- Example Issues:
"Usage data is not appearing in our telemetry dashboard."
"A user is able to perform an action that should be blocked by an admin policy."
"Questions about billing, licensing tiers, or enterprise quotas."
area/non-interactive
- Description: Issues related to using the CLI in automated or non-interactive environments (headless mode).
- Example Issues:
"Problems using the CLI as an SDK in another surface."
"The CLI is behaving differently when run from a shell script vs. an interactive terminal."
"GitHub action is failing."
"I am having trouble running the CLI in headless mode"
area/core
- Description: Issues with the fundamental CLI app itself. This includes the user interface (UI/UX), installation, OS compatibility, and performance.
- Example Issues:
"I am seeing my screen flicker when using the CLI."
"The output in my terminal is malformed or unreadable."
"Theme changes are not taking effect."
"Keyboard inputs (e.g., arrow keys, Ctrl+C) are not being recognized."
"The CLI failed to install or update."
"An issue specific to running on Windows, macOS, or Linux."
"Problems with command parsing, flags, or argument handling."
"High CPU or memory usage by the CLI process."
"Issues related to multi-modality (e.g., handling image inputs)."
"Problems with the IDE integration connection or installation"
area/security
- Description: Issues related to user authentication, authorization, data security, and privacy.
- Example Issues:
"I am unable to sign in."
"The login flow is selecting the wrong authentication path"
"Problems with API key handling or credential storage."
"A report of a security vulnerability"
"Concerns about data sanitization or potential data leaks."
"Issues or requests related to privacy controls."
"Preventing unauthorized data access."
area/platform
- Description: Issues related to CI/CD, release management, testing, eval infrastructure, capacity, quota management, and sandbox environments.
- Example Issues:
"I am getting a 429 'Resource Exhausted' or 500-level server error."
"General slowness or high latency from the service."
"The build script is broken on the main branch."
"Tests are failing in the CI/CD pipeline."
"Issues with the release management or publishing process."
"User is running out of capacity."
"Problems specific to the sandbox or staging environments."
"Questions about quota limits or requests for increases."
area/extensions
- Description: Issues related to the extension ecosystem, including the marketplace and website.
- Example Issues:
"Bugs related to the extension marketplace website."
"Issues with a specific extension."
"Feature request for the extension ecosystem."
area/documentation
- Description: Issues related to user-facing documentation and other content on the documentation website.
- Example Issues:
"A typo in a README file."
"DOCS: A command is not working as described in the documentation."
"A request for a new documentation page."
"Instructions missing for skills feature"
area/unknown
- Description: Issues that do not clearly fit into any other defined area/ category, or where information is too limited to make a determination. Use this when no other area is appropriate.
      # Parse the model's JSON verdict — tolerating markdown fences and
      # leading log noise — and apply the single selected area label.
      - name: 'Apply Labels to Issue'
        if: |-
          ${{ steps.gemini_issue_analysis.outputs.summary != '' }}
        env:
          REPOSITORY: '${{ github.repository }}'
          ISSUE_NUMBER: '${{ github.event.issue.number || github.event.inputs.issue_number }}'
          LABELS_OUTPUT: '${{ steps.gemini_issue_analysis.outputs.summary }}'
        uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
        with:
          github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          script: |
            const rawOutput = process.env.LABELS_OUTPUT;
            core.info(`Raw output from model: ${rawOutput}`);
            let parsedLabels;
            try {
              // First, try to parse the raw output as JSON.
              parsedLabels = JSON.parse(rawOutput);
            } catch (jsonError) {
              // If that fails, check for a markdown code block.
              core.warning(`Direct JSON parsing failed: ${jsonError.message}. Trying to extract from a markdown block.`);
              const jsonMatch = rawOutput.match(/```json\s*([\s\S]*?)\s*```/);
              if (jsonMatch && jsonMatch[1]) {
                try {
                  parsedLabels = JSON.parse(jsonMatch[1].trim());
                } catch (markdownError) {
                  core.setFailed(`Failed to parse JSON even after extracting from markdown block: ${markdownError.message}\nRaw output: ${rawOutput}`);
                  return;
                }
              } else {
                // If no markdown block, try to find a raw JSON object in the output.
                // The CLI may include debug/log lines (e.g. telemetry init, YOLO mode)
                // before the actual JSON response.
                const jsonObjectMatch = rawOutput.match(/(\{[\s\S]*"labels_to_set"[\s\S]*\})/);
                if (jsonObjectMatch) {
                  try {
                    parsedLabels = JSON.parse(jsonObjectMatch[0]);
                  } catch (extractError) {
                    core.setFailed(`Found JSON-like content but failed to parse: ${extractError.message}\nRaw output: ${rawOutput}`);
                    return;
                  }
                } else {
                  core.setFailed(`Output is not valid JSON and does not contain extractable JSON.\nRaw output: ${rawOutput}`);
                  return;
                }
              }
            }
            const issueNumber = parseInt(process.env.ISSUE_NUMBER);
            // The prompt demands exactly one area/ label; anything else fails.
            const labelsToAdd = parsedLabels.labels_to_set || [];
            if (labelsToAdd.length !== 1) {
              core.setFailed(`Expected exactly 1 label (area/), but got ${labelsToAdd.length}. Labels: ${labelsToAdd.join(', ')}`);
              return;
            }
            // Set labels based on triage result
            await github.rest.issues.addLabels({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: issueNumber,
              labels: labelsToAdd
            });
            core.info(`Successfully added labels for #${issueNumber}: ${labelsToAdd.join(', ')}`);
- name: 'Post Issue Analysis Failure Comment'
if: |-
${{ failure() && steps.gemini_issue_analysis.outcome == 'failure' }}
env:
ISSUE_NUMBER: '${{ github.event.issue.number || github.event.inputs.issue_number }}'
RUN_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
with:
github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
script: |-
github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: parseInt(process.env.ISSUE_NUMBER),
body: 'There is a problem with the Gemini CLI issue triaging. Please check the [action logs](${process.env.RUN_URL}) for details.'
})
================================================
FILE: .github/workflows/gemini-scheduled-issue-dedup.yml
================================================
# Hourly maintenance: refreshes the issue-embedding database that backs the
# automated issue deduplication workflow.
name: '📋 Gemini Scheduled Issue Deduplication'
on:
  schedule:
    - cron: '0 * * * *' # Runs every hour
  workflow_dispatch:
# Only one refresh at a time; a new trigger cancels an in-flight one.
concurrency:
  group: '${{ github.workflow }}'
  cancel-in-progress: true
defaults:
  run:
    shell: 'bash'
jobs:
  refresh-embeddings:
    # Upstream repo only, gated on the TRIAGE_DEDUPLICATE_ISSUES feature flag.
    if: |-
      ${{ vars.TRIAGE_DEDUPLICATE_ISSUES != '' && github.repository == 'google-gemini/gemini-cli' }}
    permissions:
      contents: 'read'
      id-token: 'write' # Required for WIF, see https://docs.github.com/en/actions/how-tos/secure-your-work/security-harden-deployments/oidc-in-google-cloud-platform#adding-permissions-settings
      issues: 'read'
      statuses: 'read'
      packages: 'read'
    timeout-minutes: 20
    runs-on: 'ubuntu-latest'
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
      - name: 'Log in to GitHub Container Registry'
        uses: 'docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1' # ratchet:docker/login-action@v3
        with:
          registry: 'ghcr.io'
          username: '${{ github.actor }}'
          password: '${{ secrets.GITHUB_TOKEN }}'
      - name: 'Run Gemini Issue Deduplication Refresh'
        uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
        id: 'gemini_refresh_embeddings'
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          # NOTE(review): this workflow only triggers on schedule/dispatch, so
          # github.event.issue is never populated and the three ISSUE_* vars
          # below are always empty — confirm they can be removed.
          ISSUE_TITLE: '${{ github.event.issue.title }}'
          ISSUE_BODY: '${{ github.event.issue.body }}'
          ISSUE_NUMBER: '${{ github.event.issue.number }}'
          REPOSITORY: '${{ github.repository }}'
          FIRESTORE_PROJECT: '${{ vars.FIRESTORE_PROJECT }}'
        with:
          gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
          gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
          gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
          gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
          use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
          use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
          settings: |-
            {
              "mcpServers": {
                "issue_deduplication": {
                  "command": "docker",
                  "args": [
                    "run",
                    "-i",
                    "--rm",
                    "--network", "host",
                    "-e", "GITHUB_TOKEN",
                    "-e", "GEMINI_API_KEY",
                    "-e", "DATABASE_TYPE",
                    "-e", "FIRESTORE_DATABASE_ID",
                    "-e", "GCP_PROJECT",
                    "-e", "GOOGLE_APPLICATION_CREDENTIALS=/app/gcp-credentials.json",
                    "-v", "${GOOGLE_APPLICATION_CREDENTIALS}:/app/gcp-credentials.json",
                    "ghcr.io/google-gemini/gemini-cli-issue-triage@sha256:e3de1523f6c83aabb3c54b76d08940a2bf42febcb789dd2da6f95169641f94d3"
                  ],
                  "env": {
                    "GITHUB_TOKEN": "${GITHUB_TOKEN}",
                    "GEMINI_API_KEY": "${{ secrets.GEMINI_API_KEY }}",
                    "DATABASE_TYPE":"firestore",
                    "GCP_PROJECT": "${FIRESTORE_PROJECT}",
                    "FIRESTORE_DATABASE_ID": "(default)",
                    "GOOGLE_APPLICATION_CREDENTIALS": "${GOOGLE_APPLICATION_CREDENTIALS}"
                  },
                  "timeout": 600000
                }
              },
              "maxSessionTurns": 25,
              "coreTools": [
                "run_shell_command(echo)"
              ],
              "telemetry": {
                "enabled": true,
                "target": "gcp"
              }
            }
prompt: |-
## Role
You are a database maintenance assistant for a GitHub issue deduplication system.
## Goal
Your sole responsibility is to refresh the embeddings for all open issues in the repository to ensure the deduplication database is up-to-date.
## Steps
1. **Extract Repository Information:** The repository is ${{ github.repository }}.
2. **Refresh Embeddings:** Call the `refresh` tool with the correct `repo`. Do not use the `force` parameter.
3. **Log Output:** Print the JSON output from the `refresh` tool to the logs.
## Guidelines
- Only use the `refresh` tool.
- Do not attempt to find duplicates or modify any issues.
- Your only task is to call the `refresh` tool and log its output.
================================================
FILE: .github/workflows/gemini-scheduled-issue-triage.yml
================================================
name: '📋 Gemini Scheduled Issue Triage'
on:
issues:
types:
- 'opened'
- 'reopened'
schedule:
- cron: '0 * * * *' # Runs every hour
workflow_dispatch:
concurrency:
group: '${{ github.workflow }}-${{ github.event.number || github.run_id }}'
cancel-in-progress: true
defaults:
run:
shell: 'bash'
permissions:
id-token: 'write'
issues: 'write'
jobs:
triage-issues:
timeout-minutes: 10
if: |-
${{ github.repository == 'google-gemini/gemini-cli' }}
runs-on: 'ubuntu-latest'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
- name: 'Generate GitHub App Token'
id: 'generate_token'
uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2
with:
app-id: '${{ secrets.APP_ID }}'
private-key: '${{ secrets.PRIVATE_KEY }}'
permission-issues: 'write'
- name: 'Get issue from event'
if: |-
${{ github.event_name == 'issues' }}
id: 'get_issue_from_event'
env:
ISSUE_EVENT: '${{ toJSON(github.event.issue) }}'
run: |
set -euo pipefail
ISSUE_JSON=$(echo "$ISSUE_EVENT" | jq -c '[{number: .number, title: .title, body: .body}]')
echo "issues_to_triage=${ISSUE_JSON}" >> "${GITHUB_OUTPUT}"
echo "✅ Found issue #${{ github.event.issue.number }} from event to triage! 🎯"
- name: 'Find untriaged issues'
if: |-
${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
id: 'find_issues'
env:
GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}'
GITHUB_REPOSITORY: '${{ github.repository }}'
run: |-
set -euo pipefail
echo '🔍 Finding issues missing area labels...'
NO_AREA_ISSUES="$(gh issue list --repo "${GITHUB_REPOSITORY}" \
--search 'is:open is:issue -label:area/core -label:area/agent -label:area/enterprise -label:area/non-interactive -label:area/security -label:area/platform -label:area/extensions -label:area/documentation -label:area/unknown' --limit 100 --json number,title,body)"
echo '🔍 Finding issues missing kind labels...'
NO_KIND_ISSUES="$(gh issue list --repo "${GITHUB_REPOSITORY}" \
--search 'is:open is:issue -label:kind/bug -label:kind/enhancement -label:kind/customer-issue -label:kind/question' --limit 100 --json number,title,body)"
echo '🏷️ Finding issues missing priority labels...'
NO_PRIORITY_ISSUES="$(gh issue list --repo "${GITHUB_REPOSITORY}" \
--search 'is:open is:issue -label:priority/p0 -label:priority/p1 -label:priority/p2 -label:priority/p3 -label:priority/unknown' --limit 100 --json number,title,body)"
echo '🔄 Merging and deduplicating issues...'
ISSUES="$(echo "${NO_AREA_ISSUES}" "${NO_KIND_ISSUES}" "${NO_PRIORITY_ISSUES}" | jq -c -s 'add | unique_by(.number)')"
echo '📝 Setting output for GitHub Actions...'
echo "issues_to_triage=${ISSUES}" >> "${GITHUB_OUTPUT}"
ISSUE_COUNT="$(echo "${ISSUES}" | jq 'length')"
echo "✅ Found ${ISSUE_COUNT} unique issues to triage! 🎯"
# Collect every label defined on the repository so the analysis step can only
# recommend labels that actually exist.
- name: 'Get Repository Labels'
  id: 'get_labels'
  uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
  with:
    github-token: '${{ steps.generate_token.outputs.token }}'
    script: |-
      // listLabelsForRepo is paginated (30 results per page by default);
      // this repository defines more labels than one page holds, so use
      // github.paginate to fetch the complete set rather than a single
      // truncated page.
      const labels = await github.paginate(github.rest.issues.listLabelsForRepo, {
        owner: context.repo.owner,
        repo: context.repo.repo,
        per_page: 100,
      });
      const labelNames = labels.map(label => label.name);
      core.setOutput('available_labels', labelNames.join(','));
      core.info(`Found ${labelNames.length} labels: ${labelNames.join(', ')}`);
      return labelNames;
# Run the Gemini model over the collected issues and produce a JSON array of
# label recommendations. Runs with NO GitHub token and only an `echo` shell
# tool because the issue bodies are untrusted input.
- name: 'Run Gemini Issue Analysis'
  if: |-
    (steps.get_issue_from_event.outputs.issues_to_triage != '' && steps.get_issue_from_event.outputs.issues_to_triage != '[]') ||
    (steps.find_issues.outputs.issues_to_triage != '' && steps.find_issues.outputs.issues_to_triage != '[]')
  uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
  id: 'gemini_issue_analysis'
  env:
    GITHUB_TOKEN: '' # Do not pass any auth token here since this runs on untrusted inputs
    # Event-triggered runs populate get_issue_from_event; scheduled runs
    # populate find_issues. Exactly one of the two is non-empty.
    ISSUES_TO_TRIAGE: '${{ steps.get_issue_from_event.outputs.issues_to_triage || steps.find_issues.outputs.issues_to_triage }}'
    REPOSITORY: '${{ github.repository }}'
    AVAILABLE_LABELS: '${{ steps.get_labels.outputs.available_labels }}'
  with:
    gcp_workload_identity_provider: '${{ vars.GCP_WIF_PROVIDER }}'
    gcp_project_id: '${{ vars.GOOGLE_CLOUD_PROJECT }}'
    gcp_location: '${{ vars.GOOGLE_CLOUD_LOCATION }}'
    gcp_service_account: '${{ vars.SERVICE_ACCOUNT_EMAIL }}'
    gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
    use_vertex_ai: '${{ vars.GOOGLE_GENAI_USE_VERTEXAI }}'
    use_gemini_code_assist: '${{ vars.GOOGLE_GENAI_USE_GCA }}'
    # Restrict the CLI: bounded session length, echo-only shell access, and
    # telemetry exported to GCP.
    settings: |-
      {
        "maxSessionTurns": 25,
        "coreTools": [
          "run_shell_command(echo)"
        ],
        "telemetry": {
          "enabled": true,
          "target": "gcp"
        }
      }
    prompt: |-
      ## Role
      You are an issue triage assistant. Analyze issues and identify
      appropriate labels. Use the available tools to gather information;
      do not ask for information to be provided.
      ## Steps
      1. You are only able to use the echo command. Review the available labels in the environment variable: "${AVAILABLE_LABELS}".
      2. Check environment variable for issues to triage: $ISSUES_TO_TRIAGE (JSON array of issues)
      3. Review the issue title, body and any comments provided in the environment variables.
      4. Identify the most relevant labels from the existing labels, specifically focusing on area/*, kind/* and priority/*.
      5. Label Policy:
        - If the issue already has a kind/ label, do not change it.
        - If the issue already has a priority/ label, do not change it.
        - If the issue already has an area/ label, do not change it.
        - If any of these are missing, select exactly ONE appropriate label for the missing category.
      6. Identify other applicable labels based on the issue content, such as status/*, help wanted, good first issue, etc.
      7. Give me a single short explanation about why you are selecting each label in the process.
      8. Output a JSON array of objects, each containing the issue number
      and the labels to add and remove, along with an explanation. For example:
      ```
      [
        {
          "issue_number": 123,
          "labels_to_add": ["area/core", "kind/bug", "priority/p2"],
          "labels_to_remove": ["status/need-triage"],
          "explanation": "This issue is a UI bug that needs to be addressed with medium priority."
        }
      ]
      ```
      If an issue cannot be classified, do not include it in the output array.
      9. For each issue please check if CLI version is present, this is usually in the output of the /about command and will look like 0.1.5
        - Anything more than 6 versions older than the most recent should add the status/need-retesting label
      10. If you see that the issue doesn't look like it has sufficient information recommend the status/need-information label and leave a comment politely requesting the relevant information, eg.. if repro steps are missing request for repro steps. if version information is missing request for version information into the explanation section below.
      11. If you think an issue might be a Priority/P0 do not apply the priority/p0 label. Instead apply a status/manual-triage label and include a note in your explanation.
      12. If you are uncertain about a category, use the area/unknown, kind/question, or priority/unknown labels as appropriate. If you are extremely uncertain, apply the status/manual-triage label.
      ## Guidelines
      - Output only valid JSON format
      - Do not include any explanation or additional text, just the JSON
      - Only use labels that already exist in the repository.
      - Do not add comments or modify the issue content.
      - Do not remove the following labels maintainer, help wanted or good first issue.
      - Triage only the current issue.
      - Identify only one area/ label.
      - Identify only one kind/ label (Do not apply kind/duplicate or kind/parent-issue)
      - Identify only one priority/ label.
      - Once you categorize the issue if it needs information bump down the priority by 1 eg.. a p0 would become a p1 a p1 would become a p2. P2 and P3 can stay as is in this scenario.
      Categorization Guidelines (Priority):
      P0 - Urgent Blocking Issues:
        - DO NOT APPLY THIS LABEL AUTOMATICALLY. Use status/manual-triage instead.
        - Definition: Urgent, block a significant percentage of the user base, and prevent frequent use of the Gemini CLI.
        - This includes core stability blockers (e.g., authentication failures, broken upgrades), critical crashes, and P0 security vulnerabilities.
        - Impact: Blocks development or testing for the entire team; Major security vulnerability; Causes data loss or corruption with no workaround; Crashes the application or makes a core feature completely unusable for all or most users.
        - Qualifier: Is the main function of the software broken?
      P1 - High-Impact Issues:
        - Definition: Affect a large number of users, blocking them from using parts of the Gemini CLI, or make the CLI frequently unusable even with workarounds available.
        - Impact: A core feature is broken or behaving incorrectly for a large number of users or use cases; Severe performance degradation; No straightforward workaround exists.
        - Qualifier: Is a key feature unusable or giving very wrong results?
      P2 - Significant Issues:
        - Definition: Affect some users significantly, such as preventing the use of certain features or authentication types.
        - Can also be issues that many users complain about, causing annoyance or hindering daily use.
        - Impact: Affects a non-critical feature or a smaller, specific subset of users; An inconvenient but functional workaround is available; Noticeable UI/UX problems that look unprofessional.
        - Qualifier: Is it an annoying but non-blocking problem?
      P3 - Low-Impact Issues:
        - Definition: Typically usability issues that cause annoyance to a limited user base.
        - Includes feature requests that could be addressed in the near future and may be suitable for community contributions.
        - Impact: Minor cosmetic issues; An edge-case bug that is very difficult to reproduce and affects a tiny fraction of users.
        - Qualifier: Is it a "nice-to-fix" issue?
      Categorization Guidelines (Area):
      area/agent: Core Agent, Tools, Memory, Sub-Agents, Hooks, Agent Quality
      area/core: User Interface, OS Support, Core Functionality
      area/documentation: End-user and contributor-facing documentation, website-related
      area/enterprise: Telemetry, Policy, Quota / Licensing
      area/extensions: Gemini CLI extensions capability
      area/non-interactive: GitHub Actions, SDK, 3P Integrations, Shell Scripting, Command line automation
      area/platform: Build infra, Release mgmt, Testing, Eval infra, Capacity, Quota mgmt
      area/security: security related issues
      Additional Context:
      - If users are talking about issues where the model gets downgraded from pro to flash then i want you to categorize that as a performance issue.
      - This product is designed to use different models eg.. using pro, downgrading to flash etc.
      - When users report that they dont expect the model to change those would be categorized as feature requests.
# Apply the model's recommendations: parse the ```json fenced block out of
# the Gemini summary, then add/remove labels and post explanation comments.
- name: 'Apply Labels to Issues'
  if: |-
    ${{ steps.gemini_issue_analysis.outcome == 'success' &&
    steps.gemini_issue_analysis.outputs.summary != '[]' }}
  env:
    REPOSITORY: '${{ github.repository }}'
    LABELS_OUTPUT: '${{ steps.gemini_issue_analysis.outputs.summary }}'
  uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
  with:
    github-token: '${{ steps.generate_token.outputs.token }}'
    script: |-
      const rawLabels = process.env.LABELS_OUTPUT;
      core.info(`Raw labels JSON: ${rawLabels}`);
      let parsedLabels;
      try {
        // The model is instructed to emit a fenced ```json block; extract it.
        const jsonMatch = rawLabels.match(/```json\s*([\s\S]*?)\s*```/);
        if (!jsonMatch || !jsonMatch[1]) {
          throw new Error("Could not find a ```json ... ``` block in the output.");
        }
        const jsonString = jsonMatch[1].trim();
        parsedLabels = JSON.parse(jsonString);
        core.info(`Parsed labels JSON: ${JSON.stringify(parsedLabels)}`);
      } catch (err) {
        core.setFailed(`Failed to parse labels JSON from Gemini output: ${err.message}\nRaw output: ${rawLabels}`);
        return;
      }
      for (const entry of parsedLabels) {
        const issueNumber = entry.issue_number;
        if (!issueNumber) {
          core.info(`Skipping entry with no issue number: ${JSON.stringify(entry)}`);
          continue;
        }
        // Copy instead of push()-ing onto entry.labels_to_add: mutating the
        // entry through the alias would make the "nothing to do" check at
        // the bottom of the loop see the marker label and never fire.
        // status/bot-triaged is always added to record that the bot ran.
        const labelsToAdd = [...(entry.labels_to_add || []), 'status/bot-triaged'];
        await github.rest.issues.addLabels({
          owner: context.repo.owner,
          repo: context.repo.repo,
          issue_number: issueNumber,
          labels: labelsToAdd
        });
        const explanation = entry.explanation ? ` - ${entry.explanation}` : '';
        core.info(`Successfully added labels for #${issueNumber}: ${labelsToAdd.join(', ')}${explanation}`);
        // Honor labels_to_remove (e.g. status/need-triage), which the
        // analysis prompt explicitly asks the model to emit. A 404 just
        // means the label was not on the issue; anything else is a real
        // failure and is rethrown.
        for (const label of entry.labels_to_remove || []) {
          try {
            await github.rest.issues.removeLabel({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: issueNumber,
              name: label
            });
            core.info(`Removed label '${label}' from #${issueNumber}`);
          } catch (err) {
            if (err.status === 404) {
              core.info(`Label '${label}' was not present on #${issueNumber}; skipping removal.`);
            } else {
              throw err;
            }
          }
        }
        if (entry.explanation) {
          await github.rest.issues.createComment({
            owner: context.repo.owner,
            repo: context.repo.repo,
            issue_number: issueNumber,
            body: entry.explanation,
          });
        }
        if ((!entry.labels_to_add || entry.labels_to_add.length === 0) && (!entry.labels_to_remove || entry.labels_to_remove.length === 0)) {
          core.info(`No labels to add or remove for #${issueNumber}, leaving as is`);
        }
      }
================================================
FILE: .github/workflows/gemini-scheduled-pr-triage.yml
================================================
# Runs the PR triage script every 15 minutes (or on demand) and exposes the
# list of PRs that still need a triage comment as a job output for any
# downstream jobs.
name: 'Gemini Scheduled PR Triage 🚀'
on:
  schedule:
    - cron: '*/15 * * * *' # Runs every 15 minutes
  workflow_dispatch:
jobs:
  audit-prs:
    timeout-minutes: 15
    # Only run in the upstream repository, never in forks.
    if: |-
      ${{ github.repository == 'google-gemini/gemini-cli' }}
    permissions:
      contents: 'read'
      id-token: 'write'
      issues: 'write'
      pull-requests: 'write'
    runs-on: 'ubuntu-latest'
    outputs:
      prs_needing_comment: '${{ steps.run_triage.outputs.prs_needing_comment }}'
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2
        with:
          app-id: '${{ secrets.APP_ID }}'
          private-key: '${{ secrets.PRIVATE_KEY }}'
          permission-issues: 'write'
          permission-pull-requests: 'write'
      # Delegates the actual triage logic to a checked-in script; the step
      # only normalizes the output afterwards.
      - name: 'Run PR Triage Script'
        id: 'run_triage'
        shell: 'bash'
        env:
          GITHUB_TOKEN: '${{ steps.generate_token.outputs.token }}'
          GITHUB_REPOSITORY: '${{ github.repository }}'
        run: |-
          ./.github/scripts/pr-triage.sh
          # If prs_needing_comment is empty, set it to [] explicitly for downstream steps
          if [[ -z "$(grep 'prs_needing_comment' "${GITHUB_OUTPUT}" | cut -d'=' -f2-)" ]]; then
            echo "prs_needing_comment=[]" >> "${GITHUB_OUTPUT}"
          fi
================================================
FILE: .github/workflows/gemini-scheduled-stale-issue-closer.yml
================================================
# Weekly sweep that closes old, inactive, low-engagement issues. An issue is
# closed when it is 3+ months old, has fewer than 5 reactions, carries no
# protective label, and has had no human activity for 10+ days.
name: '🔒 Gemini Scheduled Stale Issue Closer'
on:
  schedule:
    - cron: '0 0 * * 0' # Every Sunday at midnight UTC
  workflow_dispatch:
    inputs:
      dry_run:
        description: 'Run in dry-run mode (no changes applied)'
        required: false
        default: false
        type: 'boolean'
concurrency:
  group: '${{ github.workflow }}'
  cancel-in-progress: true
defaults:
  run:
    shell: 'bash'
jobs:
  close-stale-issues:
    if: "github.repository == 'google-gemini/gemini-cli'"
    runs-on: 'ubuntu-latest'
    permissions:
      issues: 'write'
    steps:
      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        uses: 'actions/create-github-app-token@v2'
        with:
          app-id: '${{ secrets.APP_ID }}'
          private-key: '${{ secrets.PRIVATE_KEY }}'
          permission-issues: 'write'
      - name: 'Process Stale Issues'
        uses: 'actions/github-script@v7'
        env:
          DRY_RUN: '${{ inputs.dry_run }}'
        with:
          github-token: '${{ steps.generate_token.outputs.token }}'
          script: |
            const dryRun = process.env.DRY_RUN === 'true';
            if (dryRun) {
              core.info('DRY RUN MODE ENABLED: No changes will be applied.');
            }
            const batchLabel = 'Stale';
            const threeMonthsAgo = new Date();
            threeMonthsAgo.setMonth(threeMonthsAgo.getMonth() - 3);
            const tenDaysAgo = new Date();
            tenDaysAgo.setDate(tenDaysAgo.getDate() - 10);
            core.info(`Cutoff date for creation: ${threeMonthsAgo.toISOString()}`);
            core.info(`Cutoff date for updates: ${tenDaysAgo.toISOString()}`);
            // Candidate set: all open issues created more than 3 months ago.
            const query = `repo:${context.repo.owner}/${context.repo.repo} is:issue is:open created:<${threeMonthsAgo.toISOString()}`;
            core.info(`Searching with query: ${query}`);
            const itemsToCheck = await github.paginate(github.rest.search.issuesAndPullRequests, {
              q: query,
              sort: 'created',
              order: 'asc',
              per_page: 100
            });
            core.info(`Found ${itemsToCheck.length} open issues to check.`);
            let processedCount = 0;
            for (const issue of itemsToCheck) {
              const createdAt = new Date(issue.created_at);
              const updatedAt = new Date(issue.updated_at);
              const reactionCount = issue.reactions.total_count;
              // High community engagement exempts an issue from closure.
              if (reactionCount >= 5) {
                continue;
              }
              // Skip if it has a maintainer, help wanted, or Public Roadmap label
              const rawLabels = issue.labels.map((l) => l.name);
              const lowercaseLabels = rawLabels.map((l) => l.toLowerCase());
              if (
                lowercaseLabels.some((l) => l.includes('maintainer')) ||
                lowercaseLabels.includes('help wanted') ||
                rawLabels.includes('🗓️ Public Roadmap')
              ) {
                continue;
              }
              let isStale = updatedAt < tenDaysAgo;
              // If apparently active, check whether the recent activity was
              // only from bots.
              if (!isStale) {
                try {
                  // BUG FIX: issues.listComments has no `sort`/`direction`
                  // parameters (those belong to listCommentsForRepo) and
                  // always returns comments oldest-first. The previous code
                  // passed those parameters and took the FIRST match, which
                  // found the oldest human comment and wrongly flagged
                  // recently-discussed issues as stale. Fetch every page and
                  // scan from the end for the most recent human comment.
                  const comments = await github.paginate(github.rest.issues.listComments, {
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    issue_number: issue.number,
                    per_page: 100
                  });
                  let lastHumanComment = null;
                  for (let i = comments.length - 1; i >= 0; i--) {
                    if (comments[i].user.type !== 'Bot') {
                      lastHumanComment = comments[i];
                      break;
                    }
                  }
                  if (lastHumanComment) {
                    isStale = new Date(lastHumanComment.created_at) < tenDaysAgo;
                  } else {
                    // No human comments. Check if creator is human.
                    if (issue.user.type !== 'Bot') {
                      isStale = createdAt < tenDaysAgo;
                    } else {
                      isStale = true; // Bot created, only bot comments
                    }
                  }
                } catch (error) {
                  // On API failure, skip rather than risk closing a live issue.
                  core.warning(`Failed to fetch comments for issue #${issue.number}: ${error.message}`);
                  continue;
                }
              }
              if (isStale) {
                processedCount++;
                const message = `Closing stale issue #${issue.number}: "${issue.title}" (${issue.html_url})`;
                core.info(message);
                if (!dryRun) {
                  // Add label
                  await github.rest.issues.addLabels({
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    issue_number: issue.number,
                    labels: [batchLabel]
                  });
                  // Add comment
                  await github.rest.issues.createComment({
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    issue_number: issue.number,
                    body: 'Hello! As part of our effort to keep our backlog manageable and focus on the most active issues, we are tidying up older reports.\n\nIt looks like this issue hasn\'t been active for a while, so we are closing it for now. However, if you are still experiencing this bug on the latest stable build, please feel free to comment on this issue or create a new one with updated details.\n\nThank you for your contribution!'
                  });
                  // Close issue
                  await github.rest.issues.update({
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    issue_number: issue.number,
                    state: 'closed',
                    state_reason: 'not_planned'
                  });
                }
              }
            }
            core.info(`\nTotal issues processed: ${processedCount}`);
================================================
FILE: .github/workflows/gemini-scheduled-stale-pr-closer.yml
================================================
# Enforces the community contribution policy on pull requests:
#   * non-maintainer, non-bot PRs without a 'help wanted' association are
#     closed after a 14-day grace period;
#   * remaining PRs with no maintainer activity for 30+ days are closed.
# Runs daily, on PR open/edit (single-PR mode), or manually with dry_run.
name: 'Gemini Scheduled Stale PR Closer'
on:
  schedule:
    - cron: '0 2 * * *' # Every day at 2 AM UTC
  pull_request:
    types: ['opened', 'edited']
  workflow_dispatch:
    inputs:
      dry_run:
        description: 'Run in dry-run mode'
        required: false
        default: false
        type: 'boolean'
jobs:
  close-stale-prs:
    if: "github.repository == 'google-gemini/gemini-cli'"
    runs-on: 'ubuntu-latest'
    permissions:
      pull-requests: 'write'
      issues: 'write'
    steps:
      # Token generation is skipped when the App secret is absent (forks);
      # the script step then falls back to the default GITHUB_TOKEN.
      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        env:
          APP_ID: '${{ secrets.APP_ID }}'
        if: |-
          ${{ env.APP_ID != '' }}
        uses: 'actions/create-github-app-token@v2'
        with:
          app-id: '${{ secrets.APP_ID }}'
          private-key: '${{ secrets.PRIVATE_KEY }}'
      - name: 'Process Stale PRs'
        uses: 'actions/github-script@v7'
        env:
          DRY_RUN: '${{ inputs.dry_run }}'
        with:
          github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          script: |
            const dryRun = process.env.DRY_RUN === 'true';
            const fourteenDaysAgo = new Date();
            fourteenDaysAgo.setDate(fourteenDaysAgo.getDate() - 14);
            const thirtyDaysAgo = new Date();
            thirtyDaysAgo.setDate(thirtyDaysAgo.getDate() - 30);
            // 1. Fetch maintainers for verification
            let maintainerLogins = new Set();
            const teams = ['gemini-cli-maintainers', 'gemini-cli-askmode-approvers', 'gemini-cli-docs'];
            for (const team_slug of teams) {
              try {
                const members = await github.paginate(github.rest.teams.listMembersInOrg, {
                  org: context.repo.owner,
                  team_slug: team_slug
                });
                for (const m of members) maintainerLogins.add(m.login.toLowerCase());
                core.info(`Successfully fetched ${members.length} team members from ${team_slug}`);
              } catch (e) {
                // Silently skip if permissions are insufficient; we will rely on author_association
                core.debug(`Skipped team fetch for ${team_slug}: ${e.message}`);
              }
            }
            // Returns true when `login` should be treated as a maintainer.
            // Checks (in order): team membership, repo author association,
            // then org membership as a permission-dependent fallback.
            const isMaintainer = async (login, assoc) => {
              // Reliably identify maintainers using authorAssociation (provided by GitHub)
              // and organization membership (if available).
              const isTeamMember = maintainerLogins.has(login.toLowerCase());
              const isRepoMaintainer = ['OWNER', 'MEMBER', 'COLLABORATOR'].includes(assoc);
              if (isTeamMember || isRepoMaintainer) return true;
              // Fallback: Check if user belongs to the 'google' or 'googlers' orgs (requires permission)
              try {
                const orgs = ['googlers', 'google'];
                for (const org of orgs) {
                  try {
                    await github.rest.orgs.checkMembershipForUser({ org: org, username: login });
                    return true;
                  } catch (e) {
                    // 404 means "not a member"; anything else is a real error.
                    if (e.status !== 404) throw e;
                  }
                }
              } catch (e) {
                // Gracefully ignore failures here
              }
              return false;
            };
            // 2. Fetch all open PRs (or just the triggering PR in event mode)
            let prs = [];
            if (context.eventName === 'pull_request') {
              const { data: pr } = await github.rest.pulls.get({
                owner: context.repo.owner,
                repo: context.repo.repo,
                pull_number: context.payload.pull_request.number
              });
              prs = [pr];
            } else {
              prs = await github.paginate(github.rest.pulls.list, {
                owner: context.repo.owner,
                repo: context.repo.repo,
                state: 'open',
                per_page: 100
              });
            }
            for (const pr of prs) {
              // Maintainer and bot PRs are exempt from all closure policies.
              const maintainerPr = await isMaintainer(pr.user.login, pr.author_association);
              const isBot = pr.user.type === 'Bot' || pr.user.login.endsWith('[bot]');
              if (maintainerPr || isBot) continue;
              // Helper: Fetch labels and linked issues via GraphQL
              const prDetailsQuery = `query($owner:String!, $repo:String!, $number:Int!) {
                repository(owner:$owner, name:$repo) {
                  pullRequest(number:$number) {
                    closingIssuesReferences(first: 10) {
                      nodes {
                        number
                        labels(first: 20) {
                          nodes { name }
                        }
                      }
                    }
                  }
                }
              }`;
              let linkedIssues = [];
              try {
                const res = await github.graphql(prDetailsQuery, {
                  owner: context.repo.owner, repo: context.repo.repo, number: pr.number
                });
                linkedIssues = res.repository.pullRequest.closingIssuesReferences.nodes;
              } catch (e) {
                core.warning(`GraphQL fetch failed for PR #${pr.number}: ${e.message}`);
              }
              // Check for mentions in body as fallback (regex)
              // NOTE(review): only the FIRST '#123' / issue-URL mention in the
              // body is considered — presumably intentional, but worth
              // confirming against the contribution policy.
              const body = pr.body || '';
              const mentionRegex = /(?:#|https:\/\/github\.com\/[^\/]+\/[^\/]+\/issues\/)(\d+)/i;
              const matches = body.match(mentionRegex);
              if (matches && linkedIssues.length === 0) {
                const issueNumber = parseInt(matches[1]);
                try {
                  const { data: issue } = await github.rest.issues.get({
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    issue_number: issueNumber
                  });
                  // Normalize the REST shape to match the GraphQL node shape.
                  linkedIssues = [{ number: issueNumber, labels: { nodes: issue.labels.map(l => ({ name: l.name })) } }];
                } catch (e) {}
              }
              // 3. Enforcement Logic
              // A PR is protected when it, or any linked issue, carries
              // 'help wanted' or the maintainer-only label.
              const prLabels = pr.labels.map(l => l.name.toLowerCase());
              const hasHelpWanted = prLabels.includes('help wanted') ||
                linkedIssues.some(issue => issue.labels.nodes.some(l => l.name.toLowerCase() === 'help wanted'));
              const hasMaintainerOnly = prLabels.includes('🔒 maintainer only') ||
                linkedIssues.some(issue => issue.labels.nodes.some(l => l.name.toLowerCase() === '🔒 maintainer only'));
              const hasLinkedIssue = linkedIssues.length > 0;
              // Closure Policy: No help-wanted label = Close after 14 days
              if (pr.state === 'open' && !hasHelpWanted && !hasMaintainerOnly) {
                const prCreatedAt = new Date(pr.created_at);
                // We give a 14-day grace period for non-help-wanted PRs to be manually reviewed/labeled by an EM
                if (prCreatedAt > fourteenDaysAgo) {
                  core.info(`PR #${pr.number} is new and lacks 'help wanted'. Giving 14-day grace period for EM review.`);
                  continue;
                }
                core.info(`PR #${pr.number} is older than 14 days and lacks 'help wanted' association. Closing.`);
                if (!dryRun) {
                  await github.rest.issues.createComment({
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    issue_number: pr.number,
                    body: "Hi there! Thank you for your interest in contributing to Gemini CLI. \n\nTo ensure we maintain high code quality and focus on our prioritized roadmap, we have updated our contribution policy (see [Discussion #17383](https://github.com/google-gemini/gemini-cli/discussions/17383)). \n\n**We only *guarantee* review and consideration of pull requests for issues that are explicitly labeled as 'help wanted'.** All other community pull requests are subject to closure after 14 days if they do not align with our current focus areas. For this reason, we strongly recommend that contributors only submit pull requests against issues explicitly labeled as **'help-wanted'**. \n\nThis pull request is being closed as it has been open for 14 days without a 'help wanted' designation. We encourage you to find and contribute to existing 'help wanted' issues in our backlog! Thank you for your understanding and for being part of our community!"
                  });
                  await github.rest.pulls.update({
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    pull_number: pr.number,
                    state: 'closed'
                  });
                }
                continue;
              }
              // Also check for linked issue even if it has help wanted (redundant but safe)
              // NOTE(review): this branch only `continue`s, so it is
              // effectively dead — kept deliberately per the comment above.
              if (pr.state === 'open' && !hasLinkedIssue) {
                // Already covered by hasHelpWanted check above, but good for future-proofing
                continue;
              }
              // 4. Staleness Check (Scheduled only)
              if (pr.state === 'open' && context.eventName !== 'pull_request') {
                // Skip PRs that were created less than 30 days ago - they cannot be stale yet
                const prCreatedAt = new Date(pr.created_at);
                if (prCreatedAt > thirtyDaysAgo) continue;
                // Last maintainer activity = newest maintainer review or
                // maintainer issue comment, falling back to PR creation time.
                let lastActivity = new Date(pr.created_at);
                try {
                  const reviews = await github.paginate(github.rest.pulls.listReviews, {
                    owner: context.repo.owner, repo: context.repo.repo, pull_number: pr.number
                  });
                  for (const r of reviews) {
                    if (await isMaintainer(r.user.login, r.author_association)) {
                      const d = new Date(r.submitted_at || r.updated_at);
                      if (d > lastActivity) lastActivity = d;
                    }
                  }
                  const comments = await github.paginate(github.rest.issues.listComments, {
                    owner: context.repo.owner, repo: context.repo.repo, issue_number: pr.number
                  });
                  for (const c of comments) {
                    if (await isMaintainer(c.user.login, c.author_association)) {
                      const d = new Date(c.updated_at);
                      if (d > lastActivity) lastActivity = d;
                    }
                  }
                } catch (e) {}
                if (lastActivity < thirtyDaysAgo) {
                  const labels = pr.labels.map(l => l.name.toLowerCase());
                  const isProtected = labels.includes('help wanted') || labels.includes('🔒 maintainer only');
                  if (isProtected) {
                    core.info(`PR #${pr.number} is stale but has a protected label. Skipping closure.`);
                    continue;
                  }
                  core.info(`PR #${pr.number} is stale (no maintainer activity for 30+ days). Closing.`);
                  if (!dryRun) {
                    await github.rest.issues.createComment({
                      owner: context.repo.owner,
                      repo: context.repo.repo,
                      issue_number: pr.number,
                      body: "Hi there! Thank you for your contribution. To keep our backlog manageable, we are closing pull requests that haven't seen maintainer activity for 30 days. If you're still working on this, please let us know!"
                    });
                    await github.rest.pulls.update({
                      owner: context.repo.owner,
                      repo: context.repo.repo,
                      pull_number: pr.number,
                      state: 'closed'
                    });
                  }
                }
              }
            }
================================================
FILE: .github/workflows/gemini-self-assign-issue.yml
================================================
# Lets community members self-assign (or unassign from) issues by commenting
# '/assign' or '/unassign'. Self-assignment is restricted to 'help wanted'
# issues, capped at 3 concurrent assignments per user.
name: 'Assign Issue on Comment'
on:
  issue_comment:
    types:
      - 'created'
concurrency:
  group: '${{ github.workflow }}-${{ github.event.issue.number }}'
  cancel-in-progress: true
defaults:
  run:
    shell: 'bash'
permissions:
  contents: 'read'
  id-token: 'write'
  issues: 'write'
  statuses: 'write'
  packages: 'read'
jobs:
  self-assign-issue:
    if: |-
      github.repository == 'google-gemini/gemini-cli' &&
      github.event_name == 'issue_comment' &&
      (contains(github.event.comment.body, '/assign') || contains(github.event.comment.body, '/unassign'))
    runs-on: 'ubuntu-latest'
    steps:
      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b'
        with:
          app-id: '${{ secrets.APP_ID }}'
          private-key: '${{ secrets.PRIVATE_KEY }}'
          # Add 'assignments' write permission
          permission-issues: 'write'
      - name: 'Assign issue to user'
        if: "contains(github.event.comment.body, '/assign')"
        uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
        with:
          github-token: '${{ steps.generate_token.outputs.token }}'
          script: |
            const issueNumber = context.issue.number;
            const commenter = context.actor;
            const owner = context.repo.owner;
            const repo = context.repo.repo;
            const MAX_ISSUES_ASSIGNED = 3;
            // BUG FIX: '/unassign' contains the substring '/assign', so the
            // step-level `contains()` condition above also fires for unassign
            // requests. Without this guard an '/unassign' comment would run
            // the assignment flow (possibly assigning and commenting) before
            // the unassign step removes the user again.
            if (context.payload.comment.body.includes('/unassign')) {
              return;
            }
            const issue = await github.rest.issues.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: issueNumber,
            });
            // Self-assignment is only allowed on 'help wanted' issues.
            const hasHelpWantedLabel = issue.data.labels.some(label => label.name === 'help wanted');
            if (!hasHelpWantedLabel) {
              await github.rest.issues.createComment({
                owner: owner,
                repo: repo,
                issue_number: issueNumber,
                body: `👋 @${commenter}, thanks for your interest in this issue! We're reserving self-assignment for issues that have been marked with the \`help wanted\` label. Feel free to check out our list of [issues that need attention](https://github.com/google-gemini/gemini-cli/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22).`
              });
              return;
            }
            // Search for open issues already assigned to the commenter in this repo
            const { data: assignedIssues } = await github.rest.search.issuesAndPullRequests({
              q: `is:issue repo:${owner}/${repo} assignee:${commenter} is:open`,
              advanced_search: true
            });
            // Cap the number of concurrent assignments per contributor.
            if (assignedIssues.total_count >= MAX_ISSUES_ASSIGNED) {
              await github.rest.issues.createComment({
                owner: owner,
                repo: repo,
                issue_number: issueNumber,
                body: `👋 @${commenter}! You currently have ${assignedIssues.total_count} issues assigned to you. We have a ${MAX_ISSUES_ASSIGNED} max issues assigned at once policy. Once you close out an existing issue it will open up space to take another. You can also unassign yourself from an existing issue but please work on a hand-off if someone is expecting work on that issue.`
              });
              return; // exit
            }
            if (issue.data.assignees.length > 0) {
              // Comment that it's already assigned
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: issueNumber,
                body: `@${commenter} Thanks for taking interest but this issue is already assigned. We'd still love to have you contribute. Check out our [Help Wanted](https://github.com/google-gemini/gemini-cli/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22help%20wanted%22) list for issues where we need some extra attention.`
              });
              return;
            }
            // If not taken, assign the user who commented
            await github.rest.issues.addAssignees({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: issueNumber,
              assignees: [commenter]
            });
            // Post a comment to confirm assignment
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: issueNumber,
              body: `👋 @${commenter}, you've been assigned to this issue! Thank you for taking the time to contribute. Make sure to check out our [contributing guidelines](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md).`
            });
      - name: 'Unassign issue from user'
        if: "contains(github.event.comment.body, '/unassign')"
        uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
        with:
          github-token: '${{ steps.generate_token.outputs.token }}'
          script: |
            const issueNumber = context.issue.number;
            const commenter = context.actor;
            const owner = context.repo.owner;
            const repo = context.repo.repo;
            // Unassign requires the comment to be EXACTLY '/unassign'
            // (after trimming), unlike the more permissive assign trigger.
            const commentBody = context.payload.comment.body.trim();
            if (commentBody !== '/unassign') {
              return;
            }
            const issue = await github.rest.issues.get({
              owner: owner,
              repo: repo,
              issue_number: issueNumber,
            });
            // Only the commenter can unassign themselves; no-op otherwise.
            const isAssigned = issue.data.assignees.some(assignee => assignee.login === commenter);
            if (isAssigned) {
              await github.rest.issues.removeAssignees({
                owner: owner,
                repo: repo,
                issue_number: issueNumber,
                assignees: [commenter]
              });
              await github.rest.issues.createComment({
                owner: owner,
                repo: repo,
                issue_number: issueNumber,
                body: `👋 @${commenter}, you have been unassigned from this issue.`
              });
            }
================================================
FILE: .github/workflows/issue-opened-labeler.yml
================================================
# Tags every newly opened issue with 'status/need-triage' so the scheduled
# triage workflows pick it up.
name: '🏷️ Issue Opened Labeler'
on:
  issues:
    types:
      - 'opened'
jobs:
  label-issue:
    runs-on: 'ubuntu-latest'
    if: |-
      ${{ github.repository == 'google-gemini/gemini-cli' || github.repository == 'google-gemini/maintainers-gemini-cli' }}
    steps:
      # Token generation is skipped when the App secret is absent; the
      # labeling step then falls back to the default GITHUB_TOKEN.
      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        env:
          APP_ID: '${{ secrets.APP_ID }}'
        if: |-
          ${{ env.APP_ID != '' }}
        uses: 'actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b' # ratchet:actions/create-github-app-token@v2
        with:
          app-id: '${{ secrets.APP_ID }}'
          private-key: '${{ secrets.PRIVATE_KEY }}'
      - name: 'Add need-triage label'
        uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
        with:
          github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          script: |-
            // Re-fetch the issue rather than trusting the event payload, in
            // case a label was applied between the event and this run.
            const { data: issue } = await github.rest.issues.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
            });
            const hasLabel = issue.labels.some(l => l.name === 'status/need-triage');
            if (!hasLabel) {
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                labels: ['status/need-triage']
              });
            } else {
              core.info('Issue already has status/need-triage label. Skipping.');
            }
================================================
FILE: .github/workflows/label-backlog-child-issues.yml
================================================
# Runs .github/scripts/sync-maintainer-labels.cjs in two modes: immediately
# on issue events in this repo, and hourly/manually for a full multi-repo
# sync. Both jobs execute the identical script; only their triggers differ.
# NOTE(review): the workflow/job names say "Label Child Issues" but the
# script invoked is sync-maintainer-labels.cjs — confirm the naming is
# intentional.
name: 'Label Child Issues for Project Rollup'
on:
  issues:
    types: ['opened', 'edited', 'reopened']
  schedule:
    - cron: '0 * * * *' # Run every hour
  workflow_dispatch:
permissions:
  issues: 'write'
  contents: 'read'
jobs:
  # Event-based: Quick reaction to new/edited issues in THIS repo
  labeler:
    if: "github.repository == 'google-gemini/gemini-cli' && github.event_name == 'issues'"
    runs-on: 'ubuntu-latest'
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@v4'
      - name: 'Setup Node.js'
        uses: 'actions/setup-node@v4'
        with:
          node-version: '20'
          cache: 'npm'
      - name: 'Install Dependencies'
        run: 'npm ci'
      - name: 'Run Multi-Repo Sync Script'
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
        run: 'node .github/scripts/sync-maintainer-labels.cjs'
  # Scheduled/Manual: Recursive sync across multiple repos
  sync-maintainer-labels:
    if: "github.repository == 'google-gemini/gemini-cli' && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch')"
    runs-on: 'ubuntu-latest'
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@v4'
      - name: 'Setup Node.js'
        uses: 'actions/setup-node@v4'
        with:
          node-version: '20'
          cache: 'npm'
      - name: 'Install Dependencies'
        run: 'npm ci'
      - name: 'Run Multi-Repo Sync Script'
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
        run: 'node .github/scripts/sync-maintainer-labels.cjs'
================================================
FILE: .github/workflows/label-workstream-rollup.yml
================================================
name: 'Label Workstream Rollup'

on:
  issues:
    types: ['opened', 'edited', 'reopened']
  schedule:
    - cron: '0 * * * *'
  workflow_dispatch:

jobs:
  labeler:
    if: "github.repository == 'google-gemini/gemini-cli'"
    runs-on: 'ubuntu-latest'
    permissions:
      issues: 'write'
    steps:
      - name: 'Check for Parent Workstream and Apply Label'
        uses: 'actions/github-script@v7'
        with:
          script: |
            const labelToAdd = 'workstream-rollup';

            // Allow-list of parent (workstream) issue URLs. An issue gets the
            // rollup label when any of these appears in its parent chain.
            const allowedParentUrls = [
              'https://github.com/google-gemini/gemini-cli/issues/15374',
              'https://github.com/google-gemini/gemini-cli/issues/15456',
              'https://github.com/google-gemini/gemini-cli/issues/15324',
              'https://github.com/google-gemini/gemini-cli/issues/17202',
              'https://github.com/google-gemini/gemini-cli/issues/17203'
            ];

            // Fetches one issue with up to five levels of ancestors
            // (used for 'issues' event triggers).
            async function processSingleIssue(owner, repo, number) {
              const singleIssueQuery = `
                query($owner:String!, $repo:String!, $number:Int!) {
                  repository(owner:$owner, name:$repo) {
                    issue(number:$number) {
                      number
                      parent {
                        url
                        parent {
                          url
                          parent {
                            url
                            parent {
                              url
                              parent {
                                url
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              `;
              try {
                const result = await github.graphql(singleIssueQuery, { owner, repo, number });
                if (!result || !result.repository || !result.repository.issue) {
                  console.log(`Issue #${number} not found or data missing.`);
                  return;
                }
                await checkAndLabel(result.repository.issue, owner, repo);
              } catch (error) {
                console.error(`Failed to process issue #${number}:`, error);
                throw error; // Re-throw to be caught by main execution
              }
            }

            // Pages through every open issue (used for schedule/dispatch runs).
            async function processAllOpenIssues(owner, repo) {
              const pagedQuery = `
                query($owner:String!, $repo:String!, $cursor:String) {
                  repository(owner:$owner, name:$repo) {
                    issues(first: 100, states: OPEN, after: $cursor) {
                      pageInfo {
                        hasNextPage
                        endCursor
                      }
                      nodes {
                        number
                        parent {
                          url
                          parent {
                            url
                            parent {
                              url
                              parent {
                                url
                                parent {
                                  url
                                }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              `;
              let morePages = true;
              let cursor = null;
              while (morePages) {
                try {
                  const result = await github.graphql(pagedQuery, { owner, repo, cursor });
                  if (!result || !result.repository || !result.repository.issues) {
                    console.error('Invalid response structure from GitHub API');
                    break;
                  }
                  const batch = result.repository.issues.nodes || [];
                  console.log(`Processing batch of ${batch.length} issues...`);
                  for (const issue of batch) {
                    await checkAndLabel(issue, owner, repo);
                  }
                  morePages = result.repository.issues.pageInfo.hasNextPage;
                  cursor = result.repository.issues.pageInfo.endCursor;
                } catch (error) {
                  console.error('Failed to fetch issues batch:', error);
                  throw error; // Re-throw to be caught by main execution
                }
              }
            }

            // Walks the parent chain; applies the label on the first match.
            async function checkAndLabel(issue, owner, repo) {
              if (!issue || !issue.parent) return;
              let ancestor = issue.parent;
              const trace = [];
              let matched = false;
              while (ancestor) {
                trace.push(ancestor.url);
                if (allowedParentUrls.includes(ancestor.url)) {
                  console.log(`SUCCESS: Issue #${issue.number} is a descendant of ${ancestor.url}. Trace: ${trace.join(' -> ')}. Adding label.`);
                  await github.rest.issues.addLabels({
                    owner,
                    repo,
                    issue_number: issue.number,
                    labels: [labelToAdd]
                  });
                  matched = true;
                  break;
                }
                ancestor = ancestor.parent;
              }
              // Only log misses for single-issue runs to keep bulk logs quiet.
              if (!matched && context.eventName === 'issues') {
                console.log(`Issue #${issue.number} did not match any allowed workstreams. Trace: ${trace.join(' -> ') || 'None'}.`);
              }
            }

            // Main execution
            try {
              if (context.eventName === 'issues') {
                console.log(`Processing single issue #${context.payload.issue.number}...`);
                await processSingleIssue(context.repo.owner, context.repo.repo, context.payload.issue.number);
              } else {
                console.log(`Running for event: ${context.eventName}. Processing all open issues...`);
                await processAllOpenIssues(context.repo.owner, context.repo.repo);
              }
            } catch (error) {
              core.setFailed(`Workflow failed: ${error.message}`);
            }
================================================
FILE: .github/workflows/links.yml
================================================
name: 'Links'

on:
  push:
    branches: ['main']
  pull_request:
    branches: ['main']
  repository_dispatch:
  workflow_dispatch:
  schedule:
    # Daily link check at 18:00 UTC.
    - cron: '00 18 * * *'

jobs:
  linkChecker:
    # Only run in the canonical repository, not in forks.
    if: |-
      ${{ github.repository == 'google-gemini/gemini-cli' }}
    runs-on: 'ubuntu-latest'
    steps:
      - uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
      - name: 'Link Checker'
        id: 'lychee'
        uses: 'lycheeverse/lychee-action@885c65f3dc543b57c898c8099f4e08c8afd178a2' # ratchet:lycheeverse/lychee-action@v2.6.1
        with:
          # Accept 503 responses to tolerate transiently unavailable hosts.
          args: '--verbose --no-progress --accept 200,503 ./**/*.md'
================================================
FILE: .github/workflows/no-response.yml
================================================
name: 'No Response'

# Run as a daily cron at 1:45 AM
on:
  schedule:
    - cron: '45 1 * * *'
  workflow_dispatch:

jobs:
  no-response:
    runs-on: 'ubuntu-latest'
    if: |-
      ${{ github.repository == 'google-gemini/gemini-cli' }}
    permissions:
      issues: 'write'
      pull-requests: 'write'
    concurrency:
      group: '${{ github.workflow }}-no-response'
      cancel-in-progress: true
    steps:
      - uses: 'actions/stale@5bef64f19d7facfb25b37b414482c7164d639639' # ratchet:actions/stale@v9
        with:
          repo-token: '${{ secrets.GITHUB_TOKEN }}'
          # -1 disables automatic staling: this job never marks anything stale
          # itself, it only closes items a human already labeled
          # 'status/need-information' after 14 days of silence.
          days-before-stale: -1
          days-before-close: 14
          stale-issue-label: 'status/need-information'
          close-issue-message: >-
            This issue was marked as needing more information and has not received a response in 14 days.
            Closing it for now. If you still face this problem, feel free to reopen with more details. Thank you!
          stale-pr-label: 'status/need-information'
          close-pr-message: >-
            This pull request was marked as needing more information and has had no updates in 14 days.
            Closing it for now. You are welcome to reopen with the required info. Thanks for contributing!
================================================
FILE: .github/workflows/pr-contribution-guidelines-notifier.yml
================================================
name: '🏷️ PR Contribution Guidelines Notifier'

on:
  pull_request:
    types:
      - 'opened'

jobs:
  notify-process-change:
    runs-on: 'ubuntu-latest'
    if: |-
      github.repository == 'google-gemini/gemini-cli' || github.repository == 'google-gemini/maintainers-gemini-cli'
    permissions:
      pull-requests: 'write'
    steps:
      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        env:
          APP_ID: '${{ secrets.APP_ID }}'
        # Skip token generation when the app secret is not configured;
        # the next step falls back to GITHUB_TOKEN.
        if: |-
          ${{ env.APP_ID != '' }}
        uses: 'actions/create-github-app-token@v2'
        with:
          app-id: '${{ secrets.APP_ID }}'
          private-key: '${{ secrets.PRIVATE_KEY }}'
      - name: 'Check membership and post comment'
        uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
        with:
          github-token: '${{ steps.generate_token.outputs.token || secrets.GITHUB_TOKEN }}'
          script: |-
            const org = context.repo.owner;
            const repo = context.repo.repo;
            const username = context.payload.pull_request.user.login;
            const pr_number = context.payload.pull_request.number;

            // 1. Check if the PR author is a maintainer.
            // Team membership is the most reliable check for private org members.
            let isTeamMember = false;
            const maintainerTeams = ['gemini-cli-maintainers', 'gemini-cli-askmode-approvers', 'gemini-cli-docs'];
            for (const team_slug of maintainerTeams) {
              try {
                const members = await github.paginate(github.rest.teams.listMembersInOrg, {
                  org: org,
                  team_slug: team_slug
                });
                if (members.some(m => m.login.toLowerCase() === username.toLowerCase())) {
                  isTeamMember = true;
                  core.info(`${username} is a member of ${team_slug}. No notification needed.`);
                  break;
                }
              } catch (e) {
                core.warning(`Failed to fetch team members from ${team_slug}: ${e.message}`);
              }
            }
            if (isTeamMember) return;

            // Check author_association from the webhook payload.
            const authorAssociation = context.payload.pull_request.author_association;
            const isRepoMaintainer = ['OWNER', 'MEMBER', 'COLLABORATOR'].includes(authorAssociation);
            if (isRepoMaintainer) {
              core.info(`${username} is a maintainer (author_association: ${authorAssociation}). No notification needed.`);
              return;
            }

            // Check whether the author belongs to a Google org.
            // A 404 from checkMembershipForUser means "not a (visible) member".
            const isGoogler = async (login) => {
              try {
                const googleOrgs = ['googlers', 'google'];
                for (const googleOrg of googleOrgs) {
                  try {
                    await github.rest.orgs.checkMembershipForUser({
                      org: googleOrg,
                      username: login
                    });
                    return true;
                  } catch (e) {
                    if (e.status !== 404) throw e;
                  }
                }
              } catch (e) {
                core.warning(`Failed to check org membership for ${login}: ${e.message}`);
              }
              return false;
            };
            if (await isGoogler(username)) {
              core.info(`${username} is a Googler. No notification needed.`);
              return;
            }

            // 2. Check if the PR is already associated with an issue.
            const query = `
              query($owner:String!, $repo:String!, $number:Int!) {
                repository(owner:$owner, name:$repo) {
                  pullRequest(number:$number) {
                    closingIssuesReferences(first: 1) {
                      totalCount
                    }
                  }
                }
              }
            `;
            const variables = { owner: org, repo: repo, number: pr_number };
            const result = await github.graphql(query, variables);
            const issueCount = result.repository.pullRequest.closingIssuesReferences.totalCount;
            if (issueCount > 0) {
              core.info(`PR #${pr_number} is already associated with an issue. No notification needed.`);
              return;
            }

            // 3. Post the notification comment.
            core.info(`${username} is not a maintainer and PR #${pr_number} has no linked issue. Posting notification.`);
            const comment = `
            Hi @${username}, thank you so much for your contribution to Gemini CLI! We really appreciate the time and effort you've put into this.
            We're making some updates to our contribution process to improve how we track and review changes. Please take a moment to review our recent discussion post: [Improving Our Contribution Process & Introducing New Guidelines](https://github.com/google-gemini/gemini-cli/discussions/16706).
            Key Update: Starting **January 26, 2026**, the Gemini CLI project will require all pull requests to be associated with an existing issue. Any pull requests not linked to an issue by that date will be automatically closed.
            Thank you for your understanding and for being a part of our community!
            `.trim().replace(/^[ ]+/gm, '');
            await github.rest.issues.createComment({
              owner: org,
              repo: repo,
              issue_number: pr_number,
              body: comment
            });
================================================
FILE: .github/workflows/pr-rate-limiter.yaml
================================================
# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
name: 'PR rate limiter'

# No default token permissions; the job requests exactly what it needs.
permissions: {}

on:
  pull_request_target:
    types:
      - 'opened'
      - 'reopened'

jobs:
  limit:
    runs-on: 'gemini-cli-ubuntu-16-core'
    permissions:
      contents: 'read'
      pull-requests: 'write'
    steps:
      - name: 'Limit open pull requests per user'
        uses: 'Homebrew/actions/limit-pull-requests@9ceb7934560eb61d131dde205a6c2d77b2e1529d' # master
        with:
          # Maintainers are exempt from the limit.
          except-author-associations: 'MEMBER,OWNER,COLLABORATOR'
          # At 8 open PRs (7 pre-existing plus the new one) comment and close.
          comment-limit: 8
          comment: >
            You already have 7 pull requests open. Please work on getting
            existing PRs merged before opening more.
          close-limit: 8
          close: true
================================================
FILE: .github/workflows/release-change-tags.yml
================================================
name: 'Release: Change Tags'

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'The package version to tag (e.g., 0.5.0-preview-2). This version must already exist on the npm registry.'
        required: true
        type: 'string'
      channel:
        description: 'The npm dist-tag to apply (e.g., latest, preview, nightly).'
        required: true
        type: 'choice'
        options:
          - 'dev'
          - 'latest'
          - 'preview'
          - 'nightly'
      dry-run:
        description: 'Whether to run in dry-run mode.'
        required: false
        type: 'boolean'
        default: true
      environment:
        description: 'Environment'
        required: false
        type: 'choice'
        options:
          - 'prod'
          - 'dev'
        default: 'prod'

jobs:
  change-tags:
    if: "github.repository == 'google-gemini/gemini-cli'"
    runs-on: 'ubuntu-latest'
    # Deployment environment gates access to the publish secrets.
    environment: "${{ github.event.inputs.environment || 'prod' }}"
    permissions:
      packages: 'write'
      issues: 'write'
    steps:
      - name: 'Checkout repository'
        uses: 'actions/checkout@v4'
        with:
          ref: '${{ github.ref }}'
          fetch-depth: 0
      - name: 'Setup Node.js'
        uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
        with:
          node-version-file: '.nvmrc'
      - name: 'Change tag'
        uses: './.github/actions/tag-npm-release'
        with:
          channel: '${{ github.event.inputs.channel }}'
          version: '${{ github.event.inputs.version }}'
          dry-run: '${{ github.event.inputs.dry-run }}'
          wombat-token-core: '${{ secrets.WOMBAT_TOKEN_CORE }}'
          wombat-token-cli: '${{ secrets.WOMBAT_TOKEN_CLI }}'
          wombat-token-a2a-server: '${{ secrets.WOMBAT_TOKEN_A2A_SERVER }}'
          github-token: '${{ secrets.GITHUB_TOKEN }}'
          cli-package-name: '${{ vars.CLI_PACKAGE_NAME }}'
          core-package-name: '${{ vars.CORE_PACKAGE_NAME }}'
          a2a-package-name: '${{ vars.A2A_PACKAGE_NAME }}'
          working-directory: '.'
================================================
FILE: .github/workflows/release-manual.yml
================================================
name: 'Release: Manual'

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'The version to release (e.g., v0.1.11). Must be a valid semver string with a "v" prefix.'
        required: true
        type: 'string'
      ref:
        description: 'The branch, tag, or SHA to release from.'
        required: true
        type: 'string'
      npm_channel:
        description: 'The npm channel to publish to'
        required: true
        type: 'choice'
        options:
          - 'dev'
          - 'preview'
          - 'nightly'
          - 'latest'
        default: 'latest'
      dry_run:
        description: 'Run a dry-run of the release process; no branches, npm packages or GitHub releases will be created.'
        required: true
        type: 'boolean'
        default: true
      force_skip_tests:
        description: 'Select to skip the "Run Tests" step in testing. Prod releases should run tests'
        required: false
        type: 'boolean'
        default: false
      skip_github_release:
        description: 'Select to skip creating a GitHub release (only used when environment is PROD)'
        required: false
        type: 'boolean'
        default: false
      environment:
        description: 'Environment'
        required: false
        type: 'choice'
        options:
          - 'prod'
          - 'dev'
        default: 'prod'

jobs:
  release:
    if: "github.repository == 'google-gemini/gemini-cli'"
    runs-on: 'ubuntu-latest'
    # Deployment environment gates access to the publish secrets.
    environment: "${{ github.event.inputs.environment || 'prod' }}"
    permissions:
      contents: 'write'
      packages: 'write'
      issues: 'write'
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
        with:
          fetch-depth: 0
      # The release is built from a second checkout of the requested ref so the
      # workflow definition itself always comes from the triggering branch.
      - name: 'Checkout Release Code'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
        with:
          ref: '${{ github.event.inputs.ref }}'
          path: 'release'
          fetch-depth: 0
      - name: 'Debug Inputs'
        shell: 'bash'
        env:
          JSON_INPUTS: '${{ toJSON(inputs) }}'
        run: 'echo "$JSON_INPUTS"'
      - name: 'Setup Node.js'
        uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
        with:
          node-version-file: './release/.nvmrc'
          cache: 'npm'
      - name: 'Install Dependencies'
        working-directory: './release'
        run: 'npm ci'
      - name: 'Prepare Release Info'
        id: 'release_info'
        working-directory: './release'
        env:
          INPUT_VERSION: '${{ github.event.inputs.version }}'
        run: |
          # Strip the leading "v" for the npm version; keep the tag as given.
          RELEASE_VERSION="${INPUT_VERSION}"
          echo "RELEASE_VERSION=${RELEASE_VERSION#v}" >> "${GITHUB_OUTPUT}"
          echo "PREVIOUS_TAG=$(git describe --tags --abbrev=0)" >> "${GITHUB_OUTPUT}"
      - name: 'Run Tests'
        if: "${{github.event.inputs.force_skip_tests != 'true'}}"
        uses: './.github/actions/run-tests'
        with:
          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
          working-directory: './release'
      - name: 'Publish Release'
        uses: './.github/actions/publish-release'
        with:
          force-skip-tests: '${{ github.event.inputs.force_skip_tests }}'
          release-version: '${{ steps.release_info.outputs.RELEASE_VERSION }}'
          release-tag: '${{ github.event.inputs.version }}'
          npm-tag: '${{ github.event.inputs.npm_channel }}'
          wombat-token-core: '${{ secrets.WOMBAT_TOKEN_CORE }}'
          wombat-token-cli: '${{ secrets.WOMBAT_TOKEN_CLI }}'
          wombat-token-a2a-server: '${{ secrets.WOMBAT_TOKEN_A2A_SERVER }}'
          github-token: '${{ secrets.GITHUB_TOKEN }}'
          github-release-token: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
          dry-run: '${{ github.event.inputs.dry_run }}'
          previous-tag: '${{ steps.release_info.outputs.PREVIOUS_TAG }}'
          skip-github-release: '${{ github.event.inputs.skip_github_release }}'
          working-directory: './release'
          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
          npm-registry-publish-url: '${{ vars.NPM_REGISTRY_PUBLISH_URL }}'
          npm-registry-url: '${{ vars.NPM_REGISTRY_URL }}'
          npm-registry-scope: '${{ vars.NPM_REGISTRY_SCOPE }}'
          cli-package-name: '${{ vars.CLI_PACKAGE_NAME }}'
          core-package-name: '${{ vars.CORE_PACKAGE_NAME }}'
          a2a-package-name: '${{ vars.A2A_PACKAGE_NAME }}'
      - name: 'Create Issue on Failure'
        # workflow_dispatch inputs are strings, so compare against the string
        # 'true' instead of the boolean literal false (a string-vs-boolean
        # comparison never matches, which silently disabled this step).
        if: "${{ failure() && github.event.inputs.dry_run != 'true' }}"
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          RELEASE_TAG: '${{ github.event.inputs.version }}'
          DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'
        # Double quotes are required so ${RELEASE_TAG}, ${DETAILS_URL} and the
        # $(date ...) substitution expand (single quotes left them literal).
        run: |
          gh issue create \
            --title "Manual Release Failed for ${RELEASE_TAG} on $(date +'%Y-%m-%d')" \
            --body "The manual release workflow failed. See the full run for details: ${DETAILS_URL}" \
            --label 'release-failure,priority/p0'
================================================
FILE: .github/workflows/release-nightly.yml
================================================
name: 'Release: Nightly'

on:
  schedule:
    - cron: '0 0 * * *'
  workflow_dispatch:
    inputs:
      dry_run:
        description: 'Run a dry-run of the release process; no branches, npm packages or GitHub releases will be created.'
        required: true
        type: 'boolean'
        default: true
      force_skip_tests:
        description: 'Select to skip the "Run Tests" step in testing. Prod releases should run tests'
        required: false
        type: 'boolean'
        default: true
      ref:
        description: 'The branch, tag, or SHA to release from.'
        required: false
        type: 'string'
        default: 'main'
      environment:
        description: 'Environment'
        required: false
        type: 'choice'
        options:
          - 'prod'
          - 'dev'
        default: 'prod'

jobs:
  release:
    if: "github.repository == 'google-gemini/gemini-cli'"
    environment: "${{ github.event.inputs.environment || 'prod' }}"
    runs-on: 'ubuntu-latest'
    permissions:
      contents: 'write'
      packages: 'write'
      issues: 'write'
      pull-requests: 'write'
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
        with:
          fetch-depth: 0
      # Second checkout of the requested ref: the release is built from it,
      # while the workflow definition comes from the triggering branch.
      - name: 'Checkout Release Code'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
        with:
          ref: '${{ github.event.inputs.ref }}'
          path: 'release'
          fetch-depth: 0
      - name: 'Setup Node.js'
        uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
        with:
          node-version-file: './release/.nvmrc'
          cache: 'npm'
      - name: 'Install Dependencies'
        working-directory: './release'
        run: 'npm ci'
      - name: 'Print Inputs'
        shell: 'bash'
        env:
          JSON_INPUTS: '${{ toJSON(github.event.inputs) }}'
        run: 'echo "$JSON_INPUTS"'
      - name: 'Calculate Release Variables'
        id: 'vars'
        uses: './.github/actions/calculate-vars'
        with:
          dry_run: '${{ github.event.inputs.dry_run }}'
      - name: 'Print Calculated vars'
        shell: 'bash'
        env:
          JSON_VARS: '${{ toJSON(steps.vars.outputs) }}'
        run: 'echo "$JSON_VARS"'
      # Scheduled runs always test; manual runs test only when explicitly asked.
      - name: 'Run Tests'
        if: "${{ github.event_name == 'schedule' || github.event.inputs.force_skip_tests == 'false' }}"
        uses: './.github/actions/run-tests'
        with:
          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
          working-directory: './release'
      - name: 'Get Nightly Version'
        id: 'nightly_version'
        working-directory: './release'
        env:
          GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
        run: |
          # Calculate the version using the centralized script
          VERSION_JSON=$(node scripts/get-release-version.js --type=nightly)
          # Extract values for logging and outputs
          RELEASE_TAG=$(echo "${VERSION_JSON}" | jq -r .releaseTag)
          RELEASE_VERSION=$(echo "${VERSION_JSON}" | jq -r .releaseVersion)
          NPM_TAG=$(echo "${VERSION_JSON}" | jq -r .npmTag)
          PREVIOUS_TAG=$(echo "${VERSION_JSON}" | jq -r .previousReleaseTag)
          # Print calculated values for logging
          echo "Calculated Release Tag: ${RELEASE_TAG}"
          echo "Calculated Release Version: ${RELEASE_VERSION}"
          echo "Calculated Previous Tag: ${PREVIOUS_TAG}"
          # Set outputs for subsequent steps
          echo "RELEASE_TAG=${RELEASE_TAG}" >> "${GITHUB_OUTPUT}"
          echo "RELEASE_VERSION=${RELEASE_VERSION}" >> "${GITHUB_OUTPUT}"
          echo "NPM_TAG=${NPM_TAG}" >> "${GITHUB_OUTPUT}"
          echo "PREVIOUS_TAG=${PREVIOUS_TAG}" >> "${GITHUB_OUTPUT}"
      - name: 'Publish Release'
        uses: './.github/actions/publish-release'
        with:
          release-version: '${{ steps.nightly_version.outputs.RELEASE_VERSION }}'
          release-tag: '${{ steps.nightly_version.outputs.RELEASE_TAG }}'
          npm-tag: '${{ steps.nightly_version.outputs.NPM_TAG }}'
          wombat-token-core: '${{ secrets.WOMBAT_TOKEN_CORE }}'
          wombat-token-cli: '${{ secrets.WOMBAT_TOKEN_CLI }}'
          wombat-token-a2a-server: '${{ secrets.WOMBAT_TOKEN_A2A_SERVER }}'
          github-token: '${{ secrets.GITHUB_TOKEN }}'
          github-release-token: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
          dry-run: '${{ steps.vars.outputs.is_dry_run }}'
          previous-tag: '${{ steps.nightly_version.outputs.PREVIOUS_TAG }}'
          working-directory: './release'
          skip-branch-cleanup: true
          force-skip-tests: "${{ github.event_name != 'schedule' && github.event.inputs.force_skip_tests == 'true' }}"
          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
          npm-registry-publish-url: '${{ vars.NPM_REGISTRY_PUBLISH_URL }}'
          npm-registry-url: '${{ vars.NPM_REGISTRY_URL }}'
          npm-registry-scope: '${{ vars.NPM_REGISTRY_SCOPE }}'
          cli-package-name: '${{ vars.CLI_PACKAGE_NAME }}'
          core-package-name: '${{ vars.CORE_PACKAGE_NAME }}'
          a2a-package-name: '${{ vars.A2A_PACKAGE_NAME }}'
      - name: 'Create and Merge Pull Request'
        if: "github.event.inputs.environment != 'dev'"
        uses: './.github/actions/create-pull-request'
        with:
          branch-name: 'release/${{ steps.nightly_version.outputs.RELEASE_TAG }}'
          pr-title: 'chore/release: bump version to ${{ steps.nightly_version.outputs.RELEASE_VERSION }}'
          pr-body: 'Automated version bump for nightly release.'
          github-token: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
          dry-run: '${{ steps.vars.outputs.is_dry_run }}'
          working-directory: './release'
      - name: 'Create Issue on Failure'
        if: "${{ failure() && github.event.inputs.environment != 'dev' && (github.event_name == 'schedule' || github.event.inputs.dry_run != 'true') }}"
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          RELEASE_TAG: '${{ steps.nightly_version.outputs.RELEASE_TAG }}'
          DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'
        run: |
          gh issue create \
            --title "Nightly Release Failed for ${RELEASE_TAG} on $(date +'%Y-%m-%d')" \
            --body "The nightly-release workflow failed. See the full run for details: ${DETAILS_URL}" \
            --label 'release-failure,priority/p0'
================================================
FILE: .github/workflows/release-notes.yml
================================================
# This workflow is triggered on every new release.
# It uses Gemini to generate release notes and creates a PR with the changes.
name: 'Generate Release Notes'

on:
  release:
    types: ['published']
  workflow_dispatch:
    inputs:
      version:
        description: 'New version (e.g., v1.2.3)'
        required: true
        type: 'string'
      body:
        description: 'Release notes body'
        required: true
        type: 'string'
      time:
        description: 'Release time'
        required: true
        type: 'string'

jobs:
  generate-release-notes:
    if: "github.repository == 'google-gemini/gemini-cli'"
    runs-on: 'ubuntu-latest'
    permissions:
      contents: 'write'
      pull-requests: 'write'
    steps:
      - name: 'Checkout repository'
        uses: 'actions/checkout@v4'
        with:
          # The user-level skills need to be available to the workflow
          fetch-depth: 0
          ref: 'main'
      - name: 'Set up Node.js'
        uses: 'actions/setup-node@v4'
        with:
          node-version: '20'
      - name: 'Get release information'
        id: 'release_info'
        env:
          GH_TOKEN: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
          # Release metadata is routed through env vars rather than being
          # interpolated directly into the script, so release-controlled text
          # cannot inject shell commands.
          VERSION: '${{ github.event.inputs.version || github.event.release.tag_name }}'
          TIME: '${{ github.event.inputs.time || github.event.release.created_at }}'
          BODY: '${{ github.event.inputs.body || github.event.release.body }}'
        run: |
          echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT"
          echo "TIME=${TIME}" >> "$GITHUB_OUTPUT"
          # Use a heredoc to preserve the multiline release body. The opening
          # delimiter must be 'RAW_CHANGELOG<<EOF' so it pairs with the
          # closing 'EOF' line below.
          echo 'RAW_CHANGELOG<<EOF' >> "$GITHUB_OUTPUT"
          printf "%s\n" "$BODY" >> "$GITHUB_OUTPUT"
          echo 'EOF' >> "$GITHUB_OUTPUT"
      - name: 'Validate version'
        id: 'validate_version'
        env:
          VERSION: '${{ steps.release_info.outputs.VERSION }}'
        run: |
          # Nightly tags are excluded: their changelog is not published.
          if echo "$VERSION" | grep -q "nightly"; then
            echo "Nightly release detected. Stopping workflow."
            echo "CONTINUE=false" >> "$GITHUB_OUTPUT"
          else
            echo "CONTINUE=true" >> "$GITHUB_OUTPUT"
          fi
      - name: 'Generate Changelog with Gemini'
        if: "steps.validate_version.outputs.CONTINUE == 'true'"
        uses: 'google-github-actions/run-gemini-cli@a3bf79042542528e91937b3a3a6fbc4967ee3c31' # ratchet:google-github-actions/run-gemini-cli@v0
        with:
          gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
          prompt: |
            Activate the 'docs-changelog' skill.
            **Release Information:**
            - New Version: ${{ steps.release_info.outputs.VERSION }}
            - Release Date: ${{ steps.release_info.outputs.TIME }}
            - Raw Changelog Data: ${{ steps.release_info.outputs.RAW_CHANGELOG }}
            Execute the release notes generation process using the information provided.
            When you are done, please output your thought process and the steps you took for future debugging purposes.
      - name: 'Create Pull Request'
        if: "steps.validate_version.outputs.CONTINUE == 'true'"
        uses: 'peter-evans/create-pull-request@v6'
        with:
          token: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
          commit-message: 'docs(changelog): update for ${{ steps.release_info.outputs.VERSION }}'
          title: 'Changelog for ${{ steps.release_info.outputs.VERSION }}'
          body: |
            This PR contains the auto-generated changelog for the ${{ steps.release_info.outputs.VERSION }} release.
            Please review and merge.
            Related to #18505
          branch: 'changelog-${{ steps.release_info.outputs.VERSION }}'
          base: 'main'
          team-reviewers: 'gemini-cli-docs, gemini-cli-maintainers'
          delete-branch: true
================================================
FILE: .github/workflows/release-patch-0-from-comment.yml
================================================
name: 'Release: Patch (0) from Comment'
on:
issue_comment:
types: ['created']
jobs:
slash-command:
runs-on: 'ubuntu-latest'
# Only run if the comment is from a human user (not automated)
if: "github.event.comment.user.type == 'User' && github.event.comment.user.login != 'github-actions[bot]'"
permissions:
contents: 'write'
pull-requests: 'write'
actions: 'write'
steps:
- name: 'Checkout'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
fetch-depth: 1
- name: 'Slash Command Dispatch'
id: 'slash_command'
uses: 'peter-evans/slash-command-dispatch@40877f718dce0101edfc7aea2b3800cc192f9ed5'
with:
token: '${{ secrets.GITHUB_TOKEN }}'
commands: 'patch'
permission: 'write'
issue-type: 'pull-request'
- name: 'Get PR Status'
id: 'pr_status'
if: "startsWith(github.event.comment.body, '/patch')"
env:
GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
run: |
gh pr view "${{ github.event.issue.number }}" --json mergeCommit,state > pr_status.json
echo "MERGE_COMMIT_SHA=$(jq -r .mergeCommit.oid pr_status.json)" >> "$GITHUB_OUTPUT"
echo "STATE=$(jq -r .state pr_status.json)" >> "$GITHUB_OUTPUT"
- name: 'Dispatch if Merged'
if: "steps.pr_status.outputs.STATE == 'MERGED'"
id: 'dispatch_patch'
uses: 'actions/github-script@00f12e3e20659f42342b1c0226afda7f7c042325'
env:
COMMENT_BODY: '${{ github.event.comment.body }}'
with:
github-token: '${{ secrets.GITHUB_TOKEN }}'
script: |
// Parse the comment body directly to extract channel(s)
const commentBody = process.env.COMMENT_BODY;
console.log('Comment body:', commentBody);
let channels = ['stable', 'preview']; // default to both
// Parse different formats:
// /patch (defaults to both)
// /patch both
// /patch stable
// /patch preview
// Map the '/patch' comment text to the set of release channels to dispatch.
// Bare '/patch' (or '/patch both') patches stable AND preview.
if (commentBody.trim() === '/patch' || commentBody.trim() === '/patch both') {
  channels = ['stable', 'preview'];
} else if (commentBody.trim() === '/patch stable') {
  channels = ['stable'];
} else if (commentBody.trim() === '/patch preview') {
  channels = ['preview'];
} else {
  // Fallback parsing for legacy formats
  if (commentBody.includes('channel=preview')) {
    channels = ['preview'];
  } else if (commentBody.includes('--channel preview')) {
    channels = ['preview'];
  }
}
console.log('Detected channels:', channels);
const dispatchedRuns = [];
// Dispatch workflow for each channel
for (const channel of channels) {
  console.log(`Dispatching workflow for channel: ${channel}`);
  // NOTE: the '${{ ... }}' placeholders are substituted by the Actions runner
  // before this script executes, so they arrive here as plain strings.
  const response = await github.rest.actions.createWorkflowDispatch({
    owner: context.repo.owner,
    repo: context.repo.repo,
    workflow_id: 'release-patch-1-create-pr.yml',
    ref: 'main',
    inputs: {
      commit: '${{ steps.pr_status.outputs.MERGE_COMMIT_SHA }}',
      channel: channel,
      original_pr: '${{ github.event.issue.number }}',
      environment: 'prod'
    }
  });
  dispatchedRuns.push({ channel, response });
}
// Wait a moment for the workflows to be created
await new Promise(resolve => setTimeout(resolve, 3000));
// createWorkflowDispatch returns no run id, so list recent runs and match
// the ones we just dispatched heuristically (event type + creation time).
const runs = await github.rest.actions.listWorkflowRuns({
  owner: context.repo.owner,
  repo: context.repo.repo,
  workflow_id: 'release-patch-1-create-pr.yml',
  per_page: 20 // Increased to handle multiple runs
});
// Find the recent runs that match our trigger
const recentRuns = runs.data.workflow_runs.filter(run =>
  run.event === 'workflow_dispatch' &&
  new Date(run.created_at) > new Date(Date.now() - 15000) // Within last 15 seconds
).slice(0, channels.length); // Limit to the number of channels we dispatched
// Set outputs
core.setOutput('dispatched_channels', channels.join(','));
core.setOutput('dispatched_run_count', channels.length.toString());
// URLs/ids may legitimately be absent if the heuristic matched nothing;
// downstream steps branch on dispatched_run_urls being set.
if (recentRuns.length > 0) {
  core.setOutput('dispatched_run_urls', recentRuns.map(r => r.html_url).join(','));
  core.setOutput('dispatched_run_ids', recentRuns.map(r => r.id).join(','));
  const markdownLinks = recentRuns.map(r => `- [View dispatched workflow run](${r.html_url})`).join('\n');
  core.setOutput('dispatched_run_links', markdownLinks);
}
# Tell the commenter that /patch only works once the PR is merged.
- name: 'Comment on Failure'
  if: "startsWith(github.event.comment.body, '/patch') && steps.pr_status.outputs.STATE != 'MERGED'"
  uses: 'peter-evans/create-or-update-comment@67dcc547d311b736a8e6c5c236542148a47adc3d'
  with:
    token: '${{ secrets.GITHUB_TOKEN }}'
    issue-number: '${{ github.event.issue.number }}'
    body: |
      :x: The `/patch` command failed. This pull request must be merged before a patch can be created.
# Success summary with direct links to the dispatched run(s); only fires
# when the dispatch step found run URLs via its recent-runs heuristic.
- name: 'Final Status Comment - Success'
  if: "always() && startsWith(github.event.comment.body, '/patch') && steps.dispatch_patch.outcome == 'success' && steps.dispatch_patch.outputs.dispatched_run_urls"
  uses: 'peter-evans/create-or-update-comment@67dcc547d311b736a8e6c5c236542148a47adc3d'
  with:
    token: '${{ secrets.GITHUB_TOKEN }}'
    issue-number: '${{ github.event.issue.number }}'
    body: |
      🚀 **[Step 1/4] Patch workflow(s) waiting for approval!**
      **📋 Details:**
      - **Channels**: `${{ steps.dispatch_patch.outputs.dispatched_channels }}`
      - **Commit**: `${{ steps.pr_status.outputs.MERGE_COMMIT_SHA }}`
      - **Workflows Created**: ${{ steps.dispatch_patch.outputs.dispatched_run_count }}
      **⏳ Status:** The patch creation workflow has been triggered and is waiting for deployment approval. Please visit the specific workflow links below and approve the runs.
      **🔗 Track Progress:**
      ${{ steps.dispatch_patch.outputs.dispatched_run_links }}
      - [View patch workflow history](https://github.com/${{ github.repository }}/actions/workflows/release-patch-1-create-pr.yml)
      - [This trigger workflow run](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})
# Fallback success summary when no specific run URLs could be matched;
# points at the workflow history instead.
- name: 'Final Status Comment - Dispatch Success (No URL)'
  if: "always() && startsWith(github.event.comment.body, '/patch') && steps.dispatch_patch.outcome == 'success' && !steps.dispatch_patch.outputs.dispatched_run_urls"
  uses: 'peter-evans/create-or-update-comment@67dcc547d311b736a8e6c5c236542148a47adc3d'
  with:
    token: '${{ secrets.GITHUB_TOKEN }}'
    issue-number: '${{ github.event.issue.number }}'
    body: |
      🚀 **[Step 1/4] Patch workflow(s) waiting for approval!**
      **📋 Details:**
      - **Channels**: `${{ steps.dispatch_patch.outputs.dispatched_channels }}`
      - **Commit**: `${{ steps.pr_status.outputs.MERGE_COMMIT_SHA }}`
      - **Workflows Created**: ${{ steps.dispatch_patch.outputs.dispatched_run_count }}
      **⏳ Status:** The patch creation workflow has been triggered and is waiting for deployment approval. Please visit the workflow history link below and approve the runs.
      **🔗 Track Progress:**
      - [View patch workflow history](https://github.com/${{ github.repository }}/actions/workflows/release-patch-1-create-pr.yml)
      - [This trigger workflow run](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})
# Posted when the dispatch step itself failed or was cancelled.
- name: 'Final Status Comment - Failure'
  if: "always() && startsWith(github.event.comment.body, '/patch') && (steps.dispatch_patch.outcome == 'failure' || steps.dispatch_patch.outcome == 'cancelled')"
  uses: 'peter-evans/create-or-update-comment@67dcc547d311b736a8e6c5c236542148a47adc3d'
  with:
    token: '${{ secrets.GITHUB_TOKEN }}'
    issue-number: '${{ github.event.issue.number }}'
    body: |
      ❌ **[Step 1/4] Patch workflow dispatch failed!**
      There was an error dispatching the patch creation workflow.
      **🔍 Troubleshooting:**
      - Check that the PR is properly merged
      - Verify workflow permissions
      - Review error logs in the workflow run
      **🔗 Debug Links:**
      - [This workflow run](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})
      - [Patch workflow history](https://github.com/${{ github.repository }}/actions/workflows/release-patch-1-create-pr.yml)
================================================
FILE: .github/workflows/release-patch-1-create-pr.yml
================================================
# Step 1 of the patch pipeline: cherry-pick a commit onto a release branch
# and open a hotfix PR. Dispatched by the /patch comment trigger workflow.
name: 'Release: Patch (1) Create PR'
run-name: >-
  Release Patch (1) Create PR | S:${{ inputs.channel }} | C:${{ inputs.commit }} ${{ inputs.original_pr && format('| PR:#{0}', inputs.original_pr) || '' }}
on:
  workflow_dispatch:
    inputs:
      commit:
        description: 'The commit SHA to cherry-pick for the patch.'
        required: true
        type: 'string'
      channel:
        description: 'The release channel to patch.'
        required: true
        type: 'choice'
        options:
          - 'stable'
          - 'preview'
      dry_run:
        description: 'Whether to run in dry-run mode.'
        required: false
        type: 'boolean'
        default: false
      ref:
        description: 'The branch, tag, or SHA to test from.'
        required: false
        type: 'string'
        default: 'main'
      original_pr:
        description: 'The original PR number to comment back on.'
        required: false
        type: 'string'
      environment:
        description: 'Environment'
        required: false
        type: 'choice'
        options:
          - 'prod'
          - 'dev'
        default: 'prod'
jobs:
  create-patch:
    runs-on: 'ubuntu-latest'
    # Deployment environment gates this job behind manual approval.
    environment: "${{ github.event.inputs.environment || 'prod' }}"
    permissions:
      contents: 'write'
      pull-requests: 'write'
      actions: 'write'
    steps:
      # Full history is required for cherry-picking onto release branches.
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8' # ratchet:actions/checkout@v5
        with:
          ref: '${{ github.event.inputs.ref }}'
          fetch-depth: 0
      - name: 'Setup Node.js'
        uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
        with:
          node-version-file: '.nvmrc'
          cache: 'npm'
      - name: 'configure .npmrc'
        uses: './.github/actions/setup-npmrc'
        with:
          github-token: '${{ secrets.GITHUB_TOKEN }}'
      - name: 'Install Script Dependencies'
        run: 'npm ci'
      # Commit as the bot identity and push over HTTPS with the job token.
      - name: 'Configure Git User'
        env:
          GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          REPOSITORY: '${{ github.repository }}'
        run: |-
          git config user.name "gemini-cli-robot"
          git config user.email "gemini-cli-robot@google.com"
          # Configure git to use GITHUB_TOKEN for remote operations (has actions:write for workflow files)
          git remote set-url origin "https://x-access-token:${GH_TOKEN}@github.com/${REPOSITORY}.git"
# Runs the patch-PR script, mirroring its output both to the job log and
# into $GITHUB_ENV as a multi-line LOG_CONTENT value so the follow-up
# comment step can include it.
- name: 'Create Patch'
  id: 'create_patch'
  env:
    GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
    GH_TOKEN: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
    CLI_PACKAGE_NAME: '${{ vars.CLI_PACKAGE_NAME }}'
    PATCH_COMMIT: '${{ github.event.inputs.commit }}'
    PATCH_CHANNEL: '${{ github.event.inputs.channel }}'
    ORIGINAL_PR: '${{ github.event.inputs.original_pr }}'
    DRY_RUN: '${{ github.event.inputs.dry_run }}'
  # continue-on-error so the comment/fail steps below still run and report.
  continue-on-error: true
  run: |
    # Capture output and display it in logs using tee
    {
      node scripts/releasing/create-patch-pr.js \
        --cli-package-name="${CLI_PACKAGE_NAME}" \
        --commit="${PATCH_COMMIT}" \
        --channel="${PATCH_CHANNEL}" \
        --pullRequestNumber="${ORIGINAL_PR}" \
        --dry-run="${DRY_RUN}"
    } 2>&1 | tee >(
      # Write the combined output into GITHUB_ENV using the multiline
      # heredoc-delimiter syntax ("NAME<<EOF" ... "EOF"). The previous
      # 'echo "LOG_CONTENT<> ...' form was malformed and produced an
      # invalid env-file entry instead of the delimiter line.
      echo "LOG_CONTENT<<EOF" >> "$GITHUB_ENV"
      cat >> "$GITHUB_ENV"
      echo "EOF" >> "$GITHUB_ENV"
    )
    # PIPESTATUS[0] is the node script's exit code; plain $? would report
    # tee's status and mask failures.
    echo "EXIT_CODE=${PIPESTATUS[0]}" >> "$GITHUB_OUTPUT"
# Post the captured log back on the originating PR (best effort).
- name: 'Comment on Original PR'
  if: 'always() && inputs.original_pr'
  env:
    GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
    ORIGINAL_PR: '${{ github.event.inputs.original_pr }}'
    EXIT_CODE: '${{ steps.create_patch.outputs.EXIT_CODE }}'
    COMMIT: '${{ github.event.inputs.commit }}'
    CHANNEL: '${{ github.event.inputs.channel }}'
    REPOSITORY: '${{ github.repository }}'
    GITHUB_RUN_ID: '${{ github.run_id }}'
    LOG_CONTENT: '${{ env.LOG_CONTENT }}'
    TARGET_REF: '${{ github.event.inputs.ref }}'
    ENVIRONMENT: '${{ github.event.inputs.environment }}'
  continue-on-error: true
  run: |
    # Return to the workflow's ref so the comment script is the expected version
    # (the create-patch script may have left a different branch checked out).
    git checkout "${TARGET_REF}"
    node scripts/releasing/patch-create-comment.js
# The main step uses continue-on-error, so re-surface its captured exit code
# here to mark the overall run red when the patch script failed.
- name: 'Fail Workflow if Main Task Failed'
  if: 'always() && steps.create_patch.outputs.EXIT_CODE != 0'
  env:
    EXIT_CODE: '${{ steps.create_patch.outputs.EXIT_CODE }}'
  run: |
    echo "Patch creation failed with exit code: ${EXIT_CODE}"
    echo "Check the logs above and the comment posted to the original PR for details."
    exit 1
================================================
FILE: .github/workflows/release-patch-2-trigger.yml
================================================
# Step 2 of the patch pipeline: when a hotfix PR into a release/** branch is
# merged (or on manual dispatch), trigger the actual patch release workflow.
name: 'Release: Patch (2) Trigger'
run-name: >-
  Release Patch (2) Trigger |
  ${{ github.event.pull_request.number && format('PR #{0}', github.event.pull_request.number) || 'Manual' }} |
  ${{ github.event.pull_request.head.ref || github.event.inputs.ref }}
on:
  pull_request:
    types:
      - 'closed'
    branches:
      - 'release/**'
  workflow_dispatch:
    inputs:
      ref:
        description: 'The head ref of the merged hotfix PR to trigger the release for (e.g. hotfix/v1.2.3/cherry-pick-abc).'
        required: true
        type: 'string'
      workflow_ref:
        description: 'The ref to checkout the workflow code from.'
        required: false
        type: 'string'
        default: 'main'
      workflow_id:
        description: 'The workflow to trigger. Defaults to release-patch-3-release.yml'
        required: false
        type: 'string'
        default: 'release-patch-3-release.yml'
      dry_run:
        description: 'Whether this is a dry run.'
        required: false
        type: 'boolean'
        default: false
      force_skip_tests:
        description: 'Select to skip the "Run Tests" step in testing. Prod releases should run tests'
        required: false
        type: 'boolean'
        default: false
      test_mode:
        description: 'Whether or not to run in test mode'
        required: false
        type: 'boolean'
        default: false
      environment:
        description: 'Environment'
        required: false
        type: 'choice'
        options:
          - 'prod'
          - 'dev'
        default: 'prod'
jobs:
  trigger-patch-release:
    # Only merged hotfix/* PRs count; a closed-without-merge PR is ignored.
    if: "(github.event_name == 'pull_request' && github.event.pull_request.merged == true && startsWith(github.event.pull_request.head.ref, 'hotfix/')) || github.event_name == 'workflow_dispatch'"
    runs-on: 'ubuntu-latest'
    environment: "${{ github.event.inputs.environment || 'prod' }}"
    permissions:
      actions: 'write'
      contents: 'write'
      pull-requests: 'write'
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
        with:
          ref: "${{ github.event.inputs.workflow_ref || 'main' }}"
          fetch-depth: 1
      - name: 'Setup Node.js'
        uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
        with:
          node-version-file: '.nvmrc'
          cache: 'npm'
      - name: 'Install Dependencies'
        run: 'npm ci'
      # All trigger logic lives in patch-trigger.js; the PR/dispatch context
      # is passed in via env, covering both event shapes.
      - name: 'Trigger Patch Release'
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          HEAD_REF: "${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref || github.event.inputs.ref }}"
          PR_BODY: "${{ github.event_name == 'pull_request' && github.event.pull_request.body || '' }}"
          WORKFLOW_ID: '${{ github.event.inputs.workflow_id }}'
          GITHUB_REPOSITORY_OWNER: '${{ github.repository_owner }}'
          GITHUB_REPOSITORY_NAME: '${{ github.event.repository.name }}'
          GITHUB_EVENT_NAME: '${{ github.event_name }}'
          GITHUB_EVENT_PAYLOAD: '${{ toJSON(github.event) }}'
          FORCE_SKIP_TESTS: '${{ github.event.inputs.force_skip_tests }}'
          TEST_MODE: '${{ github.event.inputs.test_mode }}'
          ENVIRONMENT: "${{ github.event.inputs.environment || 'prod' }}"
          DRY_RUN: '${{ github.event.inputs.dry_run }}'
        run: |
          node scripts/releasing/patch-trigger.js --dry-run="${DRY_RUN}"
================================================
FILE: .github/workflows/release-patch-3-release.yml
================================================
# Step 3 of the patch pipeline: build and publish the patch release from a
# hotfix branch. Normally dispatched by release-patch-2-trigger.yml.
name: 'Release: Patch (3) Release'
run-name: >-
  Release Patch (3) Release | T:${{ inputs.type }} | R:${{ inputs.release_ref }} ${{ inputs.original_pr && format('| PR:#{0}', inputs.original_pr) || '' }}
on:
  workflow_dispatch:
    inputs:
      type:
        description: 'The type of release to perform.'
        required: true
        type: 'choice'
        options:
          - 'stable'
          - 'preview'
      # Defaults to true so a hand-triggered run is safe unless explicitly
      # switched to a real release.
      dry_run:
        description: 'Run a dry-run of the release process; no branches, npm packages or GitHub releases will be created.'
        required: true
        type: 'boolean'
        default: true
      force_skip_tests:
        description: 'Select to skip the "Run Tests" step in testing. Prod releases should run tests'
        required: false
        type: 'boolean'
        default: false
      release_ref:
        description: 'The branch, tag, or SHA to release from.'
        required: true
        type: 'string'
      original_pr:
        description: 'The original PR number to comment back on.'
        required: false
        type: 'string'
      environment:
        description: 'Environment'
        required: false
        type: 'choice'
        options:
          - 'prod'
          - 'dev'
        default: 'prod'
jobs:
  release:
    runs-on: 'ubuntu-latest'
    environment: "${{ github.event.inputs.environment || 'prod' }}"
    permissions:
      contents: 'write'
      packages: 'write'
      pull-requests: 'write'
      issues: 'write'
    steps:
      # Main checkout (default branch): full history + tags for version math.
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
        with:
          fetch-depth: 0
          fetch-tags: true
      # Second checkout of the code actually being released, under ./release.
      - name: 'Checkout Release Code'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
        with:
          ref: '${{ github.event.inputs.release_ref }}'
          path: 'release'
          fetch-depth: 0
      - name: 'Setup Node.js'
        uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020' # ratchet:actions/setup-node@v4
        with:
          node-version-file: '.nvmrc'
          cache: 'npm'
      - name: 'configure .npmrc'
        uses: './.github/actions/setup-npmrc'
        with:
          github-token: '${{ secrets.GITHUB_TOKEN }}'
      # Dependencies are installed twice: once for the release scripts in the
      # main checkout, once for the release code itself.
      - name: 'Install Script Dependencies'
        run: |-
          npm ci
      - name: 'Install Dependencies'
        working-directory: './release'
        run: |-
          npm ci
      - name: 'Print Inputs'
        shell: 'bash'
        env:
          JSON_INPUTS: '${{ toJSON(inputs) }}'
        run: 'echo "$JSON_INPUTS"'
      - name: 'Get Patch Version'
        id: 'patch_version'
        env:
          GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          PATCH_FROM: '${{ github.event.inputs.type }}'
          CLI_PACKAGE_NAME: '${{vars.CLI_PACKAGE_NAME}}'
        run: |
          # Use the existing get-release-version.js script to calculate patch version
          # Run from main checkout which has full git history and access to npm
          PATCH_JSON=$(node scripts/get-release-version.js --type=patch --cli-package-name="${CLI_PACKAGE_NAME}" --patch-from="${PATCH_FROM}")
          echo "Patch version calculation result: ${PATCH_JSON}"
          RELEASE_VERSION=$(echo "${PATCH_JSON}" | jq -r .releaseVersion)
          RELEASE_TAG=$(echo "${PATCH_JSON}" | jq -r .releaseTag)
          NPM_TAG=$(echo "${PATCH_JSON}" | jq -r .npmTag)
          PREVIOUS_TAG=$(echo "${PATCH_JSON}" | jq -r .previousReleaseTag)
          echo "RELEASE_VERSION=${RELEASE_VERSION}" >> "${GITHUB_OUTPUT}"
          echo "RELEASE_TAG=${RELEASE_TAG}" >> "${GITHUB_OUTPUT}"
          echo "NPM_TAG=${NPM_TAG}" >> "${GITHUB_OUTPUT}"
          echo "PREVIOUS_TAG=${PREVIOUS_TAG}" >> "${GITHUB_OUTPUT}"
# Guard against concurrent patch releases: recompute the version and fail if
# it differs from the value calculated earlier in this run.
- name: 'Verify Version Consistency'
  env:
    GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
    CHANNEL: '${{ github.event.inputs.type }}'
    ORIGINAL_RELEASE_VERSION: '${{ steps.patch_version.outputs.RELEASE_VERSION }}'
    ORIGINAL_RELEASE_TAG: '${{ steps.patch_version.outputs.RELEASE_TAG }}'
    ORIGINAL_PREVIOUS_TAG: '${{ steps.patch_version.outputs.PREVIOUS_TAG }}'
    VARS_CLI_PACKAGE_NAME: '${{ vars.CLI_PACKAGE_NAME }}'
  run: |
    echo "🔍 Verifying no concurrent patch releases have occurred..."
    # Store original calculation for comparison
    echo "Original calculation:"
    echo "  Release version: ${ORIGINAL_RELEASE_VERSION}"
    echo "  Release tag: ${ORIGINAL_RELEASE_TAG}"
    echo "  Previous tag: ${ORIGINAL_PREVIOUS_TAG}"
    # Re-run the same version calculation script
    echo "Re-calculating version to check for changes..."
    CURRENT_PATCH_JSON=$(node scripts/get-release-version.js --cli-package-name="${VARS_CLI_PACKAGE_NAME}" --type=patch --patch-from="${CHANNEL}")
    CURRENT_RELEASE_VERSION=$(echo "${CURRENT_PATCH_JSON}" | jq -r .releaseVersion)
    CURRENT_RELEASE_TAG=$(echo "${CURRENT_PATCH_JSON}" | jq -r .releaseTag)
    CURRENT_PREVIOUS_TAG=$(echo "${CURRENT_PATCH_JSON}" | jq -r .previousReleaseTag)
    echo "Current calculation:"
    echo "  Release version: ${CURRENT_RELEASE_VERSION}"
    echo "  Release tag: ${CURRENT_RELEASE_TAG}"
    echo "  Previous tag: ${CURRENT_PREVIOUS_TAG}"
    # Compare calculations
    if [[ "${ORIGINAL_RELEASE_VERSION}" != "${CURRENT_RELEASE_VERSION}" ]] || \
       [[ "${ORIGINAL_RELEASE_TAG}" != "${CURRENT_RELEASE_TAG}" ]] || \
       [[ "${ORIGINAL_PREVIOUS_TAG}" != "${CURRENT_PREVIOUS_TAG}" ]]; then
      echo "❌ RACE CONDITION DETECTED: Version calculations have changed!"
      echo "This indicates another patch release completed while this one was in progress."
      echo ""
      echo "Originally planned: ${ORIGINAL_RELEASE_VERSION} (from ${ORIGINAL_PREVIOUS_TAG})"
      echo "Should now build: ${CURRENT_RELEASE_VERSION} (from ${CURRENT_PREVIOUS_TAG})"
      echo ""
      echo "# Setting outputs for failure comment"
      # Exported via GITHUB_ENV so the failure-comment step can report the
      # versions that should have been used.
      echo "CURRENT_RELEASE_VERSION=${CURRENT_RELEASE_VERSION}" >> "${GITHUB_ENV}"
      echo "CURRENT_RELEASE_TAG=${CURRENT_RELEASE_TAG}" >> "${GITHUB_ENV}"
      echo "CURRENT_PREVIOUS_TAG=${CURRENT_PREVIOUS_TAG}" >> "${GITHUB_ENV}"
      echo "The patch release must be restarted to use the correct version numbers."
      exit 1
    fi
    echo "✅ Version calculations unchanged - proceeding with release"
# Log the final release plan before doing anything irreversible.
- name: 'Print Calculated Version'
  run: |-
    echo "Patch Release Summary:"
    echo "  Release Version: ${STEPS_PATCH_VERSION_OUTPUTS_RELEASE_VERSION}"
    echo "  Release Tag: ${STEPS_PATCH_VERSION_OUTPUTS_RELEASE_TAG}"
    echo "  NPM Tag: ${STEPS_PATCH_VERSION_OUTPUTS_NPM_TAG}"
    echo "  Previous Tag: ${STEPS_PATCH_VERSION_OUTPUTS_PREVIOUS_TAG}"
  env:
    STEPS_PATCH_VERSION_OUTPUTS_RELEASE_VERSION: '${{ steps.patch_version.outputs.RELEASE_VERSION }}'
    STEPS_PATCH_VERSION_OUTPUTS_RELEASE_TAG: '${{ steps.patch_version.outputs.RELEASE_TAG }}'
    STEPS_PATCH_VERSION_OUTPUTS_NPM_TAG: '${{ steps.patch_version.outputs.NPM_TAG }}'
    STEPS_PATCH_VERSION_OUTPUTS_PREVIOUS_TAG: '${{ steps.patch_version.outputs.PREVIOUS_TAG }}'
# Tests run against the release checkout unless explicitly skipped.
- name: 'Run Tests'
  if: "${{github.event.inputs.force_skip_tests != 'true'}}"
  uses: './.github/actions/run-tests'
  with:
    gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
    working-directory: './release'
# Composite action that tags, builds and publishes packages and the GitHub
# release; honors dry-run.
- name: 'Publish Release'
  uses: './.github/actions/publish-release'
  with:
    release-version: '${{ steps.patch_version.outputs.RELEASE_VERSION }}'
    release-tag: '${{ steps.patch_version.outputs.RELEASE_TAG }}'
    npm-tag: '${{ steps.patch_version.outputs.NPM_TAG }}'
    wombat-token-core: '${{ secrets.WOMBAT_TOKEN_CORE }}'
    wombat-token-cli: '${{ secrets.WOMBAT_TOKEN_CLI }}'
    wombat-token-a2a-server: '${{ secrets.WOMBAT_TOKEN_A2A_SERVER }}'
    github-token: '${{ secrets.GITHUB_TOKEN }}'
    github-release-token: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
    dry-run: '${{ github.event.inputs.dry_run }}'
    previous-tag: '${{ steps.patch_version.outputs.PREVIOUS_TAG }}'
    gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
    npm-registry-publish-url: '${{ vars.NPM_REGISTRY_PUBLISH_URL }}'
    npm-registry-url: '${{ vars.NPM_REGISTRY_URL }}'
    npm-registry-scope: '${{ vars.NPM_REGISTRY_SCOPE }}'
    cli-package-name: '${{ vars.CLI_PACKAGE_NAME }}'
    core-package-name: '${{ vars.CORE_PACKAGE_NAME }}'
    a2a-package-name: '${{ vars.A2A_PACKAGE_NAME }}'
    working-directory: './release'
# File a P0 tracking issue when a real (non-dry-run) patch release fails.
- name: 'Create Issue on Failure'
  # github.event.inputs values are strings in Actions expressions, so compare
  # against the string 'true'. The previous `dry_run == false` compared a
  # string to a boolean, which never matches, so the issue was never filed.
  if: "${{ failure() && github.event.inputs.dry_run != 'true' }}"
  env:
    GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
    RELEASE_TAG: '${{ steps.patch_version.outputs.RELEASE_TAG }}'
    DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'
  run: |
    # Double-quote the title/body so ${RELEASE_TAG}, ${DETAILS_URL} and the
    # date substitution actually expand; the previous single-quoted form
    # posted the literal '${RELEASE_TAG}' and '$(date +%Y-%m-%d)' text.
    gh issue create \
      --title "Patch Release Failed for ${RELEASE_TAG} on $(date +'%Y-%m-%d')" \
      --body "The patch-release workflow failed. See the full run for details: ${DETAILS_URL}" \
      --label 'release-failure,priority/p0'
# Report success back on the PR that initiated the patch (when known).
- name: 'Comment Success on Original PR'
  if: '${{ success() && github.event.inputs.original_pr }}'
  env:
    GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
    ORIGINAL_PR: '${{ github.event.inputs.original_pr }}'
    SUCCESS: 'true'
    RELEASE_VERSION: '${{ steps.patch_version.outputs.RELEASE_VERSION }}'
    RELEASE_TAG: '${{ steps.patch_version.outputs.RELEASE_TAG }}'
    NPM_TAG: '${{ steps.patch_version.outputs.NPM_TAG }}'
    CHANNEL: '${{ github.event.inputs.type }}'
    DRY_RUN: '${{ github.event.inputs.dry_run }}'
    GITHUB_RUN_ID: '${{ github.run_id }}'
    GITHUB_REPOSITORY_OWNER: '${{ github.repository_owner }}'
    GITHUB_REPOSITORY_NAME: '${{ github.event.repository.name }}'
  run: |
    node scripts/releasing/patch-comment.js
# Report failure back on the PR; same script, driven by SUCCESS=false plus
# extra context when the version-consistency guard tripped.
- name: 'Comment Failure on Original PR'
  if: '${{ failure() && github.event.inputs.original_pr }}'
  env:
    GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
    ORIGINAL_PR: '${{ github.event.inputs.original_pr }}'
    SUCCESS: 'false'
    RELEASE_VERSION: '${{ steps.patch_version.outputs.RELEASE_VERSION }}'
    RELEASE_TAG: '${{ steps.patch_version.outputs.RELEASE_TAG }}'
    NPM_TAG: '${{ steps.patch_version.outputs.NPM_TAG }}'
    CHANNEL: '${{ github.event.inputs.type }}'
    DRY_RUN: '${{ github.event.inputs.dry_run }}'
    GITHUB_RUN_ID: '${{ github.run_id }}'
    GITHUB_REPOSITORY_OWNER: '${{ github.repository_owner }}'
    GITHUB_REPOSITORY_NAME: '${{ github.event.repository.name }}'
    # Pass current version info for race condition failures
    CURRENT_RELEASE_VERSION: '${{ env.CURRENT_RELEASE_VERSION }}'
    CURRENT_RELEASE_TAG: '${{ env.CURRENT_RELEASE_TAG }}'
    CURRENT_PREVIOUS_TAG: '${{ env.CURRENT_PREVIOUS_TAG }}'
  run: |
    # Check if this was a version consistency failure
    if [[ -n "${CURRENT_RELEASE_VERSION}" ]]; then
      echo "Detected version race condition failure - posting specific comment with current version info"
      export RACE_CONDITION_FAILURE=true
    fi
    node scripts/releasing/patch-comment.js
================================================
FILE: .github/workflows/release-promote.yml
================================================
# Promotes releases along the channel train: previous preview -> stable,
# requested ref -> new preview, and prepares the next nightly version bump.
name: 'Release: Promote'
on:
  workflow_dispatch:
    inputs:
      # Defaults to true so an accidental dispatch publishes nothing.
      dry_run:
        description: 'Run a dry-run of the release process; no branches, npm packages or GitHub releases will be created.'
        required: true
        type: 'boolean'
        default: true
      force_skip_tests:
        description: 'Select to skip the "Run Tests" step in testing. Prod releases should run tests'
        required: false
        type: 'boolean'
        default: false
      ref:
        description: 'The branch, tag, or SHA to release from.'
        required: false
        type: 'string'
        default: 'main'
      stable_version_override:
        description: 'Manually override the stable version number.'
        required: false
        type: 'string'
      preview_version_override:
        description: 'Manually override the preview version number.'
        required: false
        type: 'string'
      environment:
        description: 'Environment'
        required: false
        type: 'choice'
        options:
          - 'prod'
          - 'dev'
        default: 'prod'
jobs:
  # Computes every version and SHA used by the downstream jobs; all results
  # are exposed as job outputs.
  calculate-versions:
    name: 'Calculate Versions and Plan'
    runs-on: 'ubuntu-latest'
    environment: "${{ github.event.inputs.environment || 'prod' }}"
    outputs:
      STABLE_VERSION: '${{ steps.versions.outputs.STABLE_VERSION }}'
      STABLE_SHA: '${{ steps.versions.outputs.STABLE_SHA }}'
      PREVIOUS_STABLE_TAG: '${{ steps.versions.outputs.PREVIOUS_STABLE_TAG }}'
      PREVIEW_VERSION: '${{ steps.versions.outputs.PREVIEW_VERSION }}'
      PREVIEW_SHA: '${{ steps.versions.outputs.PREVIEW_SHA }}'
      PREVIOUS_PREVIEW_TAG: '${{ steps.versions.outputs.PREVIOUS_PREVIEW_TAG }}'
      NEXT_NIGHTLY_VERSION: '${{ steps.versions.outputs.NEXT_NIGHTLY_VERSION }}'
      PREVIOUS_NIGHTLY_TAG: '${{ steps.versions.outputs.PREVIOUS_NIGHTLY_TAG }}'
    steps:
      # Full history + tags so get-release-version.js / git describe work.
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
        with:
          fetch-depth: 0
          fetch-tags: true
      - name: 'Setup Node.js'
        uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
        with:
          node-version-file: '.nvmrc'
          cache: 'npm'
      - name: 'Install Dependencies'
        run: 'npm ci'
      - name: 'Print Inputs'
        shell: 'bash'
        env:
          JSON_INPUTS: '${{ toJSON(inputs) }}'
        run: 'echo "$JSON_INPUTS"'
- name: 'Calculate Versions and SHAs'
  id: 'versions'
  env:
    GH_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
    STABLE_OVERRIDE: '${{ github.event.inputs.stable_version_override }}'
    PREVIEW_OVERRIDE: '${{ github.event.inputs.preview_version_override }}'
    REF_INPUT: '${{ github.event.inputs.ref }}'
  run: |
    set -e
    # Build the three version-calculation commands, honoring manual overrides.
    STABLE_COMMAND="node scripts/get-release-version.js --type=stable"
    if [[ -n "${STABLE_OVERRIDE}" ]]; then
      STABLE_COMMAND+=" --stable_version_override=${STABLE_OVERRIDE}"
    fi
    PREVIEW_COMMAND="node scripts/get-release-version.js --type=preview"
    if [[ -n "${PREVIEW_OVERRIDE}" ]]; then
      PREVIEW_COMMAND+=" --preview_version_override=${PREVIEW_OVERRIDE}"
    fi
    NIGHTLY_COMMAND="node scripts/get-release-version.js --type=promote-nightly"
    STABLE_JSON=$(${STABLE_COMMAND})
    STABLE_VERSION=$(echo "${STABLE_JSON}" | jq -r .releaseVersion)
    # Preview and nightly numbering are derived from the stable version
    # computed above.
    PREVIEW_COMMAND+=" --stable-base-version=${STABLE_VERSION}"
    NIGHTLY_COMMAND+=" --stable-base-version=${STABLE_VERSION}"
    PREVIEW_JSON=$(${PREVIEW_COMMAND})
    NIGHTLY_JSON=$(${NIGHTLY_COMMAND})
    echo "STABLE_JSON_COMMAND=${STABLE_COMMAND}"
    echo "PREVIEW_JSON_COMMAND=${PREVIEW_COMMAND}"
    echo "NIGHTLY_JSON_COMMAND=${NIGHTLY_COMMAND}"
    echo "STABLE_JSON: ${STABLE_JSON}"
    echo "PREVIEW_JSON: ${PREVIEW_JSON}"
    echo "NIGHTLY_JSON: ${NIGHTLY_JSON}"
    echo "STABLE_VERSION=${STABLE_VERSION}" >> "${GITHUB_OUTPUT}"
    # Stable SHA is taken from the preview calculation's previousReleaseTag —
    # presumably because promotion ships the previous preview as the new
    # stable; verify against get-release-version.js if this looks off.
    # shellcheck disable=SC1083
    echo "STABLE_SHA=$(git rev-parse "$(echo "${PREVIEW_JSON}" | jq -r .previousReleaseTag)"^{commit})" >> "${GITHUB_OUTPUT}"
    echo "PREVIOUS_STABLE_TAG=$(echo "${STABLE_JSON}" | jq -r .previousReleaseTag)" >> "${GITHUB_OUTPUT}"
    echo "PREVIEW_VERSION=$(echo "${PREVIEW_JSON}" | jq -r .releaseVersion)" >> "${GITHUB_OUTPUT}"
    # Resolve the requested ref (branch, tag, or raw SHA) to a commit SHA.
    # shellcheck disable=SC1083
    REF="${REF_INPUT}"
    SHA=$(git ls-remote origin "$REF" | awk -v ref="$REF" '$2 == "refs/heads/"ref || $2 == "refs/tags/"ref || $2 == ref {print $1}' | head -n 1)
    if [ -z "$SHA" ]; then
      if [[ "$REF" =~ ^[0-9a-f]{7,40}$ ]]; then
        SHA="$REF"
      else
        echo "::error::Could not resolve ref '$REF' to a commit SHA."
        exit 1
      fi
    fi
    echo "PREVIEW_SHA=$SHA" >> "${GITHUB_OUTPUT}"
    echo "PREVIOUS_PREVIEW_TAG=$(echo "${PREVIEW_JSON}" | jq -r .previousReleaseTag)" >> "${GITHUB_OUTPUT}"
    echo "NEXT_NIGHTLY_VERSION=$(echo "${NIGHTLY_JSON}" | jq -r .releaseVersion)" >> "${GITHUB_OUTPUT}"
    echo "PREVIOUS_NIGHTLY_TAG=$(echo "${NIGHTLY_JSON}" | jq -r .previousReleaseTag)" >> "${GITHUB_OUTPUT}"
    CURRENT_NIGHTLY_TAG=$(git describe --tags --abbrev=0 --match="*nightly*")
    echo "CURRENT_NIGHTLY_TAG=${CURRENT_NIGHTLY_TAG}" >> "${GITHUB_OUTPUT}"
    echo "NEXT_SHA=$SHA" >> "${GITHUB_OUTPUT}"
# Human-readable summary of the promotion plan, for the job log.
- name: 'Display Pending Updates'
  env:
    STABLE_VERSION: '${{ steps.versions.outputs.STABLE_VERSION }}'
    STABLE_SHA: '${{ steps.versions.outputs.STABLE_SHA }}'
    PREVIOUS_STABLE_TAG: '${{ steps.versions.outputs.PREVIOUS_STABLE_TAG }}'
    PREVIEW_VERSION: '${{ steps.versions.outputs.PREVIEW_VERSION }}'
    PREVIEW_SHA: '${{ steps.versions.outputs.PREVIEW_SHA }}'
    PREVIOUS_PREVIEW_TAG: '${{ steps.versions.outputs.PREVIOUS_PREVIEW_TAG }}'
    NEXT_NIGHTLY_VERSION: '${{ steps.versions.outputs.NEXT_NIGHTLY_VERSION }}'
    PREVIOUS_NIGHTLY_TAG: '${{ steps.versions.outputs.PREVIOUS_NIGHTLY_TAG }}'
    INPUT_REF: '${{ github.event.inputs.ref }}'
  run: |
    echo "Release Plan:"
    echo "-----------"
    echo "Stable Release: ${STABLE_VERSION}"
    echo "  - Commit: ${STABLE_SHA}"
    echo "  - Previous Tag: ${PREVIOUS_STABLE_TAG}"
    echo ""
    echo "Preview Release: ${PREVIEW_VERSION}"
    echo "  - Commit: ${PREVIEW_SHA} (${INPUT_REF})"
    echo "  - Previous Tag: ${PREVIOUS_PREVIEW_TAG}"
    echo ""
    echo "Preparing Next Nightly Release: ${NEXT_NIGHTLY_VERSION}"
    echo "  - Merging Version Update PR to Branch: ${INPUT_REF}"
    echo "  - Previous Tag: ${PREVIOUS_NIGHTLY_TAG}"
# Runs the test suite against each channel's exact SHA before publishing.
test:
  name: 'Test ${{ matrix.channel }}'
  needs: 'calculate-versions'
  runs-on: 'ubuntu-latest'
  strategy:
    # Let all channels finish so one failure doesn't hide another.
    fail-fast: false
    matrix:
      include:
        - channel: 'stable'
          sha: '${{ needs.calculate-versions.outputs.STABLE_SHA }}'
        - channel: 'preview'
          sha: '${{ needs.calculate-versions.outputs.PREVIEW_SHA }}'
        - channel: 'nightly'
          sha: '${{ github.event.inputs.ref }}'
  steps:
    # Workflow/scripts checkout at the requested ref...
    - name: 'Checkout Ref'
      uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
      with:
        ref: '${{ github.event.inputs.ref }}'
    # ...and the channel's release code under ./release.
    - name: 'Checkout correct SHA'
      uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
      with:
        ref: '${{ matrix.sha }}'
        path: 'release'
        fetch-depth: 0
    - name: 'Setup Node.js'
      uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
      with:
        node-version-file: '.nvmrc'
        cache: 'npm'
    - name: 'Install Dependencies'
      working-directory: './release'
      run: 'npm ci'
    - name: 'Run Tests'
      if: "${{github.event.inputs.force_skip_tests != 'true'}}"
      uses: './.github/actions/run-tests'
      with:
        gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
        working-directory: './release'
# Publishes the new preview release from the resolved PREVIEW_SHA.
publish-preview:
  name: 'Publish preview'
  needs: ['calculate-versions', 'test']
  runs-on: 'ubuntu-latest'
  environment: "${{ github.event.inputs.environment || 'prod' }}"
  permissions:
    contents: 'write'
    packages: 'write'
    issues: 'write'
  steps:
    - name: 'Checkout Ref'
      uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
      with:
        ref: '${{ github.event.inputs.ref }}'
    # Release code lives in ./release at the exact preview SHA.
    - name: 'Checkout correct SHA'
      uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
      with:
        ref: '${{ needs.calculate-versions.outputs.PREVIEW_SHA }}'
        path: 'release'
        fetch-depth: 0
    - name: 'Setup Node.js'
      uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
      with:
        node-version-file: '.nvmrc'
        cache: 'npm'
    - name: 'Install Dependencies'
      working-directory: './release'
      run: 'npm ci'
    - name: 'Publish Release'
      uses: './.github/actions/publish-release'
      with:
        release-version: '${{ needs.calculate-versions.outputs.PREVIEW_VERSION }}'
        release-tag: 'v${{ needs.calculate-versions.outputs.PREVIEW_VERSION }}'
        npm-tag: 'preview'
        wombat-token-core: '${{ secrets.WOMBAT_TOKEN_CORE }}'
        wombat-token-cli: '${{ secrets.WOMBAT_TOKEN_CLI }}'
        wombat-token-a2a-server: '${{ secrets.WOMBAT_TOKEN_A2A_SERVER }}'
        github-token: '${{ secrets.GITHUB_TOKEN }}'
        github-release-token: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
        dry-run: '${{ github.event.inputs.dry_run }}'
        previous-tag: '${{ needs.calculate-versions.outputs.PREVIOUS_PREVIEW_TAG }}'
        working-directory: './release'
        gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
        force-skip-tests: '${{ github.event.inputs.force_skip_tests }}'
        npm-registry-publish-url: '${{ vars.NPM_REGISTRY_PUBLISH_URL }}'
        npm-registry-url: '${{ vars.NPM_REGISTRY_URL }}'
        npm-registry-scope: '${{ vars.NPM_REGISTRY_SCOPE }}'
        cli-package-name: '${{ vars.CLI_PACKAGE_NAME }}'
        core-package-name: '${{ vars.CORE_PACKAGE_NAME }}'
        a2a-package-name: '${{ vars.A2A_PACKAGE_NAME }}'
# File a P0 tracking issue when a real (non-dry-run) preview publish fails.
- name: 'Create Issue on Failure'
  # github.event.inputs values are strings in Actions expressions; comparing
  # with boolean `false` never matches, so the step would never fire.
  if: "${{ failure() && github.event.inputs.dry_run != 'true' }}"
  env:
    GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
    RELEASE_TAG: 'v${{ needs.calculate-versions.outputs.PREVIEW_VERSION }}'
    DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'
  run: |
    # Double-quote so ${RELEASE_TAG}, ${DETAILS_URL} and the date command
    # substitution expand; the previous single-quoted form posted them
    # literally.
    gh issue create \
      --title "Promote Release Failed for ${RELEASE_TAG} on $(date +'%Y-%m-%d')" \
      --body "The promote-release workflow failed during preview publish. See the full run for details: ${DETAILS_URL}" \
      --label 'release-failure,priority/p0'
# Publishes the stable release; gated on the preview publish succeeding.
publish-stable:
  name: 'Publish stable'
  needs: ['calculate-versions', 'test', 'publish-preview']
  runs-on: 'ubuntu-latest'
  environment: "${{ github.event.inputs.environment || 'prod' }}"
  permissions:
    contents: 'write'
    packages: 'write'
    issues: 'write'
  steps:
    - name: 'Checkout Ref'
      uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
      with:
        ref: '${{ github.event.inputs.ref }}'
    # Release code lives in ./release at the exact stable SHA.
    - name: 'Checkout correct SHA'
      uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
      with:
        ref: '${{ needs.calculate-versions.outputs.STABLE_SHA }}'
        path: 'release'
        fetch-depth: 0
    - name: 'Setup Node.js'
      uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
      with:
        node-version-file: '.nvmrc'
        cache: 'npm'
    - name: 'Install Dependencies'
      working-directory: './release'
      run: 'npm ci'
    - name: 'Publish Release'
      uses: './.github/actions/publish-release'
      with:
        release-version: '${{ needs.calculate-versions.outputs.STABLE_VERSION }}'
        release-tag: 'v${{ needs.calculate-versions.outputs.STABLE_VERSION }}'
        npm-tag: 'latest'
        wombat-token-core: '${{ secrets.WOMBAT_TOKEN_CORE }}'
        wombat-token-cli: '${{ secrets.WOMBAT_TOKEN_CLI }}'
        wombat-token-a2a-server: '${{ secrets.WOMBAT_TOKEN_A2A_SERVER }}'
        github-token: '${{ secrets.GITHUB_TOKEN }}'
        github-release-token: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
        dry-run: '${{ github.event.inputs.dry_run }}'
        previous-tag: '${{ needs.calculate-versions.outputs.PREVIOUS_STABLE_TAG }}'
        working-directory: './release'
        gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
        force-skip-tests: '${{ github.event.inputs.force_skip_tests }}'
        npm-registry-publish-url: '${{ vars.NPM_REGISTRY_PUBLISH_URL }}'
        npm-registry-url: '${{ vars.NPM_REGISTRY_URL }}'
        npm-registry-scope: '${{ vars.NPM_REGISTRY_SCOPE }}'
        cli-package-name: '${{ vars.CLI_PACKAGE_NAME }}'
        core-package-name: '${{ vars.CORE_PACKAGE_NAME }}'
        a2a-package-name: '${{ vars.A2A_PACKAGE_NAME }}'
# File a P0 tracking issue when a real (non-dry-run) stable publish fails.
- name: 'Create Issue on Failure'
  # github.event.inputs values are strings in Actions expressions; comparing
  # with boolean `false` never matches, so the step would never fire.
  if: "${{ failure() && github.event.inputs.dry_run != 'true' }}"
  env:
    GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
    RELEASE_TAG: 'v${{ needs.calculate-versions.outputs.STABLE_VERSION }}'
    DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'
  run: |
    # Double-quote so ${RELEASE_TAG}, ${DETAILS_URL} and the date command
    # substitution expand; the previous single-quoted form posted them
    # literally.
    gh issue create \
      --title "Promote Release Failed for ${RELEASE_TAG} on $(date +'%Y-%m-%d')" \
      --body "The promote-release workflow failed during stable publish. See the full run for details: ${DETAILS_URL}" \
      --label 'release-failure,priority/p0'
nightly-pr:
name: 'Create Nightly PR'
needs: ['publish-stable', 'calculate-versions']
runs-on: 'ubuntu-latest'
environment: "${{ github.event.inputs.environment || 'prod' }}"
permissions:
contents: 'write'
pull-requests: 'write'
issues: 'write'
steps:
- name: 'Checkout Ref'
uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
with:
ref: '${{ github.event.inputs.ref }}'
- name: 'Setup Node.js'
uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
with:
node-version-file: '.nvmrc'
cache: 'npm'
- name: 'Install Dependencies'
run: 'npm ci'
- name: 'Configure Git User'
run: |-
git config user.name "gemini-cli-robot"
git config user.email "gemini-cli-robot@google.com"
- name: 'Create and switch to a new branch'
id: 'release_branch'
run: |
BRANCH_NAME="chore/nightly-version-bump-${NEEDS_CALCULATE_VERSIONS_OUTPUTS_NEXT_NIGHTLY_VERSION}"
git switch -c "${BRANCH_NAME}"
echo "BRANCH_NAME=${BRANCH_NAME}" >> "${GITHUB_OUTPUT}"
env:
NEEDS_CALCULATE_VERSIONS_OUTPUTS_NEXT_NIGHTLY_VERSION: '${{ needs.calculate-versions.outputs.NEXT_NIGHTLY_VERSION }}'
- name: 'Update package versions'
run: 'npm run release:version "${NEEDS_CALCULATE_VERSIONS_OUTPUTS_NEXT_NIGHTLY_VERSION}"'
env:
NEEDS_CALCULATE_VERSIONS_OUTPUTS_NEXT_NIGHTLY_VERSION: '${{ needs.calculate-versions.outputs.NEXT_NIGHTLY_VERSION }}'
- name: 'Commit and Push package versions'
env:
BRANCH_NAME: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
DRY_RUN: '${{ github.event.inputs.dry_run }}'
NEEDS_CALCULATE_VERSIONS_OUTPUTS_NEXT_NIGHTLY_VERSION: '${{ needs.calculate-versions.outputs.NEXT_NIGHTLY_VERSION }}'
run: |-
git add package.json packages/*/package.json
if [ -f package-lock.json ]; then
git add package-lock.json
fi
git commit -m "chore(release): bump version to ${NEEDS_CALCULATE_VERSIONS_OUTPUTS_NEXT_NIGHTLY_VERSION}"
if [[ "${DRY_RUN}" == "false" ]]; then
echo "Pushing release branch to remote..."
git push --set-upstream origin "${BRANCH_NAME}"
else
echo "Dry run enabled. Skipping push."
fi
- name: 'Create and Merge Pull Request'
uses: './.github/actions/create-pull-request'
with:
branch-name: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
pr-title: 'chore(release): bump version to ${{ needs.calculate-versions.outputs.NEXT_NIGHTLY_VERSION }}'
pr-body: 'Automated version bump to prepare for the next nightly release.'
github-token: '${{ secrets.GEMINI_CLI_ROBOT_GITHUB_PAT }}'
dry-run: '${{ github.event.inputs.dry_run }}'
- name: 'Create Issue on Failure'
if: '${{ failure() && github.event.inputs.dry_run == false }}'
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
RELEASE_TAG: 'v${{ needs.calculate-versions.outputs.NEXT_NIGHTLY_VERSION }}'
DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'
run: |
gh issue create \
--title 'Promote Release Failed for ${RELEASE_TAG} on $(date +'%Y-%m-%d')' \
--body 'The promote-release workflow failed during nightly PR creation. See the full run for details: ${DETAILS_URL}' \
--label 'release-failure,priority/p0'
================================================
FILE: .github/workflows/release-rollback.yml
================================================
# Manually-triggered rollback for a published release: optionally re-points an
# npm dist-tag at a known-good version, deprecates the bad version's packages,
# deletes the GitHub release, and tags the rolled-back commit for audit.
name: 'Release: Rollback change'
on:
  workflow_dispatch:
    inputs:
      rollback_origin:
        description: 'The package version to rollback FROM and delete (e.g., 0.5.0-preview-2)'
        required: true
        type: 'string'
      rollback_destination:
        description: 'The package version to rollback TO (e.g., 0.5.0-preview-2). This version must already exist on the npm registry.'
        required: false
        type: 'string'
      channel:
        description: 'The npm dist-tag to apply to rollback_destination (e.g., latest, preview, nightly). REQUIRED IF rollback_destination is set.'
        required: false
        type: 'choice'
        options:
          - 'latest'
          - 'preview'
          - 'nightly'
          - 'dev'
        default: 'dev'
      ref:
        description: 'The branch, tag, or SHA to run from.'
        required: false
        type: 'string'
        default: 'main'
      # Defaults to a dry run so an accidental dispatch changes nothing.
      dry-run:
        description: 'Whether to run in dry-run mode.'
        required: false
        type: 'boolean'
        default: true
      environment:
        description: 'Environment'
        required: false
        type: 'choice'
        options:
          - 'prod'
          - 'dev'
        default: 'prod'
jobs:
  change-tags:
    if: "github.repository == 'google-gemini/gemini-cli'"
    environment: "${{ github.event.inputs.environment || 'prod' }}"
    runs-on: 'ubuntu-latest'
    permissions:
      packages: 'write'
      issues: 'write'
    steps:
      - name: 'Checkout repository'
        uses: 'actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955' # ratchet:actions/checkout@v4
        with:
          ref: '${{ github.event.inputs.ref }}'
          # Full history so git tags can be resolved to commits below.
          fetch-depth: 0
      - name: 'Setup Node.js'
        uses: 'actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020'
        with:
          node-version-file: '.nvmrc'
      - name: 'configure .npmrc'
        uses: './.github/actions/setup-npmrc'
        with:
          github-token: '${{ secrets.GITHUB_TOKEN }}'
      # Derive the git tag name (v<version>) for the version being rolled back.
      - name: 'Get Origin Version Tag'
        id: 'origin_tag'
        shell: 'bash'
        env:
          ROLLBACK_ORIGIN: '${{ github.event.inputs.rollback_origin }}'
        run: |
          TAG_VALUE="v${ROLLBACK_ORIGIN}"
          echo "ORIGIN_TAG=$TAG_VALUE" >> "$GITHUB_OUTPUT"
      # Resolve that tag to a commit SHA (requires the fetch-depth: 0 above).
      - name: 'Get Origin Commit Hash'
        id: 'origin_hash'
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          ORIGIN_TAG: '${{ steps.origin_tag.outputs.ORIGIN_TAG }}'
        shell: 'bash'
        run: |
          echo "ORIGIN_HASH=$(git rev-parse "${ORIGIN_TAG}")" >> "$GITHUB_OUTPUT"
      # Only re-point the npm dist-tag when a destination version was supplied.
      - name: 'Change tag'
        if: "${{ github.event.inputs.rollback_destination != '' }}"
        uses: './.github/actions/tag-npm-release'
        with:
          channel: '${{ github.event.inputs.channel }}'
          version: '${{ github.event.inputs.rollback_destination }}'
          dry-run: '${{ github.event.inputs.dry-run }}'
          wombat-token-core: '${{ secrets.WOMBAT_TOKEN_CORE }}'
          wombat-token-cli: '${{ secrets.WOMBAT_TOKEN_CLI }}'
          wombat-token-a2a-server: '${{ secrets.WOMBAT_TOKEN_A2A_SERVER }}'
          github-token: '${{ secrets.GITHUB_TOKEN }}'
          cli-package-name: '${{ vars.CLI_PACKAGE_NAME }}'
          core-package-name: '${{ vars.CORE_PACKAGE_NAME }}'
          a2a-package-name: '${{ vars.A2A_PACKAGE_NAME }}'
      - name: 'Get cli Token'
        uses: './.github/actions/npm-auth-token'
        id: 'cli-token'
        with:
          package-name: '${{ vars.CLI_PACKAGE_NAME }}'
          github-token: '${{ secrets.GITHUB_TOKEN }}'
          wombat-token-core: '${{ secrets.WOMBAT_TOKEN_CORE }}'
          wombat-token-cli: '${{ secrets.WOMBAT_TOKEN_CLI }}'
          wombat-token-a2a-server: '${{ secrets.WOMBAT_TOKEN_A2A_SERVER }}'
      # Deprecations only run for real (non-dry-run) prod executions.
      - name: 'Deprecate Cli Npm Package'
        if: "${{ github.event.inputs.dry-run == 'false' && github.event.inputs.environment == 'prod' }}"
        env:
          NODE_AUTH_TOKEN: '${{ steps.cli-token.outputs.auth-token }}'
          PACKAGE_NAME: '${{ vars.CLI_PACKAGE_NAME }}'
          ROLLBACK_ORIGIN: '${{ github.event.inputs.rollback_origin }}'
        shell: 'bash'
        run: |
          npm deprecate "${PACKAGE_NAME}@${ROLLBACK_ORIGIN}" "This version has been rolled back."
- name: 'Get core Token'
uses: './.github/actions/npm-auth-token'
id: 'core-token'
with:
package-name: '${{ vars.CLI_PACKAGE_NAME }}'
github-token: '${{ secrets.GITHUB_TOKEN }}'
wombat-token-core: '${{ secrets.WOMBAT_TOKEN_CORE }}'
wombat-token-cli: '${{ secrets.WOMBAT_TOKEN_CLI }}'
wombat-token-a2a-server: '${{ secrets.WOMBAT_TOKEN_A2A_SERVER }}'
      - name: 'Deprecate Core Npm Package'
        if: "${{ github.event.inputs.dry-run == 'false' && github.event.inputs.environment == 'prod' }}"
        env:
          NODE_AUTH_TOKEN: '${{ steps.core-token.outputs.auth-token }}'
          PACKAGE_NAME: '${{ vars.CORE_PACKAGE_NAME }}'
          ROLLBACK_ORIGIN: '${{ github.event.inputs.rollback_origin }}'
        shell: 'bash'
        run: |
          npm deprecate "${PACKAGE_NAME}@${ROLLBACK_ORIGIN}" "This version has been rolled back."
      - name: 'Get a2a Token'
        uses: './.github/actions/npm-auth-token'
        id: 'a2a-token'
        with:
          package-name: '${{ vars.A2A_PACKAGE_NAME }}'
          github-token: '${{ secrets.GITHUB_TOKEN }}'
          wombat-token-core: '${{ secrets.WOMBAT_TOKEN_CORE }}'
          wombat-token-cli: '${{ secrets.WOMBAT_TOKEN_CLI }}'
          wombat-token-a2a-server: '${{ secrets.WOMBAT_TOKEN_A2A_SERVER }}'
      - name: 'Deprecate A2A Server Npm Package'
        if: "${{ github.event.inputs.dry-run == 'false' && github.event.inputs.environment == 'prod' }}"
        env:
          NODE_AUTH_TOKEN: '${{ steps.a2a-token.outputs.auth-token }}'
          PACKAGE_NAME: '${{ vars.A2A_PACKAGE_NAME }}'
          ROLLBACK_ORIGIN: '${{ github.event.inputs.rollback_origin }}'
        shell: 'bash'
        run: |
          npm deprecate "${PACKAGE_NAME}@${ROLLBACK_ORIGIN}" "This version has been rolled back."
      - name: 'Delete Github Release'
        if: "${{ github.event.inputs.dry-run == 'false' && github.event.inputs.environment == 'prod'}}"
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          ORIGIN_TAG: '${{ steps.origin_tag.outputs.ORIGIN_TAG }}'
        shell: 'bash'
        run: |
          gh release delete "${ORIGIN_TAG}" --yes
      # Fail the workflow if the release is still visible after deletion.
      - name: 'Verify Origin Release Deletion'
        if: "${{ github.event.inputs.dry-run == 'false' }}"
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          TARGET_TAG: '${{ steps.origin_tag.outputs.ORIGIN_TAG }}'
        shell: 'bash'
        run: |
          RELEASE_TAG=$(gh release view "$TARGET_TAG" --json tagName --jq .tagName)
          if [ "$RELEASE_TAG" = "$TARGET_TAG" ]; then
            echo "❌ Failed to delete release with tag ${TARGET_TAG}"
            echo '❌ This means the release was not deleted, and the workflow should fail.'
            exit 1
          fi
      # Mark the rolled-back commit with a "<origin tag>-rollback" git tag.
      - name: 'Add Rollback Tag'
        id: 'rollback_tag'
        if: "${{ github.event.inputs.dry-run == 'false' }}"
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          ROLLBACK_TAG_NAME: '${{ steps.origin_tag.outputs.ORIGIN_TAG }}-rollback'
          ORIGIN_HASH: '${{ steps.origin_hash.outputs.ORIGIN_HASH }}'
        shell: 'bash'
        run: |
          echo "ROLLBACK_TAG=$ROLLBACK_TAG_NAME" >> "$GITHUB_OUTPUT"
          git tag "$ROLLBACK_TAG_NAME" "${ORIGIN_HASH}"
          git push origin --tags
      # Fail the workflow if the rollback tag does not point at the commit.
      - name: 'Verify Rollback Tag Added'
        if: "${{ github.event.inputs.dry-run == 'false' }}"
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          TARGET_TAG: '${{ steps.rollback_tag.outputs.ROLLBACK_TAG }}'
          TARGET_HASH: '${{ steps.origin_hash.outputs.ORIGIN_HASH }}'
        shell: 'bash'
        run: |
          ROLLBACK_COMMIT=$(git rev-parse -q --verify "$TARGET_TAG")
          if [ "$ROLLBACK_COMMIT" != "$TARGET_HASH" ]; then
            echo "❌ Failed to add tag ${TARGET_TAG} to commit ${TARGET_HASH}"
            echo '❌ This means the tag was not added, and the workflow should fail.'
            exit 1
          fi
      # Dry runs only report what would have happened.
      - name: 'Log Dry run'
        if: "${{ github.event.inputs.dry-run == 'true' }}"
        env:
          ROLLBACK_ORIGIN: '${{ github.event.inputs.rollback_origin }}'
          ROLLBACK_DESTINATION: '${{ github.event.inputs.rollback_destination }}'
          CHANNEL: '${{ github.event.inputs.channel }}'
          REF_INPUT: '${{ github.event.inputs.ref }}'
          ORIGIN_TAG: '${{ steps.origin_tag.outputs.ORIGIN_TAG }}'
          ORIGIN_HASH: '${{ steps.origin_hash.outputs.ORIGIN_HASH }}'
          ROLLBACK_TAG: '${{ steps.rollback_tag.outputs.ROLLBACK_TAG }}'
          CLI_PACKAGE_NAME: '${{ vars.CLI_PACKAGE_NAME }}'
          CORE_PACKAGE_NAME: '${{ vars.CORE_PACKAGE_NAME }}'
          A2A_PACKAGE_NAME: '${{ vars.A2A_PACKAGE_NAME }}'
        shell: 'bash'
        run: |
          echo "
          Inputs:
          - rollback_origin: '${ROLLBACK_ORIGIN}'
          - rollback_destination: '${ROLLBACK_DESTINATION}'
          - channel: '${CHANNEL}'
          - ref: '${REF_INPUT}'
          Outputs:
          - ORIGIN_TAG: '${ORIGIN_TAG}'
          - ORIGIN_HASH: '${ORIGIN_HASH}'
          - ROLLBACK_TAG: '${ROLLBACK_TAG}'
          Would have npm deprecate ${CLI_PACKAGE_NAME}@${ROLLBACK_ORIGIN}, ${CORE_PACKAGE_NAME}@${ROLLBACK_ORIGIN}, and ${A2A_PACKAGE_NAME}@${ROLLBACK_ORIGIN}
          Would have deleted the github release with tag ${ORIGIN_TAG}
          Would have added tag ${ORIGIN_TAG}-rollback to ${ORIGIN_HASH}
          "
================================================
FILE: .github/workflows/release-sandbox.yml
================================================
# Builds and pushes the sandbox container image for a given ref.
name: 'Release Sandbox'
on:
  workflow_dispatch:
    inputs:
      ref:
        description: 'The branch, tag, or SHA to release from.'
        required: false
        type: 'string'
        default: 'main'
      dry-run:
        description: 'Whether this is a dry run.'
        required: false
        type: 'boolean'
        default: true
jobs:
  build:
    if: "github.repository == 'google-gemini/gemini-cli'"
    runs-on: 'ubuntu-latest'
    permissions:
      contents: 'read'
      packages: 'write'
      issues: 'write'
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
        with:
          ref: '${{ github.event.inputs.ref || github.sha }}'
          fetch-depth: 0
      - name: 'Push'
        uses: './.github/actions/push-sandbox'
        with:
          dockerhub-username: '${{ secrets.DOCKER_SERVICE_ACCOUNT_NAME }}'
          dockerhub-token: '${{ secrets.DOCKER_SERVICE_ACCOUNT_KEY }}'
          github-actor: '${{ github.actor }}'
          github-secret: '${{ secrets.GITHUB_TOKEN }}'
          github-sha: '${{ github.sha }}'
          github-ref-name: '${{ github.event.inputs.ref }}'
          dry-run: '${{ github.event.inputs.dry-run }}'
      - name: 'Create Issue on Failure'
        # FIX: github.event.inputs values are strings; comparing with the
        # boolean literal `false` never matched, so failure issues were never
        # created. Compare against the string 'false'.
        if: "${{ failure() && github.event.inputs.dry-run == 'false' }}"
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'
        run: |
          # FIX: double quotes so ${DETAILS_URL} and $(date ...) expand.
          gh issue create \
            --title "Sandbox Release Failed on $(date +'%Y-%m-%d')" \
            --body "The sandbox-release workflow failed. See the full run for details: ${DETAILS_URL}" \
            --label 'release-failure,priority/p0'
================================================
FILE: .github/workflows/smoke-test.yml
================================================
# Builds the CLI bundle and runs a minimal "--version" smoke test on every
# merge to main/release branches (and on demand).
name: 'On Merge Smoke Test'
on:
  push:
    branches:
      - 'main'
      - 'release/**'
  workflow_dispatch:
    inputs:
      ref:
        description: 'The branch, tag, or SHA to test on.'
        required: false
        type: 'string'
        default: 'main'
      dry-run:
        description: 'Run a dry-run of the smoke test; No bug will be created'
        required: true
        type: 'boolean'
        default: true
jobs:
  smoke-test:
    if: "github.repository == 'google-gemini/gemini-cli'"
    runs-on: 'ubuntu-latest'
    permissions:
      contents: 'write'
      packages: 'write'
      issues: 'write'
    steps:
      - name: 'Checkout'
        uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
        with:
          ref: '${{ github.event.inputs.ref || github.sha }}'
          fetch-depth: 0
      - name: 'Install Dependencies'
        run: 'npm ci'
      - name: 'Build bundle'
        run: 'npm run bundle'
      - name: 'Smoke test bundle'
        run: 'node ./bundle/gemini.js --version'
      - name: 'Create Issue on Failure'
        # FIX: github.event.inputs values are strings (and are absent entirely
        # on push events), so `== false` never matched for manual runs. Using
        # `!= 'true'` files the issue for push failures and non-dry manual
        # runs, and skips it only when dry-run is explicitly 'true'.
        if: "${{ failure() && github.event.inputs.dry-run != 'true' }}"
        env:
          GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
          DETAILS_URL: '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}'
          REF: '${{ github.event.inputs.ref }}'
        run: |
          # FIX: double quotes so ${REF}, ${DETAILS_URL} and $(date ...) expand.
          gh issue create \
            --title "Smoke test failed on ${REF} @ $(date +'%Y-%m-%d')" \
            --body "Smoke test build failed. See the full run for details: ${DETAILS_URL}" \
            --label 'priority/p0'
================================================
FILE: .github/workflows/stale.yml
================================================
name: 'Mark stale issues and pull requests'
# Run as a daily cron at 1:30 AM
on:
  schedule:
    - cron: '30 1 * * *'
  workflow_dispatch:
jobs:
  stale:
    strategy:
      fail-fast: false
      matrix:
        runner:
          - 'ubuntu-latest' # GitHub-hosted
    runs-on: '${{ matrix.runner }}'
    # Only run in the upstream repository (not in forks).
    if: |-
      ${{ github.repository == 'google-gemini/gemini-cli' }}
    permissions:
      issues: 'write'
      pull-requests: 'write'
    # Only one stale run at a time; a newer run cancels an in-flight one.
    concurrency:
      group: '${{ github.workflow }}-stale'
      cancel-in-progress: true
    steps:
      - uses: 'actions/stale@5bef64f19d7facfb25b37b414482c7164d639639' # ratchet:actions/stale@v9
        with:
          repo-token: '${{ secrets.GITHUB_TOKEN }}'
          stale-issue-message: >-
            This issue has been automatically marked as stale due to 60 days of inactivity.
            It will be closed in 14 days if no further activity occurs.
          stale-pr-message: >-
            This pull request has been automatically marked as stale due to 60 days of inactivity.
            It will be closed in 14 days if no further activity occurs.
          close-issue-message: >-
            This issue has been closed due to 14 additional days of inactivity after being marked as stale.
            If you believe this is still relevant, feel free to comment or reopen the issue. Thank you!
          close-pr-message: >-
            This pull request has been closed due to 14 additional days of inactivity after being marked as stale.
            If this is still relevant, you are welcome to reopen or leave a comment. Thanks for contributing!
          # 60 days to stale, then 14 more days to auto-close.
          days-before-stale: 60
          days-before-close: 14
          # Labels that permanently exempt an item from going stale.
          exempt-issue-labels: 'pinned,security,🔒 maintainer only,help wanted,🗓️ Public Roadmap'
          exempt-pr-labels: 'pinned,security,🔒 maintainer only,help wanted,🗓️ Public Roadmap'
================================================
FILE: .github/workflows/test-build-binary.yml
================================================
# Builds the standalone gemini binary for each supported OS/arch, optionally
# code-signs it, smoke-tests it, runs integration tests, and uploads the
# result as an artifact.
name: 'Test Build Binary'
on:
  workflow_dispatch:
permissions:
  contents: 'read'
defaults:
  run:
    shell: 'bash'
jobs:
  build-node-binary:
    name: 'Build Binary (${{ matrix.os }})'
    runs-on: '${{ matrix.os }}'
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: 'ubuntu-latest'
            platform_name: 'linux-x64'
            arch: 'x64'
          - os: 'windows-latest'
            platform_name: 'win32-x64'
            arch: 'x64'
          - os: 'macos-latest' # Apple Silicon (ARM64)
            platform_name: 'darwin-arm64'
            arch: 'arm64'
          - os: 'macos-latest' # Intel (x64) running on ARM via Rosetta
            platform_name: 'darwin-x64'
            arch: 'x64'
    steps:
      # NOTE(review): actions here are pinned to floating tags (@v4/@v2) while
      # other workflows in this repo pin full SHAs — confirm this is intended.
      - name: 'Checkout'
        uses: 'actions/checkout@v4'
      # Disable Defender real-time scanning and background services that slow
      # down heavy npm/build workloads on Windows runners.
      - name: 'Optimize Windows Performance'
        if: "matrix.os == 'windows-latest'"
        run: |
          Set-MpPreference -DisableRealtimeMonitoring $true
          Stop-Service -Name "wsearch" -Force -ErrorAction SilentlyContinue
          Set-Service -Name "wsearch" -StartupType Disabled
          Stop-Service -Name "SysMain" -Force -ErrorAction SilentlyContinue
          Set-Service -Name "SysMain" -StartupType Disabled
        shell: 'powershell'
      - name: 'Set up Node.js'
        uses: 'actions/setup-node@v4'
        with:
          node-version-file: '.nvmrc'
          architecture: '${{ matrix.arch }}'
          cache: 'npm'
      - name: 'Install dependencies'
        run: 'npm ci'
      # Expose whether signing certificates are configured so the signing
      # steps below can be skipped gracefully when secrets are absent.
      - name: 'Check Secrets'
        id: 'check_secrets'
        run: |
          echo "has_win_cert=${{ secrets.WINDOWS_PFX_BASE64 != '' }}" >> "$GITHUB_OUTPUT"
          echo "has_mac_cert=${{ secrets.MACOS_CERT_P12_BASE64 != '' }}" >> "$GITHUB_OUTPUT"
      - name: 'Setup Windows SDK (Windows)'
        if: "matrix.os == 'windows-latest'"
        uses: 'microsoft/setup-msbuild@v2'
      # Locate the newest signtool.exe from the Windows SDK and put it on PATH.
      - name: 'Add Signtool to Path (Windows)'
        if: "matrix.os == 'windows-latest'"
        run: |
          $signtoolPath = Get-ChildItem -Path "C:\Program Files (x86)\Windows Kits\10\bin" -Recurse -Filter "signtool.exe" | Sort-Object FullName -Descending | Select-Object -First 1 -ExpandProperty DirectoryName
          echo "Found signtool at: $signtoolPath"
          echo "$signtoolPath" >> $env:GITHUB_PATH
        shell: 'pwsh'
      # Import the macOS signing certificate into a temporary keychain so
      # codesign can use it during the binary build.
      - name: 'Setup macOS Keychain'
        if: "startsWith(matrix.os, 'macos') && steps.check_secrets.outputs.has_mac_cert == 'true' && github.event_name != 'pull_request'"
        env:
          BUILD_CERTIFICATE_BASE64: '${{ secrets.MACOS_CERT_P12_BASE64 }}'
          P12_PASSWORD: '${{ secrets.MACOS_CERT_PASSWORD }}'
          KEYCHAIN_PASSWORD: 'temp-password'
        run: |
          # Create the P12 file
          echo "$BUILD_CERTIFICATE_BASE64" | base64 --decode > certificate.p12
          # Create a temporary keychain
          security create-keychain -p "$KEYCHAIN_PASSWORD" build.keychain
          security default-keychain -s build.keychain
          security unlock-keychain -p "$KEYCHAIN_PASSWORD" build.keychain
          # Import the certificate
          security import certificate.p12 -k build.keychain -P "$P12_PASSWORD" -T /usr/bin/codesign
          # Allow codesign to access it
          security set-key-partition-list -S apple-tool:,apple: -s -k "$KEYCHAIN_PASSWORD" build.keychain
          # Set Identity for build script
          echo "APPLE_IDENTITY=${{ secrets.MACOS_CERT_IDENTITY }}" >> "$GITHUB_ENV"
      # Materialize the Windows PFX certificate on disk and export its path
      # and password for the build script.
      - name: 'Setup Windows Certificate'
        if: "matrix.os == 'windows-latest' && steps.check_secrets.outputs.has_win_cert == 'true' && github.event_name != 'pull_request'"
        env:
          PFX_BASE64: '${{ secrets.WINDOWS_PFX_BASE64 }}'
          PFX_PASSWORD: '${{ secrets.WINDOWS_PFX_PASSWORD }}'
        run: |
          $pfx_cert_byte = [System.Convert]::FromBase64String("$env:PFX_BASE64")
          $certPath = Join-Path (Get-Location) "cert.pfx"
          [IO.File]::WriteAllBytes($certPath, $pfx_cert_byte)
          echo "WINDOWS_PFX_FILE=$certPath" >> $env:GITHUB_ENV
          echo "WINDOWS_PFX_PASSWORD=$env:PFX_PASSWORD" >> $env:GITHUB_ENV
        shell: 'pwsh'
      - name: 'Build Binary'
        run: 'npm run build:binary'
      - name: 'Build Core Package'
        run: 'npm run build -w @google/gemini-cli-core'
      # Fail fast (with a directory listing) if the expected binary is missing.
      - name: 'Verify Output Exists'
        run: |
          if [ -f "dist/${{ matrix.platform_name }}/gemini" ]; then
            echo "Binary found at dist/${{ matrix.platform_name }}/gemini"
          elif [ -f "dist/${{ matrix.platform_name }}/gemini.exe" ]; then
            echo "Binary found at dist/${{ matrix.platform_name }}/gemini.exe"
          else
            echo "Error: Binary not found in dist/${{ matrix.platform_name }}/"
            ls -R dist/
            exit 1
          fi
      - name: 'Smoke Test Binary'
        run: |
          echo "Running binary smoke test..."
          if [ -f "dist/${{ matrix.platform_name }}/gemini.exe" ]; then
            "./dist/${{ matrix.platform_name }}/gemini.exe" --version
          else
            "./dist/${{ matrix.platform_name }}/gemini" --version
          fi
      - name: 'Run Integration Tests'
        if: "github.event_name != 'pull_request'"
        env:
          GEMINI_API_KEY: '${{ secrets.GEMINI_API_KEY }}'
        run: |
          echo "Running integration tests with binary..."
          # cygpath converts the POSIX path to a mixed Windows path for node.
          if [[ "${{ matrix.os }}" == 'windows-latest' ]]; then
            BINARY_PATH="$(cygpath -m "$(pwd)/dist/${{ matrix.platform_name }}/gemini.exe")"
          else
            BINARY_PATH="$(pwd)/dist/${{ matrix.platform_name }}/gemini"
          fi
          echo "Using binary at $BINARY_PATH"
          export INTEGRATION_TEST_GEMINI_BINARY_PATH="$BINARY_PATH"
          npm run test:integration:sandbox:none -- --testTimeout=600000
      - name: 'Upload Artifact'
        uses: 'actions/upload-artifact@v4'
        with:
          name: 'gemini-cli-${{ matrix.platform_name }}'
          path: 'dist/${{ matrix.platform_name }}/'
          retention-days: 5
================================================
FILE: .github/workflows/trigger_e2e.yml
================================================
name: 'Trigger E2E'
on:
  workflow_dispatch:
    inputs:
      repo_name:
        description: 'Repository name (e.g., owner/repo)'
        required: false
        type: 'string'
      head_sha:
        description: 'SHA of the commit to test'
        required: false
        type: 'string'
  pull_request:
jobs:
  # Persist the PR's source repo and head SHA as an artifact so a downstream
  # workflow can pick them up. Manual dispatch inputs take precedence.
  save_repo_name:
    if: "github.repository == 'google-gemini/gemini-cli'"
    runs-on: 'gemini-cli-ubuntu-16-core'
    steps:
      - name: 'Save Repo name'
        env:
          REPO_NAME: '${{ github.event.inputs.repo_name || github.event.pull_request.head.repo.full_name }}'
          HEAD_SHA: '${{ github.event.inputs.head_sha || github.event.pull_request.head.sha }}'
        run: |
          mkdir -p ./pr
          echo "${REPO_NAME}" > ./pr/repo_name
          echo "${HEAD_SHA}" > ./pr/head_sha
      - uses: 'actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02' # ratchet:actions/upload-artifact@v4
        with:
          name: 'repo_name'
          path: 'pr/'
  trigger_e2e:
    name: 'Trigger e2e'
    if: "github.repository == 'google-gemini/gemini-cli'"
    runs-on: 'gemini-cli-ubuntu-16-core'
    steps:
      # Marker step only; the actual e2e workflow is triggered externally by
      # something listening for this run.
      - id: 'trigger-e2e'
        run: |
          echo "Trigger e2e workflow"
================================================
FILE: .github/workflows/unassign-inactive-assignees.yml
================================================
name: 'Unassign Inactive Issue Assignees'
# This workflow runs daily and scans every open "help wanted" issue that has
# one or more assignees. For each assignee it checks whether they have a
# non-draft pull request (open and ready for review, or already merged) that
# is linked to the issue. Draft PRs are intentionally excluded so that
# contributors cannot reset the check by opening a no-op PR. If no
# qualifying PR is found within 7 days of assignment the assignee is
# automatically removed and a friendly comment is posted so that other
# contributors can pick up the work.
# Maintainers, org members, and collaborators (anyone with write access or
# above) are always exempted and will never be auto-unassigned.
on:
  schedule:
    - cron: '0 9 * * *' # Every day at 09:00 UTC
  workflow_dispatch:
    inputs:
      dry_run:
        description: 'Run in dry-run mode (no changes will be applied)'
        required: false
        default: false
        type: 'boolean'
# Only one run of this workflow at a time; a newer run cancels the older one.
concurrency:
  group: '${{ github.workflow }}'
  cancel-in-progress: true
defaults:
  run:
    shell: 'bash'
jobs:
  unassign-inactive-assignees:
    if: "github.repository == 'google-gemini/gemini-cli'"
    runs-on: 'ubuntu-latest'
    permissions:
      issues: 'write'
    steps:
      # Use a GitHub App token so unassignments and comments are made by the
      # bot identity (and have the app's permissions) instead of GITHUB_TOKEN.
      - name: 'Generate GitHub App Token'
        id: 'generate_token'
        uses: 'actions/create-github-app-token@v2'
        with:
          app-id: '${{ secrets.APP_ID }}'
          private-key: '${{ secrets.PRIVATE_KEY }}'
- name: 'Unassign inactive assignees'
uses: 'actions/github-script@v7'
env:
DRY_RUN: '${{ inputs.dry_run }}'
with:
github-token: '${{ steps.generate_token.outputs.token }}'
script: |
const dryRun = process.env.DRY_RUN === 'true';
if (dryRun) {
core.info('DRY RUN MODE ENABLED: No changes will be applied.');
}
const owner = context.repo.owner;
const repo = context.repo.repo;
const GRACE_PERIOD_DAYS = 7;
const now = new Date();
let maintainerLogins = new Set();
const teams = ['gemini-cli-maintainers', 'gemini-cli-askmode-approvers', 'gemini-cli-docs'];
for (const team_slug of teams) {
try {
const members = await github.paginate(github.rest.teams.listMembersInOrg, {
org: owner,
team_slug,
});
for (const m of members) maintainerLogins.add(m.login.toLowerCase());
core.info(`Fetched ${members.length} members from team ${team_slug}.`);
} catch (e) {
core.warning(`Could not fetch team ${team_slug}: ${e.message}`);
}
}
const isGooglerCache = new Map();
const isGoogler = async (login) => {
if (isGooglerCache.has(login)) return isGooglerCache.get(login);
try {
for (const org of ['googlers', 'google']) {
try {
await github.rest.orgs.checkMembershipForUser({ org, username: login });
isGooglerCache.set(login, true);
return true;
} catch (e) {
if (e.status !== 404) throw e;
}
}
} catch (e) {
core.warning(`Could not check org membership for ${login}: ${e.message}`);
}
isGooglerCache.set(login, false);
return false;
};
const permissionCache = new Map();
const isPrivilegedUser = async (login) => {
if (maintainerLogins.has(login.toLowerCase())) return true;
if (permissionCache.has(login)) return permissionCache.get(login);
try {
const { data } = await github.rest.repos.getCollaboratorPermissionLevel({
owner,
repo,
username: login,
});
const privileged = ['admin', 'maintain', 'write', 'triage'].includes(data.permission);
permissionCache.set(login, privileged);
if (privileged) {
core.info(` @${login} is a repo collaborator (${data.permission}) — exempt.`);
return true;
}
} catch (e) {
if (e.status !== 404) {
core.warning(`Could not check permission for ${login}: ${e.message}`);
}
}
const googler = await isGoogler(login);
permissionCache.set(login, googler);
return googler;
};
core.info('Fetching open "help wanted" issues with assignees...');
const issues = await github.paginate(github.rest.issues.listForRepo, {
owner,
repo,
state: 'open',
labels: 'help wanted',
per_page: 100,
});
const assignedIssues = issues.filter(
(issue) => !issue.pull_request && issue.assignees && issue.assignees.length > 0
);
core.info(`Found ${assignedIssues.length} assigned "help wanted" issues.`);
let totalUnassigned = 0;
let timelineEvents = [];
try {
timelineEvents = await github.paginate(github.rest.issues.listEventsForTimeline, {
owner,
repo,
issue_number: issue.number,
per_page: 100,
mediaType: { previews: ['mockingbird'] },
});
} catch (err) {
core.warning(`Could not fetch timeline for issue #${issue.number}: ${err.message}`);
continue;
}
const assignedAtMap = new Map();
for (const event of timelineEvents) {
if (event.event === 'assigned' && event.assignee) {
const login = event.assignee.login.toLowerCase();
const at = new Date(event.created_at);
assignedAtMap.set(login, at);
} else if (event.event === 'unassigned' && event.assignee) {
assignedAtMap.delete(event.assignee.login.toLowerCase());
}
}
const linkedPRAuthorSet = new Set();
const seenPRKeys = new Set();
for (const event of timelineEvents) {
if (
event.event !== 'cross-referenced' ||
!event.source ||
event.source.type !== 'pull_request' ||
!event.source.issue ||
!event.source.issue.user ||
!event.source.issue.number ||
!event.source.issue.repository
) continue;
const prOwner = event.source.issue.repository.owner.login;
const prRepo = event.source.issue.repository.name;
const prNumber = event.source.issue.number;
const prAuthor = event.source.issue.user.login.toLowerCase();
const prKey = `${prOwner}/${prRepo}#${prNumber}`;
if (seenPRKeys.has(prKey)) continue;
seenPRKeys.add(prKey);
try {
const { data: pr } = await github.rest.pulls.get({
owner: prOwner,
repo: prRepo,
pull_number: prNumber,
});
const isReady = (pr.state === 'open' && !pr.draft) ||
(pr.state === 'closed' && pr.merged_at !== null);
core.info(
` PR ${prKey} by @${prAuthor}: ` +
`state=${pr.state}, draft=${pr.draft}, merged=${!!pr.merged_at} → ` +
(isReady ? 'qualifies' : 'does NOT qualify (draft or closed without merge)')
);
if (isReady) linkedPRAuthorSet.add(prAuthor);
} catch (err) {
core.warning(`Could not fetch PR ${prKey}: ${err.message}`);
}
}
const assigneesToRemove = [];
for (const assignee of issue.assignees) {
const login = assignee.login.toLowerCase();
if (await isPrivilegedUser(assignee.login)) {
core.info(` @${assignee.login}: privileged user — skipping.`);
continue;
}
const assignedAt = assignedAtMap.get(login);
if (!assignedAt) {
core.warning(
`No 'assigned' event found for @${login} on issue #${issue.number}; ` +
`falling back to issue creation date (${issue.created_at}).`
);
assignedAtMap.set(login, new Date(issue.created_at));
}
const resolvedAssignedAt = assignedAtMap.get(login);
const daysSinceAssignment = (now - resolvedAssignedAt) / (1000 * 60 * 60 * 24);
core.info(
` @${login}: assigned ${daysSinceAssignment.toFixed(1)} day(s) ago, ` +
`ready-for-review PR: ${linkedPRAuthorSet.has(login) ? 'yes' : 'no'}`
);
if (daysSinceAssignment < GRACE_PERIOD_DAYS) {
core.info(` → within grace period, skipping.`);
continue;
}
if (linkedPRAuthorSet.has(login)) {
core.info(` → ready-for-review PR found, keeping assignment.`);
continue;
}
core.info(` → no ready-for-review PR after ${GRACE_PERIOD_DAYS} days, will unassign.`);
assigneesToRemove.push(assignee.login);
}
if (assigneesToRemove.length === 0) {
continue;
}
if (!dryRun) {
try {
await github.rest.issues.removeAssignees({
owner,
repo,
issue_number: issue.number,
assignees: assigneesToRemove,
});
} catch (err) {
core.warning(
`Failed to unassign ${assigneesToRemove.join(', ')} from issue #${issue.number}: ${err.message}`
);
continue;
}
const mentionList = assigneesToRemove.map((l) => `@${l}`).join(', ');
const commentBody =
`👋 ${mentionList} — it has been more than ${GRACE_PERIOD_DAYS} days since ` +
`you were assigned to this issue and we could not find a pull request ` +
`ready for review.\n\n` +
`To keep the backlog moving and ensure issues stay accessible to all ` +
`contributors, we require a PR that is open and ready for review (not a ` +
`draft) within ${GRACE_PERIOD_DAYS} days of assignment.\n\n` +
`We are automatically unassigning you so that other contributors can pick ` +
`this up. If you are still actively working on this, please:\n` +
`1. Re-assign yourself by commenting \`/assign\`.\n` +
`2. Open a PR (not a draft) linked to this issue (e.g. \`Fixes #${issue.number}\`) ` +
`within ${GRACE_PERIOD_DAYS} days so the automation knows real progress is being made.\n\n` +
`Thank you for your contribution — we hope to see a PR from you soon! 🙏`;
try {
await github.rest.issues.createComment({
owner,
repo,
issue_number: issue.number,
body: commentBody,
});
} catch (err) {
core.warning(
`Failed to post comment on issue #${issue.number}: ${err.message}`
);
}
}
totalUnassigned += assigneesToRemove.length;
core.info(
` ${dryRun ? '[DRY RUN] Would have unassigned' : 'Unassigned'}: ${assigneesToRemove.join(', ')}`
);
}
core.info(`\nDone. Total assignees ${dryRun ? 'that would be' : ''} unassigned: ${totalUnassigned}`);
================================================
FILE: .github/workflows/verify-release.yml
================================================
name: 'Verify NPM release tag'
on:
workflow_dispatch:
inputs:
version:
description: 'The expected Gemini binary version that should be released (e.g., 0.5.0-preview-2).'
required: true
type: 'string'
npm-tag:
description: 'NPM tag to verify'
required: true
type: 'choice'
options:
- 'dev'
- 'latest'
- 'preview'
- 'nightly'
default: 'latest'
environment:
description: 'Environment'
required: false
type: 'choice'
options:
- 'prod'
- 'dev'
default: 'prod'
jobs:
verify-release:
if: "github.repository == 'google-gemini/gemini-cli'"
environment: "${{ github.event.inputs.environment || 'prod' }}"
strategy:
fail-fast: false
matrix:
os: ['ubuntu-latest', 'macos-latest', 'windows-latest']
runs-on: '${{ matrix.os }}'
permissions:
contents: 'read'
packages: 'write'
issues: 'write'
steps:
- name: '📝 Print vars'
shell: 'bash'
run: 'echo "${{ toJSON(vars) }}"'
- uses: 'actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8'
- name: 'Verify release'
uses: './.github/actions/verify-release'
with:
npm-package: '${{vars.CLI_PACKAGE_NAME}}@${{github.event.inputs.npm-tag}}'
expected-version: '${{github.event.inputs.version}}'
working-directory: '.'
gemini_api_key: '${{ secrets.GEMINI_API_KEY }}'
npm-registry-url: '${{ vars.NPM_REGISTRY_URL }}'
github-token: '${{ secrets.GITHUB_TOKEN }}'
npm-registry-scope: '${{ vars.NPM_REGISTRY_SCOPE }}'
================================================
FILE: .gitignore
================================================
# API keys and secrets
.env
.env~
# gemini-cli settings
# We want to keep the .gemini in the root of the repo and ignore any .gemini
# in subdirectories. In our root .gemini we want to allow for version control
# for subcommands.
**/.gemini/
!/.gemini/
.gemini/*
!.gemini/config.yaml
!.gemini/commands/
!.gemini/skills/
!.gemini/settings.json
# Note: .gemini-clipboard/ is NOT in gitignore so Gemini can access pasted images
# Dependency directory
node_modules
bower_components
# Editors
.idea
*.iml
# OS metadata
.DS_Store
Thumbs.db
# TypeScript build info files
*.tsbuildinfo
# Ignore built ts files
dist
# Docker folder to help skip auth refreshes
.docker
bundle
# Test report files
junit.xml
packages/*/coverage/
# Generated files
packages/cli/src/generated/
packages/core/src/generated/
packages/devtools/src/_client-assets.ts
.integration-tests/
packages/vscode-ide-companion/*.vsix
packages/cli/download-ripgrep*/
# GHA credentials
gha-creds-*.json
# Log files
patch_output.log
gemini-debug.log
.genkit
.gemini-clipboard/
.eslintcache
evals/logs/
temp_agents/
================================================
FILE: .husky/pre-commit
================================================
# Husky pre-commit hook: run the repository's pre-commit checks and, when
# they fail, abort the commit with a hint about the emergency escape hatch.
if ! npm run pre-commit; then
echo ''
echo '===================================================='
echo 'pre-commit checks failed. in case of emergency, run:'
echo ''
echo 'git commit --no-verify'
echo '===================================================='
exit 1
fi
================================================
FILE: .lycheeignore
================================================
http://localhost:16686/
https://github.com/google-gemini/gemini-cli/issues/new/choose
https://github.com/google-gemini/maintainers-gemini-cli/blob/main/npm.md
https://github.com/settings/personal-access-tokens/new
https://github.com/settings/tokens/new
https://www.npmjs.com/package/@google/gemini-cli
================================================
FILE: .npmrc
================================================
@google:registry=https://wombat-dressing-room.appspot.com
================================================
FILE: .nvmrc
================================================
20
================================================
FILE: .prettierignore
================================================
**/bundle
**/coverage
**/dist
**/.git
**/node_modules
.docker
.DS_Store
.env
.gemini/
.idea
.integration-tests/
*.iml
*.tsbuildinfo
*.vsix
bower_components
eslint.config.js
**/generated
gha-creds-*.json
junit.xml
.gemini-linters/
Thumbs.db
.pytest_cache
**/SKILL.md
packages/sdk/test-data/*.json
================================================
FILE: .prettierrc.json
================================================
{
"semi": true,
"trailingComma": "all",
"singleQuote": true,
"printWidth": 80,
"tabWidth": 2,
"overrides": [
{
"files": ["**/*.md"],
"options": {
"tabWidth": 2,
"printWidth": 80,
"proseWrap": "always"
}
}
]
}
================================================
FILE: .vscode/extensions.json
================================================
{
"recommendations": [
"vitest.explorer",
"esbenp.prettier-vscode",
"dbaeumer.vscode-eslint"
]
}
================================================
FILE: .vscode/launch.json
================================================
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"type": "node",
"request": "launch",
"name": "Build & Launch CLI",
"runtimeExecutable": "npm",
"runtimeArgs": ["run", "build-and-start"],
"skipFiles": ["/**"],
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"env": {
"GEMINI_SANDBOX": "false"
}
},
{
"name": "Launch Companion VS Code Extension",
"type": "extensionHost",
"request": "launch",
"args": [
"--extensionDevelopmentPath=${workspaceFolder}/packages/vscode-ide-companion"
],
"outFiles": [
"${workspaceFolder}/packages/vscode-ide-companion/dist/**/*.js"
],
"preLaunchTask": "npm: build: vscode-ide-companion"
},
{
"name": "Attach",
"port": 9229,
"request": "attach",
"skipFiles": ["/**"],
"type": "node",
// fix source mapping when debugging in sandbox using global installation
// note this does not interfere when remoteRoot is also ${workspaceFolder}/packages
"remoteRoot": "/usr/local/share/npm-global/lib/node_modules/@gemini-cli",
"localRoot": "${workspaceFolder}/packages"
},
{
"type": "node",
"request": "launch",
"name": "Launch Program",
"skipFiles": ["/**"],
"program": "${file}",
"outFiles": ["${workspaceFolder}/**/*.js"]
},
{
"type": "node",
"request": "launch",
"name": "Debug Test File",
"runtimeExecutable": "npm",
"runtimeArgs": [
"run",
"test",
"-w",
"packages",
"--",
"--inspect-brk=9229",
"--no-file-parallelism",
"${input:testFile}"
],
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"internalConsoleOptions": "neverOpen",
"skipFiles": ["/**"]
},
{
"name": "Debug Integration Test File",
"type": "node",
"request": "launch",
"runtimeExecutable": "npx",
"runtimeArgs": [
"vitest",
"run",
"--root",
"./integration-tests",
"--inspect-brk=9229",
"${file}"
],
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"internalConsoleOptions": "neverOpen",
"skipFiles": ["/**"],
"env": {
"GEMINI_SANDBOX": "false"
}
}
],
"inputs": [
{
"id": "testFile",
"type": "promptString",
"description": "Enter the path to the test file (e.g., ${workspaceFolder}/packages/cli/src/ui/components/LoadingIndicator.test.tsx)",
"default": "${workspaceFolder}/packages/cli/src/ui/components/LoadingIndicator.test.tsx"
}
]
}
================================================
FILE: .vscode/settings.json
================================================
{
"typescript.tsserver.experimental.enableProjectDiagnostics": true,
"editor.tabSize": 2,
"editor.rulers": [80],
"editor.detectIndentation": false,
"editor.insertSpaces": true,
"[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[typescriptreact]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[javascript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[markdown]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"vitest.disableWorkspaceWarning": true
}
================================================
FILE: .vscode/tasks.json
================================================
{
"version": "2.0.0",
"tasks": [
{
"type": "npm",
"script": "build",
"group": {
"kind": "build",
"isDefault": true
},
"problemMatcher": [],
"label": "npm: build",
"detail": "scripts/build.sh"
},
{
"type": "npm",
"script": "build",
"path": "packages/vscode-ide-companion",
"group": "build",
"problemMatcher": [],
"label": "npm: build: vscode-ide-companion",
"detail": "npm run build -w packages/vscode-ide-companion"
}
]
}
================================================
FILE: .yamllint.yml
================================================
rules:
anchors:
forbid-duplicated-anchors: true
forbid-undeclared-aliases: true
forbid-unused-anchors: true
braces:
forbid: 'non-empty'
min-spaces-inside-empty: 0
max-spaces-inside-empty: 0
brackets:
min-spaces-inside: 0
max-spaces-inside: 0
min-spaces-inside-empty: 0
max-spaces-inside-empty: 0
colons:
max-spaces-before: 0
max-spaces-after: 1
commas:
max-spaces-before: 0
min-spaces-after: 1
max-spaces-after: 1
comments:
require-starting-space: true
ignore-shebangs: true
min-spaces-from-content: 1
comments-indentation: 'disable'
document-end:
present: false
document-start:
present: false
empty-lines:
max: 2
max-start: 0
max-end: 1
empty-values:
forbid-in-block-mappings: false
forbid-in-flow-mappings: true
float-values:
forbid-inf: false
forbid-nan: false
forbid-scientific-notation: false
require-numeral-before-decimal: false
hyphens:
max-spaces-after: 1
indentation:
spaces: 2
indent-sequences: true
check-multi-line-strings: false
key-duplicates: {}
new-line-at-end-of-file: {}
new-lines:
type: 'unix'
octal-values:
forbid-implicit-octal: true
forbid-explicit-octal: false
quoted-strings:
quote-type: 'single'
required: true
allow-quoted-quotes: true
trailing-spaces: {}
truthy:
allowed-values: ['true', 'false', 'on'] # GitHub Actions uses "on"
check-keys: true
ignore:
- 'thirdparty/'
- 'third_party/'
- 'vendor/'
- 'node_modules/'
================================================
FILE: CONTRIBUTING.md
================================================
# How to contribute
We would love to accept your patches and contributions to this project. This
document includes:
- **[Before you begin](#before-you-begin):** Essential steps to take before
becoming a Gemini CLI contributor.
- **[Code contribution process](#code-contribution-process):** How to contribute
code to Gemini CLI.
- **[Development setup and workflow](#development-setup-and-workflow):** How to
set up your development environment and workflow.
- **[Documentation contribution process](#documentation-contribution-process):**
How to contribute documentation to Gemini CLI.
We're looking forward to seeing your contributions!
## Before you begin
### Sign our Contributor License Agreement
Contributions to this project must be accompanied by a
[Contributor License Agreement](https://cla.developers.google.com/about) (CLA).
You (or your employer) retain the copyright to your contribution; this simply
gives us permission to use and redistribute your contributions as part of the
project.
If you or your current employer have already signed the Google CLA (even if it
was for a different project), you probably don't need to do it again.
Visit <https://cla.developers.google.com/> to see your current agreements or to
sign a new one.
### Review our Community Guidelines
This project follows
[Google's Open Source Community Guidelines](https://opensource.google/conduct/).
## Code contribution process
### Get started
The process for contributing code is as follows:
1. **Find an issue** that you want to work on. If an issue is tagged as
`🔒Maintainers only`, this means it is reserved for project maintainers. We
will not accept pull requests related to these issues. In the near future,
we will explicitly mark issues looking for contributions using the
`help-wanted` label. If you believe an issue is a good candidate for
community contribution, please leave a comment on the issue. A maintainer
will review it and apply the `help-wanted` label if appropriate. Only
maintainers should attempt to add the `help-wanted` label to an issue.
2. **Fork the repository** and create a new branch.
3. **Make your changes** in the `packages/` directory.
4. **Ensure all checks pass** by running `npm run preflight`.
5. **Open a pull request** with your changes.
### Code reviews
All submissions, including submissions by project members, require review. We
use [GitHub pull requests](https://docs.github.com/articles/about-pull-requests)
for this purpose.
To assist with the review process, we provide an automated review tool that
helps detect common anti-patterns, testing issues, and other best practices that
are easy to miss.
#### Using the automated review tool
You can run the review tool in two ways:
1. **Using the helper script (Recommended):** We provide a script that
automatically handles checking out the PR into a separate worktree,
installing dependencies, building the project, and launching the review
tool.
```bash
./scripts/review.sh <PR_NUMBER> [model]
```
**Warning:** If you run `scripts/review.sh`, you must have first verified
that the code for the PR being reviewed is safe to run and does not contain
data exfiltration attacks.
**Authors are strongly encouraged to run this script on their own PRs**
immediately after creation. This allows you to catch and fix simple issues
locally before a maintainer performs a full review.
**Note on Models:** By default, the script uses the latest Pro model
(`gemini-3.1-pro-preview`). If you do not have enough Pro quota, you can run
it with the latest Flash model instead:
`./scripts/review.sh gemini-3-flash-preview`.
2. **Manually from within Gemini CLI:** If you already have the PR checked out
and built, you can run the tool directly from the CLI prompt:
```text
/review-frontend <PR_NUMBER>
```
Replace `<PR_NUMBER>` with your pull request number. Reviewers should use this
tool to augment, not replace, their manual review process.
### Self-assigning and unassigning issues
To assign an issue to yourself, simply add a comment with the text `/assign`. To
unassign yourself from an issue, add a comment with the text `/unassign`.
The comment must contain only that text and nothing else. These commands will
assign or unassign the issue as requested, provided the conditions are met
(e.g., an issue must be unassigned to be assigned).
Please note that you can have a maximum of 3 issues assigned to you at any given
time.
### Pull request guidelines
To help us review and merge your PRs quickly, please follow these guidelines.
PRs that do not meet these standards may be closed.
#### 1. Link to an existing issue
All PRs should be linked to an existing issue in our tracker. This ensures that
every change has been discussed and is aligned with the project's goals before
any code is written.
- **For bug fixes:** The PR should be linked to the bug report issue.
- **For features:** The PR should be linked to the feature request or proposal
issue that has been approved by a maintainer.
If an issue for your change doesn't exist, we will automatically close your PR
along with a comment reminding you to associate the PR with an issue. The ideal
workflow starts with an issue that has been reviewed and approved by a
maintainer. Please **open the issue first** and wait for feedback before you
start coding.
#### 2. Keep it small and focused
We favor small, atomic PRs that address a single issue or add a single,
self-contained feature.
- **Do:** Create a PR that fixes one specific bug or adds one specific feature.
- **Don't:** Bundle multiple unrelated changes (e.g., a bug fix, a new feature,
and a refactor) into a single PR.
Large changes should be broken down into a series of smaller, logical PRs that
can be reviewed and merged independently.
#### 3. Use draft PRs for work in progress
If you'd like to get early feedback on your work, please use GitHub's **Draft
Pull Request** feature. This signals to the maintainers that the PR is not yet
ready for a formal review but is open for discussion and initial feedback.
#### 4. Ensure all checks pass
Before submitting your PR, ensure that all automated checks are passing by
running `npm run preflight`. This command runs all tests, linting, and other
style checks.
#### 5. Update documentation
If your PR introduces a user-facing change (e.g., a new command, a modified
flag, or a change in behavior), you must also update the relevant documentation
in the `/docs` directory.
See more about writing documentation:
[Documentation contribution process](#documentation-contribution-process).
#### 6. Write clear commit messages and a good PR description
Your PR should have a clear, descriptive title and a detailed description of the
changes. Follow the [Conventional Commits](https://www.conventionalcommits.org/)
standard for your commit messages.
- **Good PR title:** `feat(cli): Add --json flag to 'config get' command`
- **Bad PR title:** `Made some changes`
In the PR description, explain the "why" behind your changes and link to the
relevant issue (e.g., `Fixes #123`).
### Forking
If you are forking the repository you will be able to run the Build, Test and
Integration test workflows. However in order to make the integration tests run
you'll need to add a
[GitHub Repository Secret](https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions#creating-secrets-for-a-repository)
with a value of `GEMINI_API_KEY` and set that to a valid API key that you have
available. Your key and secret are private to your repo; no one without access
can see your key and you cannot see any secrets related to this repo.
Additionally you will need to click on the `Actions` tab and enable workflows
for your repository, you'll find it's the large blue button in the center of the
screen.
### Development setup and workflow
This section guides contributors on how to build, modify, and understand the
development setup of this project.
### Setting up the development environment
**Prerequisites:**
1. **Node.js**:
- **Development:** Please use Node.js `~20.19.0`. This specific version is
required due to an upstream development dependency issue. You can use a
tool like [nvm](https://github.com/nvm-sh/nvm) to manage Node.js versions.
- **Production:** For running the CLI in a production environment, any
version of Node.js `>=20` is acceptable.
2. **Git**
### Build process
To clone the repository:
```bash
git clone https://github.com/google-gemini/gemini-cli.git # Or your fork's URL
cd gemini-cli
```
To install dependencies defined in `package.json` as well as root dependencies:
```bash
npm install
```
To build the entire project (all packages):
```bash
npm run build
```
This command typically compiles TypeScript to JavaScript, bundles assets, and
prepares the packages for execution. Refer to `scripts/build.js` and
`package.json` scripts for more details on what happens during the build.
### Enabling sandboxing
[Sandboxing](#sandboxing) is highly recommended and requires, at a minimum,
setting `GEMINI_SANDBOX=true` in your `~/.env` and ensuring a sandboxing
provider (e.g. `macOS Seatbelt`, `docker`, or `podman`) is available. See
[Sandboxing](#sandboxing) for details.
To build both the `gemini` CLI utility and the sandbox container, run
`build:all` from the root directory:
```bash
npm run build:all
```
To skip building the sandbox container, you can use `npm run build` instead.
### Running the CLI
To start the Gemini CLI from the source code (after building), run the following
command from the root directory:
```bash
npm start
```
If you'd like to run the source build outside of the gemini-cli folder, you can
utilize `npm link path/to/gemini-cli/packages/cli` (see:
[docs](https://docs.npmjs.com/cli/v9/commands/npm-link)) or
`alias gemini="node path/to/gemini-cli/packages/cli"` to run with `gemini`
### Running tests
This project contains two types of tests: unit tests and integration tests.
#### Unit tests
To execute the unit test suite for the project:
```bash
npm run test
```
This will run tests located in the `packages/core` and `packages/cli`
directories. Ensure tests pass before submitting any changes. For a more
comprehensive check, it is recommended to run `npm run preflight`.
#### Integration tests
The integration tests are designed to validate the end-to-end functionality of
the Gemini CLI. They are not run as part of the default `npm run test` command.
To run the integration tests, use the following command:
```bash
npm run test:e2e
```
For more detailed information on the integration testing framework, please see
the
[Integration Tests documentation](https://geminicli.com/docs/integration-tests).
### Linting and preflight checks
To ensure code quality and formatting consistency, run the preflight check:
```bash
npm run preflight
```
This command will run ESLint, Prettier, all tests, and other checks as defined
in the project's `package.json`.
_ProTip_: after cloning, create a git pre-commit hook file to ensure your
commits are always clean.
```bash
echo "
# Run npm build and check for errors
if ! npm run preflight; then
echo "npm build failed. Commit aborted."
exit 1
fi
" > .git/hooks/pre-commit && chmod +x .git/hooks/pre-commit
```
#### Formatting
To separately format the code in this project by running the following command
from the root directory:
```bash
npm run format
```
This command uses Prettier to format the code according to the project's style
guidelines.
#### Linting
To separately lint the code in this project, run the following command from the
root directory:
```bash
npm run lint
```
### Coding conventions
- Please adhere to the coding style, patterns, and conventions used throughout
the existing codebase.
- Consult [GEMINI.md](../GEMINI.md) (typically found in the project root) for
specific instructions related to AI-assisted development, including
conventions for React, comments, and Git usage.
- **Imports:** Pay special attention to import paths. The project uses ESLint to
enforce restrictions on relative imports between packages.
### Debugging
#### VS Code
0. Run the CLI to interactively debug in VS Code with `F5`
1. Start the CLI in debug mode from the root directory:
```bash
npm run debug
```
This command runs `node --inspect-brk dist/gemini.js` within the
`packages/cli` directory, pausing execution until a debugger attaches. You
can then open `chrome://inspect` in your Chrome browser to connect to the
debugger.
2. In VS Code, use the "Attach" launch configuration (found in
`.vscode/launch.json`).
Alternatively, you can use the "Launch Program" configuration in VS Code if you
prefer to launch the currently open file directly, but 'F5' is generally
recommended.
To hit a breakpoint inside the sandbox container run:
```bash
DEBUG=1 gemini
```
**Note:** If you have `DEBUG=true` in a project's `.env` file, it won't affect
gemini-cli due to automatic exclusion. Use `.gemini/.env` files for gemini-cli
specific debug settings.
### React DevTools
To debug the CLI's React-based UI, you can use React DevTools.
1. **Start the Gemini CLI in development mode:**
```bash
DEV=true npm start
```
2. **Install and run React DevTools version 6 (which matches the CLI's
`react-devtools-core`):**
You can either install it globally:
```bash
npm install -g react-devtools@6
react-devtools
```
Or run it directly using npx:
```bash
npx react-devtools@6
```
Your running CLI application should then connect to React DevTools.

### Sandboxing
#### macOS Seatbelt
On macOS, `gemini` uses Seatbelt (`sandbox-exec`) under a `permissive-open`
profile (see `packages/cli/src/utils/sandbox-macos-permissive-open.sb`) that
restricts writes to the project folder but otherwise allows all other operations
and outbound network traffic ("open") by default. You can switch to a
`strict-open` profile (see
`packages/cli/src/utils/sandbox-macos-strict-open.sb`) that restricts both reads
and writes to the working directory while allowing outbound network traffic by
setting `SEATBELT_PROFILE=strict-open` in your environment or `.env` file.
Available built-in profiles are `permissive-{open,proxied}`,
`restrictive-{open,proxied}`, and `strict-{open,proxied}` (see below for proxied
networking). You can also switch to a custom profile
`SEATBELT_PROFILE=<profile>` if you also create a file
`.gemini/sandbox-macos-<profile>.sb` under your project settings directory
`.gemini`.
#### Container-based sandboxing (all platforms)
For stronger container-based sandboxing on macOS or other platforms, you can set
`GEMINI_SANDBOX=true|docker|podman|<command>` in your environment or `.env`
file. The specified command (or if `true` then either `docker` or `podman`) must
be installed on the host machine. Once enabled, `npm run build:all` will build a
minimal container ("sandbox") image and `npm start` will launch inside a fresh
instance of that container. The first build can take 20-30s (mostly due to
downloading of the base image) but after that both build and start overhead
should be minimal. Default builds (`npm run build`) will not rebuild the
sandbox.
Container-based sandboxing mounts the project directory (and system temp
directory) with read-write access and is started/stopped/removed automatically
as you start/stop Gemini CLI. Files created within the sandbox should be
automatically mapped to your user/group on host machine. You can easily specify
additional mounts, ports, or environment variables by setting
`SANDBOX_{MOUNTS,PORTS,ENV}` as needed. You can also fully customize the sandbox
for your projects by creating the files `.gemini/sandbox.Dockerfile` and/or
`.gemini/sandbox.bashrc` under your project settings directory (`.gemini`) and
running `gemini` with `BUILD_SANDBOX=1` to trigger building of your custom
sandbox.
#### Proxied networking
All sandboxing methods, including macOS Seatbelt using `*-proxied` profiles,
support restricting outbound network traffic through a custom proxy server that
can be specified as `GEMINI_SANDBOX_PROXY_COMMAND=<command>`, where `<command>`
must start a proxy server that listens on `:::8877` for relevant requests. See
`docs/examples/proxy-script.md` for a minimal proxy that only allows `HTTPS`
connections to `example.com:443` (e.g. `curl https://example.com`) and declines
all other requests. The proxy is started and stopped automatically alongside the
sandbox.
### Manual publish
We publish an artifact for each commit to our internal registry. But if you need
to manually cut a local build, then run the following commands:
```
npm run clean
npm install
npm run auth
npm run prerelease:dev
npm publish --workspaces
```
## Documentation contribution process
Our documentation must be kept up-to-date with our code contributions. We want
our documentation to be clear, concise, and helpful to our users. We value:
- **Clarity:** Use simple and direct language. Avoid jargon where possible.
- **Accuracy:** Ensure all information is correct and up-to-date.
- **Completeness:** Cover all aspects of a feature or topic.
- **Examples:** Provide practical examples to help users understand how to use
Gemini CLI.
### Getting started
The process for contributing to the documentation is similar to contributing
code.
1. **Fork the repository** and create a new branch.
2. **Make your changes** in the `/docs` directory.
3. **Preview your changes locally** in Markdown rendering.
4. **Lint and format your changes.** Our preflight check includes linting and
formatting for documentation files.
```bash
npm run preflight
```
5. **Open a pull request** with your changes.
### Documentation structure
Our documentation is organized using [sidebar.json](/docs/sidebar.json) as the
table of contents. When adding new documentation:
1. Create your markdown file **in the appropriate directory** under `/docs`.
2. Add an entry to `sidebar.json` in the relevant section.
3. Ensure all internal links use relative paths and point to existing files.
### Style guide
We follow the
[Google Developer Documentation Style Guide](https://developers.google.com/style).
Please refer to it for guidance on writing style, tone, and formatting.
#### Key style points
- Use sentence case for headings.
- Write in second person ("you") when addressing the reader.
- Use present tense.
- Keep paragraphs short and focused.
- Use code blocks with appropriate language tags for syntax highlighting.
- Include practical examples whenever possible.
### Linting and formatting
We use `prettier` to enforce a consistent style across our documentation. The
`npm run preflight` command will check for any linting issues.
You can also run the linter and formatter separately:
- `npm run lint` - Check for linting issues
- `npm run format` - Auto-format markdown files
- `npm run lint:fix` - Auto-fix linting issues where possible
Please make sure your contributions are free of linting errors before submitting
a pull request.
### Before you submit
Before submitting your documentation pull request, please:
1. Run `npm run preflight` to ensure all checks pass.
2. Review your changes for clarity and accuracy.
3. Check that all links work correctly.
4. Ensure any code examples are tested and functional.
5. Sign the
[Contributor License Agreement (CLA)](https://cla.developers.google.com/) if
you haven't already.
### Need help?
If you have questions about contributing documentation:
- Check our [FAQ](https://geminicli.com/docs/resources/faq).
- Review existing documentation for examples.
- Open [an issue](https://github.com/google-gemini/gemini-cli/issues) to discuss
your proposed changes.
- Reach out to the maintainers.
We appreciate your contributions to making Gemini CLI documentation better!
================================================
FILE: Dockerfile
================================================
# Sandbox container image for Gemini CLI: Debian-slim Node 20 plus a minimal
# set of developer tools, with the CLI installed globally as a non-root user.
FROM docker.io/library/node:20-slim
# Build-time identifiers; exposed to the running CLI via environment variables
# so it can detect that it is executing inside the sandbox.
ARG SANDBOX_NAME="gemini-cli-sandbox"
ARG CLI_VERSION_ARG
ENV SANDBOX="$SANDBOX_NAME"
ENV CLI_VERSION=$CLI_VERSION_ARG
# install minimal set of packages, then clean up
# (apt cache is removed in the same layer to keep the image small)
RUN apt-get update && apt-get install -y --no-install-recommends \
python3 \
make \
g++ \
man-db \
curl \
dnsutils \
less \
jq \
bc \
gh \
git \
unzip \
rsync \
ripgrep \
procps \
psmisc \
lsof \
socat \
ca-certificates \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# set up npm global package folder under /usr/local/share
# give it to non-root user node, already set up in base image
RUN mkdir -p /usr/local/share/npm-global \
&& chown -R node:node /usr/local/share/npm-global
ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global
ENV PATH=$PATH:/usr/local/share/npm-global/bin
# switch to non-root user node
USER node
# install gemini-cli and clean up
# Tarballs come from the package build step (COPY'd from the build context).
# The node -e line is a sanity check that both installed package.json files
# parse as valid JSON, and `gemini --version` verifies the binary launches,
# before the npm cache and tarballs are removed in the same layer.
COPY packages/cli/dist/google-gemini-cli-*.tgz /tmp/gemini-cli.tgz
COPY packages/core/dist/google-gemini-cli-core-*.tgz /tmp/gemini-core.tgz
RUN npm install -g /tmp/gemini-core.tgz \
&& npm install -g /tmp/gemini-cli.tgz \
&& node -e "const fs=require('node:fs'); JSON.parse(fs.readFileSync('/usr/local/share/npm-global/lib/node_modules/@google/gemini-cli/package.json','utf8')); JSON.parse(fs.readFileSync('/usr/local/share/npm-global/lib/node_modules/@google/gemini-cli-core/package.json','utf8'));" \
&& gemini --version > /dev/null \
&& npm cache clean --force \
&& rm -f /tmp/gemini-{cli,core}.tgz
# default entrypoint when none specified
CMD ["gemini"]
================================================
FILE: GEMINI.md
================================================
# Gemini CLI Project Context
Gemini CLI is an open-source AI agent that brings the power of Gemini directly
into the terminal. It is designed to be a terminal-first, extensible, and
powerful tool for developers.
## Project Overview
- **Purpose:** Provide a seamless terminal interface for Gemini models,
supporting code understanding, generation, automation, and integration via MCP
(Model Context Protocol).
- **Main Technologies:**
- **Runtime:** Node.js (>=20.0.0, recommended ~20.19.0 for development)
- **Language:** TypeScript
- **UI Framework:** React (using [Ink](https://github.com/vadimdemedes/ink)
for CLI rendering)
- **Testing:** Vitest
- **Bundling:** esbuild
- **Linting/Formatting:** ESLint, Prettier
- **Architecture:** Monorepo structure using npm workspaces.
- `packages/cli`: User-facing terminal UI, input processing, and display
rendering.
- `packages/core`: Backend logic, Gemini API orchestration, prompt
construction, and tool execution.
- `packages/a2a-server`: Experimental Agent-to-Agent server.
- `packages/sdk`: Programmatic SDK for embedding Gemini CLI capabilities.
- `packages/devtools`: Integrated developer tools (Network/Console inspector).
- `packages/test-utils`: Shared test utilities and test rig.
- `packages/vscode-ide-companion`: VS Code extension pairing with the CLI.
## Building and Running
- **Install Dependencies:** `npm install`
- **Build All:** `npm run build:all` (Builds packages, sandbox, and VS Code
companion)
- **Build Packages:** `npm run build`
- **Run in Development:** `npm run start`
- **Run in Debug Mode:** `npm run debug` (Enables Node.js inspector)
- **Bundle Project:** `npm run bundle`
- **Clean Artifacts:** `npm run clean`
## Testing and Quality
- **Test Commands:**
- **Unit (All):** `npm run test`
- **Integration (E2E):** `npm run test:e2e`
- **Workspace-Specific:** `npm test -w <workspace> -- <path_to_test_file>`
  (Note: `<path_to_test_file>` must be relative to the workspace root, e.g.,
  `npm test -w @google/gemini-cli-core -- src/routing/modelRouterService.test.ts`)
- **Full Validation:** `npm run preflight` (Heaviest check; runs clean, install,
build, lint, type check, and tests. Recommended before submitting PRs. Due to
its long runtime, only run this at the very end of a code implementation task.
If it fails, use faster, targeted commands (e.g., `npm run test`,
`npm run lint`, or workspace-specific tests) to iterate on fixes before
re-running `preflight`. For simple, non-code changes like documentation or
prompting updates, skip `preflight` at the end of the task and wait for PR
validation.)
- **Individual Checks:** `npm run lint` / `npm run format` / `npm run typecheck`
## Development Conventions
- **Contributions:** Follow the process outlined in `CONTRIBUTING.md`. Requires
signing the Google CLA.
- **Pull Requests:** Keep PRs small, focused, and linked to an existing issue.
Always activate the `pr-creator` skill for PR generation, even when using the
`gh` CLI.
- **Commit Messages:** Follow the
[Conventional Commits](https://www.conventionalcommits.org/) standard.
- **Imports:** Use specific imports and avoid restricted relative imports
between packages (enforced by ESLint).
- **License Headers:** For all new source code files (`.ts`, `.tsx`, `.js`),
include the Apache-2.0 license header with the current year. (e.g.,
`Copyright 2026 Google LLC`). This is enforced by ESLint.
## Testing Conventions
- **Environment Variables:** When testing code that depends on environment
variables, use `vi.stubEnv('NAME', 'value')` in `beforeEach` and
`vi.unstubAllEnvs()` in `afterEach`. Avoid modifying `process.env` directly as
it can lead to test leakage and is less reliable. To "unset" a variable, use
an empty string `vi.stubEnv('NAME', '')`.
## Documentation
- Always use the `docs-writer` skill when you are asked to write, edit, or
review any documentation.
- Documentation is located in the `docs/` directory.
- Suggest documentation updates when code changes render existing documentation
obsolete or incomplete.
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: Makefile
================================================
# Makefile for gemini-cli
# Convenience wrappers around the npm scripts defined in package.json.
# NOTE(review): build-sandbox and release are declared .PHONY but have no
# rules in this file — confirm whether they are defined elsewhere or stale.
.PHONY: help install build build-sandbox build-all test lint format preflight clean start debug release run-npx create-alias

# Print a summary of the available targets.
help:
	@echo "Makefile for gemini-cli"
	@echo ""
	@echo "Usage:"
	@echo " make install - Install npm dependencies"
	@echo " make build - Build the main project"
	@echo " make build-all - Build the main project and sandbox"
	@echo " make test - Run the test suite"
	@echo " make lint - Lint the code"
	@echo " make format - Format the code"
	@echo " make preflight - Run formatting, linting, and tests"
	@echo " make clean - Remove generated files"
	@echo " make start - Start the Gemini CLI"
	@echo " make debug - Start the Gemini CLI in debug mode"
	@echo ""
	@echo " make run-npx - Run the CLI using npx (for testing the published package)"
	@echo " make create-alias - Create a 'gemini' alias for your shell"

# Install npm dependencies for all workspaces.
install:
	npm install

# Build the main packages.
build:
	npm run build

# Build packages, sandbox, and the VS Code companion.
build-all:
	npm run build:all

# Run the unit test suite.
test:
	npm run test

# Lint the codebase.
lint:
	npm run lint

# Auto-format the codebase.
format:
	npm run format

# Full validation (clean, install, build, lint, type check, tests).
preflight:
	npm run preflight

# Remove generated artifacts.
clean:
	npm run clean

# Start the Gemini CLI.
start:
	npm run start

# Start the Gemini CLI with the Node.js inspector enabled.
debug:
	npm run debug

# Run the CLI via npx directly from GitHub (tests the published entry point).
run-npx:
	npx https://github.com/google-gemini/gemini-cli

# Create a 'gemini' alias for your shell via the helper script.
create-alias:
	scripts/create_alias.sh
================================================
FILE: README.md
================================================
# Gemini CLI
[CI Status](https://github.com/google-gemini/gemini-cli/actions/workflows/ci.yml)
[E2E Tests](https://github.com/google-gemini/gemini-cli/actions/workflows/chained_e2e.yml)
[npm Package](https://www.npmjs.com/package/@google/gemini-cli)
[License](https://github.com/google-gemini/gemini-cli/blob/main/LICENSE)
[CodeWiki](https://codewiki.google/github.com/google-gemini/gemini-cli?utm_source=badge&utm_medium=github&utm_campaign=github.com/google-gemini/gemini-cli)

Gemini CLI is an open-source AI agent that brings the power of Gemini directly
into your terminal. It provides lightweight access to Gemini, giving you the
most direct path from your prompt to our model.
Learn all about Gemini CLI in our [documentation](https://geminicli.com/docs/).
## 🚀 Why Gemini CLI?
- **🎯 Free tier**: 60 requests/min and 1,000 requests/day with personal Google
account.
- **🧠 Powerful Gemini 3 models**: Access to improved reasoning and 1M token
context window.
- **🔧 Built-in tools**: Google Search grounding, file operations, shell
commands, web fetching.
- **🔌 Extensible**: MCP (Model Context Protocol) support for custom
integrations.
- **💻 Terminal-first**: Designed for developers who live in the command line.
- **🛡️ Open source**: Apache 2.0 licensed.
## 📦 Installation
See
[Gemini CLI installation, execution, and releases](./docs/get-started/installation.md)
for recommended system specifications and a detailed installation guide.
### Quick Install
#### Run instantly with npx
```bash
# Using npx (no installation required)
npx @google/gemini-cli
```
#### Install globally with npm
```bash
npm install -g @google/gemini-cli
```
#### Install globally with Homebrew (macOS/Linux)
```bash
brew install gemini-cli
```
#### Install globally with MacPorts (macOS)
```bash
sudo port install gemini-cli
```
#### Install with Anaconda (for restricted environments)
```bash
# Create and activate a new environment
conda create -y -n gemini_env -c conda-forge nodejs
conda activate gemini_env
# Install Gemini CLI globally via npm (inside the environment)
npm install -g @google/gemini-cli
```
## Release Cadence and Tags
See [Releases](./docs/releases.md) for more details.
### Preview
New preview releases will be published each week at UTC 23:59 on Tuesdays. These
releases will not have been fully vetted and may contain regressions or other
outstanding issues. Please help us test by installing with the `preview` tag.
```bash
npm install -g @google/gemini-cli@preview
```
### Stable
- New stable releases will be published each week at UTC 20:00 on Tuesdays.
  This will be the full promotion of last week's `preview` release plus any bug
  fixes and validations. Use the `latest` tag.
```bash
npm install -g @google/gemini-cli@latest
```
### Nightly
- New releases will be published each day at UTC 00:00. This will be all changes
from the main branch as represented at time of release. It should be assumed
there are pending validations and issues. Use the `nightly` tag.
```bash
npm install -g @google/gemini-cli@nightly
```
## 📋 Key Features
### Code Understanding & Generation
- Query and edit large codebases
- Generate new apps from PDFs, images, or sketches using multimodal capabilities
- Debug issues and troubleshoot with natural language
### Automation & Integration
- Automate operational tasks like querying pull requests or handling complex
rebases
- Use MCP servers to connect new capabilities, including
[media generation with Imagen, Veo or Lyria](https://github.com/GoogleCloudPlatform/vertex-ai-creative-studio/tree/main/experiments/mcp-genmedia)
- Run non-interactively in scripts for workflow automation
### Advanced Capabilities
- Ground your queries with built-in
[Google Search](https://ai.google.dev/gemini-api/docs/grounding) for real-time
information
- Conversation checkpointing to save and resume complex sessions
- Custom context files (GEMINI.md) to tailor behavior for your projects
### GitHub Integration
Integrate Gemini CLI directly into your GitHub workflows with
[**Gemini CLI GitHub Action**](https://github.com/google-github-actions/run-gemini-cli):
- **Pull Request Reviews**: Automated code review with contextual feedback and
suggestions
- **Issue Triage**: Automated labeling and prioritization of GitHub issues based
on content analysis
- **On-demand Assistance**: Mention `@gemini-cli` in issues and pull requests
for help with debugging, explanations, or task delegation
- **Custom Workflows**: Build automated, scheduled and on-demand workflows
tailored to your team's needs
## 🔐 Authentication Options
Choose the authentication method that best fits your needs:
### Option 1: Sign in with Google (OAuth login using your Google Account)
**✨ Best for:** Individual developers as well as anyone who has a Gemini Code
Assist License. (see
[quota limits and terms of service](https://cloud.google.com/gemini/docs/quotas)
for details)
**Benefits:**
- **Free tier**: 60 requests/min and 1,000 requests/day
- **Gemini 3 models** with 1M token context window
- **No API key management** - just sign in with your Google account
- **Automatic updates** to latest models
#### Start Gemini CLI, then choose _Sign in with Google_ and follow the browser authentication flow when prompted
```bash
gemini
```
#### If you are using a paid Code Assist License from your organization, remember to set the Google Cloud Project
```bash
# Set your Google Cloud Project
export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
gemini
```
### Option 2: Gemini API Key
**✨ Best for:** Developers who need specific model control or paid tier access
**Benefits:**
- **Free tier**: 1000 requests/day with Gemini 3 (mix of flash and pro)
- **Model selection**: Choose specific Gemini models
- **Usage-based billing**: Upgrade for higher limits when needed
```bash
# Get your key from https://aistudio.google.com/apikey
export GEMINI_API_KEY="YOUR_API_KEY"
gemini
```
### Option 3: Vertex AI
**✨ Best for:** Enterprise teams and production workloads
**Benefits:**
- **Enterprise features**: Advanced security and compliance
- **Scalable**: Higher rate limits with billing account
- **Integration**: Works with existing Google Cloud infrastructure
```bash
# Get your key from Google Cloud Console
export GOOGLE_API_KEY="YOUR_API_KEY"
export GOOGLE_GENAI_USE_VERTEXAI=true
gemini
```
For Google Workspace accounts and other authentication methods, see the
[authentication guide](./docs/get-started/authentication.md).
## 🚀 Getting Started
### Basic Usage
#### Start in current directory
```bash
gemini
```
#### Include multiple directories
```bash
gemini --include-directories ../lib,../docs
```
#### Use specific model
```bash
gemini -m gemini-2.5-flash
```
#### Non-interactive mode for scripts
Get a simple text response:
```bash
gemini -p "Explain the architecture of this codebase"
```
For more advanced scripting, including how to parse JSON and handle errors, use
the `--output-format json` flag to get structured output:
```bash
gemini -p "Explain the architecture of this codebase" --output-format json
```
For real-time event streaming (useful for monitoring long-running operations),
use `--output-format stream-json` to get newline-delimited JSON events:
```bash
gemini -p "Run tests and deploy" --output-format stream-json
```
### Quick Examples
#### Start a new project
```bash
cd new-project/
gemini
> Write me a Discord bot that answers questions using a FAQ.md file I will provide
```
#### Analyze existing code
```bash
git clone https://github.com/google-gemini/gemini-cli
cd gemini-cli
gemini
> Give me a summary of all of the changes that went in yesterday
```
## 📚 Documentation
### Getting Started
- [**Quickstart Guide**](./docs/get-started/index.md) - Get up and running
quickly.
- [**Authentication Setup**](./docs/get-started/authentication.md) - Detailed
auth configuration.
- [**Configuration Guide**](./docs/reference/configuration.md) - Settings and
customization.
- [**Keyboard Shortcuts**](./docs/reference/keyboard-shortcuts.md) -
Productivity tips.
### Core Features
- [**Commands Reference**](./docs/reference/commands.md) - All slash commands
(`/help`, `/chat`, etc).
- [**Custom Commands**](./docs/cli/custom-commands.md) - Create your own
reusable commands.
- [**Context Files (GEMINI.md)**](./docs/cli/gemini-md.md) - Provide persistent
context to Gemini CLI.
- [**Checkpointing**](./docs/cli/checkpointing.md) - Save and resume
conversations.
- [**Token Caching**](./docs/cli/token-caching.md) - Optimize token usage.
### Tools & Extensions
- [**Built-in Tools Overview**](./docs/reference/tools.md)
- [File System Operations](./docs/tools/file-system.md)
- [Shell Commands](./docs/tools/shell.md)
- [Web Fetch & Search](./docs/tools/web-fetch.md)
- [**MCP Server Integration**](./docs/tools/mcp-server.md) - Extend with custom
tools.
- [**Custom Extensions**](./docs/extensions/index.md) - Build and share your own
commands.
### Advanced Topics
- [**Headless Mode (Scripting)**](./docs/cli/headless.md) - Use Gemini CLI in
automated workflows.
- [**IDE Integration**](./docs/ide-integration/index.md) - VS Code companion.
- [**Sandboxing & Security**](./docs/cli/sandbox.md) - Safe execution
environments.
- [**Trusted Folders**](./docs/cli/trusted-folders.md) - Control execution
policies by folder.
- [**Enterprise Guide**](./docs/cli/enterprise.md) - Deploy and manage in a
corporate environment.
- [**Telemetry & Monitoring**](./docs/cli/telemetry.md) - Usage tracking.
- [**Tools reference**](./docs/reference/tools.md) - Built-in tools overview.
- [**Local development**](./docs/local-development.md) - Local development
tooling.
### Troubleshooting & Support
- [**Troubleshooting Guide**](./docs/resources/troubleshooting.md) - Common
issues and solutions.
- [**FAQ**](./docs/resources/faq.md) - Frequently asked questions.
- Use `/bug` command to report issues directly from the CLI.
### Using MCP Servers
Configure MCP servers in `~/.gemini/settings.json` to extend Gemini CLI with
custom tools:
```text
> @github List my open pull requests
> @slack Send a summary of today's commits to #dev channel
> @database Run a query to find inactive users
```
See the [MCP Server Integration guide](./docs/tools/mcp-server.md) for setup
instructions.
## 🤝 Contributing
We welcome contributions! Gemini CLI is fully open source (Apache 2.0), and we
encourage the community to:
- Report bugs and suggest features.
- Improve documentation.
- Submit code improvements.
- Share your MCP servers and extensions.
See our [Contributing Guide](./CONTRIBUTING.md) for development setup, coding
standards, and how to submit pull requests.
Check our [Official Roadmap](https://github.com/orgs/google-gemini/projects/11)
for planned features and priorities.
## 📖 Resources
- **[Official Roadmap](./ROADMAP.md)** - See what's coming next.
- **[Changelog](./docs/changelogs/index.md)** - See recent notable updates.
- **[NPM Package](https://www.npmjs.com/package/@google/gemini-cli)** - Package
registry.
- **[GitHub Issues](https://github.com/google-gemini/gemini-cli/issues)** -
Report bugs or request features.
- **[Security Advisories](https://github.com/google-gemini/gemini-cli/security/advisories)** -
Security updates.
### Uninstall
See the [Uninstall Guide](./docs/resources/uninstall.md) for removal
instructions.
## 📄 Legal
- **License**: [Apache License 2.0](LICENSE)
- **Terms of Service**: [Terms & Privacy](./docs/resources/tos-privacy.md)
- **Security**: [Security Policy](SECURITY.md)
---
Built with ❤️ by Google and the open source community
================================================
FILE: ROADMAP.md
================================================
# Gemini CLI Roadmap
The
[Official Gemini CLI Roadmap](https://github.com/orgs/google-gemini/projects/11/)
Gemini CLI is an open-source AI agent that brings the power of Gemini directly
into your terminal. It provides lightweight access to Gemini, giving you the
most direct path from your prompt to our model.
This document outlines our approach to the Gemini CLI roadmap. Here, you'll find
our guiding principles and a breakdown of the key areas we are focused on for
development. Our roadmap is not a static list but a dynamic set of priorities
that are tracked live in our GitHub Issues.
As an
[Apache 2.0 open source project](https://github.com/google-gemini/gemini-cli?tab=Apache-2.0-1-ov-file#readme),
we appreciate and welcome
[public contributions](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md),
and will give first priority to those contributions aligned with our roadmap. If
you want to propose a new feature or change to our roadmap, please start by
[opening an issue for discussion](https://github.com/google-gemini/gemini-cli/issues/new/choose).
## Disclaimer
This roadmap represents our current thinking and is for informational purposes
only. It is not a commitment or a guarantee of future delivery. The development,
release, and timing of any features are subject to change, and we may update the
roadmap based on community discussions as well as when our priorities evolve.
## Guiding Principles
Our development is guided by the following principles:
- **Power & Simplicity:** Deliver access to state-of-the-art Gemini models with
an intuitive and easy-to-use lightweight command-line interface.
- **Extensibility:** An adaptable agent to help you with a variety of use cases
and environments along with the ability to run these agents anywhere.
- **Intelligent:** Gemini CLI should be reliably ranked among the best agentic
tools as measured by benchmarks like SWE Bench, Terminal Bench, and CSAT.
- **Free and Open Source:** Foster a thriving open source community where cost
isn’t a barrier to personal use, and PRs get merged quickly. This means
resolving and closing issues, pull requests, and discussion posts quickly.
## How the Roadmap Works
Our roadmap is managed directly through GitHub Issues. See our entry point
Roadmap Issue [here](https://github.com/google-gemini/gemini-cli/issues/4191).
This approach allows for transparency and gives you a direct way to learn more
or get involved with any specific initiative. All our roadmap items will be
tagged as Type:`Feature` and Label:`maintainer` for features we are actively
working on, or Type:`Task` and Label:`maintainer` for a more detailed list of
tasks.
Issues are organized to provide key information at a glance:
- **Target Quarter:** `Milestone` denotes the anticipated delivery timeline.
- **Feature Area:** Labels such as `area/model` or `area/tooling` categorize the
work.
- **Issue Type:** _Workstream_ => _Epics_ => _Features_ => _Tasks|Bugs_
To see what we're working on, you can filter our issues by these dimensions. See
all our items [here](https://github.com/orgs/google-gemini/projects/11/views/19)
## Focus Areas
To better organize our efforts, we categorize our work into several key feature
areas. These labels are used on our GitHub Issues to help you filter and find
initiatives that interest you.
- **Authentication:** Secure user access via API keys, Gemini Code Assist login,
etc.
- **Model:** Support new Gemini models, multi-modality, local execution, and
performance tuning.
- **User Experience:** Improve the CLI's usability, performance, interactive
features, and documentation.
- **Tooling:** Built-in tools and the MCP ecosystem.
- **Core:** Core functionality of the CLI
- **Extensibility:** Bringing Gemini CLI to other surfaces e.g. GitHub.
- **Contribution:** Improve the contribution process via test automation and
CI/CD pipeline enhancements.
- **Platform:** Manage installation, OS support, and the underlying CLI
framework.
- **Quality:** Focus on testing, reliability, performance, and overall product
quality.
- **Background Agents:** Enable long-running, autonomous tasks and proactive
assistance.
- **Security and Privacy:** For all things related to security and privacy
## How to Contribute
Gemini CLI is an open-source project, and we welcome contributions from the
community! Whether you're a developer, a designer, or just an enthusiastic user,
you can find our
[Community Guidelines here](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md)
to learn how to get started. There are many ways to get involved:
- **Roadmap:** Please review and find areas in our
[roadmap](https://github.com/google-gemini/gemini-cli/issues/4191) that you
would like to contribute to. Contributions based on this will be easiest to
integrate with.
- **Report Bugs:** If you find an issue, please create a
[bug](https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml)
with as much detail as possible. If you believe it is a critical breaking
issue preventing direct CLI usage, please tag it as `priority/p0`.
- **Suggest Features:** Have a great idea? We'd love to hear it! Open a
[feature request](https://github.com/google-gemini/gemini-cli/issues/new?template=feature_request.yml).
- **Contribute Code:** Check out our
[CONTRIBUTING.md](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md)
file for guidelines on how to submit pull requests. We have a list of "good
first issues" for new contributors.
- **Write Documentation:** Help us improve our documentation, tutorials, and
  examples.

We are excited about the future of Gemini CLI and look forward to building it
with you!
================================================
FILE: SECURITY.md
================================================
# Reporting Security Issues
To report a security issue, please use [https://g.co/vulnz](https://g.co/vulnz).
We use g.co/vulnz for our intake, and do coordination and disclosure here on
GitHub (including using the [GitHub Security Advisory]). The Google Security
Team will respond within 5 working days of your report on g.co/vulnz.
[GitHub Security Advisory]:
https://github.com/google-gemini/gemini-cli/security/advisories
================================================
FILE: docs/admin/enterprise-controls.md
================================================
# Enterprise Admin Controls
Gemini CLI empowers enterprise administrators to manage and enforce security
policies and configuration settings across their entire organization. Secure
defaults are enabled automatically for all enterprise users, but can be
customized via the [Management Console](https://goo.gle/manage-gemini-cli).
**Enterprise Admin Controls are enforced globally and cannot be overridden by
users locally**, ensuring a consistent security posture.
## Admin Controls vs. System Settings
While [System-wide settings](../cli/settings.md) act as convenient configuration
overrides, they can still be modified by users with sufficient privileges. In
contrast, admin controls are immutable at the local level, making them the
preferred method for enforcing policy.
## Available Controls
### Strict Mode
**Enabled/Disabled** | Default: enabled
If enabled, users will not be able to enter YOLO mode.
### Extensions
**Enabled/Disabled** | Default: disabled
If disabled, users will not be able to use or install extensions. See
[Extensions](../extensions/index.md) for more details.
### MCP
#### Enabled/Disabled
**Enabled/Disabled** | Default: disabled
If disabled, users will not be able to use MCP servers. See
[MCP Server Integration](../tools/mcp-server.md) for more details.
#### MCP Servers (preview)
**Default**: empty
Allows administrators to define an explicit allowlist of MCP servers. This
guarantees that users can only connect to trusted MCP servers defined by the
organization.
**Allowlist Format:**
```json
{
"mcpServers": {
"external-provider": {
"url": "https://api.mcp-provider.com",
"type": "sse",
"trust": true,
"includeTools": ["toolA", "toolB"],
"excludeTools": []
},
"internal-corp-tool": {
"url": "https://mcp.internal-tool.corp",
"type": "http",
"includeTools": [],
"excludeTools": ["adminTool"]
}
}
}
```
**Supported Fields:**
- `url`: (Required) The full URL of the MCP server endpoint.
- `type`: (Required) The connection type (e.g., `sse` or `http`).
- `trust`: (Optional) If set to `true`, the server is trusted and tool execution
will not require user approval.
- `includeTools`: (Optional) An explicit list of tool names to allow. If
specified, only these tools will be available.
- `excludeTools`: (Optional) A list of tool names to hide. These tools will be
blocked.
**Client Enforcement Logic:**
- **Empty Allowlist**: If the admin allowlist is empty, the client uses the
user’s local configuration as is (unless the MCP toggle above is disabled).
- **Active Allowlist**: If the allowlist contains one or more servers, **all
locally configured servers not present in the allowlist are ignored**.
- **Configuration Merging**: For a server to be active, it must exist in
**both** the admin allowlist and the user’s local configuration (matched by
name). The client merges these definitions as follows:
- **Override Fields**: The `url`, `type`, & `trust` are always taken from the
admin allowlist, overriding any local values.
- **Tools Filtering**: If `includeTools` or `excludeTools` are defined in the
allowlist, the admin’s rules are used exclusively. If both are undefined in
the admin allowlist, the client falls back to the user’s local tool
settings.
- **Cleared Fields**: To ensure security and consistency, the client
automatically clears local execution fields (`command`, `args`, `env`,
`cwd`, `httpUrl`, `tcp`). This prevents users from overriding the connection
method.
- **Other Fields**: All other MCP fields are pulled from the user’s local
configuration.
- **Missing Allowlisted Servers**: If a server appears in the admin allowlist
but is missing from the local configuration, it will not be initialized. This
ensures users maintain final control over which permitted servers are actually
active in their environment.
#### Required MCP Servers (preview)
**Default**: empty
Allows administrators to define MCP servers that are **always injected** into
the user's environment. Unlike the allowlist (which filters user-configured
servers), required servers are automatically added regardless of the user's
local configuration.
**Required Servers Format:**
```json
{
"requiredMcpServers": {
"corp-compliance-tool": {
"url": "https://mcp.corp/compliance",
"type": "http",
"trust": true,
"description": "Corporate compliance tool"
},
"internal-registry": {
"url": "https://registry.corp/mcp",
"type": "sse",
"authProviderType": "google_credentials",
"oauth": {
"scopes": ["https://www.googleapis.com/auth/scope"]
}
}
}
}
```
**Supported Fields:**
- `url`: (Required) The full URL of the MCP server endpoint.
- `type`: (Required) The connection type (`sse` or `http`).
- `trust`: (Optional) If set to `true`, tool execution will not require user
approval. Defaults to `true` for required servers.
- `description`: (Optional) Human-readable description of the server.
- `authProviderType`: (Optional) Authentication provider (`dynamic_discovery`,
`google_credentials`, or `service_account_impersonation`).
- `oauth`: (Optional) OAuth configuration including `scopes`, `clientId`, and
`clientSecret`.
- `targetAudience`: (Optional) OAuth target audience for service-to-service
auth.
- `targetServiceAccount`: (Optional) Service account email to impersonate.
- `headers`: (Optional) Additional HTTP headers to send with requests.
- `includeTools` / `excludeTools`: (Optional) Tool filtering lists.
- `timeout`: (Optional) Timeout in milliseconds for MCP requests.
**Client Enforcement Logic:**
- Required servers are injected **after** allowlist filtering, so they are
always available even if the allowlist is active.
- If a required server has the **same name** as a locally configured server, the
admin configuration **completely overrides** the local one.
- Required servers only support remote transports (`sse`, `http`). Local
execution fields (`command`, `args`, `env`, `cwd`) are not supported.
- Required servers can coexist with allowlisted servers — both features work
independently.
### Unmanaged Capabilities
**Enabled/Disabled** | Default: disabled
If disabled, users will not be able to use certain features. Currently, this
control disables Agent Skills. See [Agent Skills](../cli/skills.md) for more
details.
================================================
FILE: docs/changelogs/index.md
================================================
# Gemini CLI release notes
Gemini CLI has three major release channels: nightly, preview, and stable. For
most users, we recommend the stable release.
On this page, you can find information regarding the current releases and
announcements from each release.
For the full changelog, refer to
[Releases - google-gemini/gemini-cli](https://github.com/google-gemini/gemini-cli/releases)
on GitHub.
## Current releases
| Release channel | Notes |
| :-------------------- | :---------------------------------------------- |
| Nightly | Nightly release with the most recent changes. |
| [Preview](preview.md) | Experimental features ready for early feedback. |
| [Stable](latest.md) | Stable, recommended for general use. |
## Announcements: v0.34.0 - 2026-03-17
- **Plan Mode Enabled by Default:** Plan Mode is now enabled by default to help
you break down complex tasks and execute them systematically
([#21713](https://github.com/google-gemini/gemini-cli/pull/21713) by @jerop).
- **Sandboxing Enhancements:** We've added native gVisor (runsc) and
experimental LXC container sandboxing support for safer execution environments
([#21062](https://github.com/google-gemini/gemini-cli/pull/21062) by
@Zheyuan-Lin, [#20735](https://github.com/google-gemini/gemini-cli/pull/20735)
by @h30s).
## Announcements: v0.33.0 - 2026-03-11
- **Agent Architecture Enhancements:** Introduced HTTP authentication for A2A
remote agents and authenticated A2A agent card discovery
([#20510](https://github.com/google-gemini/gemini-cli/pull/20510) by
@SandyTao520, [#20622](https://github.com/google-gemini/gemini-cli/pull/20622)
by @SandyTao520).
- **Plan Mode Updates:** Expanded Plan Mode with built-in research subagents,
annotation support for feedback, and a new `copy` subcommand
([#20972](https://github.com/google-gemini/gemini-cli/pull/20972) by @Adib234,
[#20988](https://github.com/google-gemini/gemini-cli/pull/20988) by
@ruomengz).
- **CLI UX & Admin Controls:** Redesigned the header to be compact with an ASCII
icon, inverted context window display to show usage, and enabled a 30-day
default retention for chat history
([#18713](https://github.com/google-gemini/gemini-cli/pull/18713) by
@keithguerin, [#20853](https://github.com/google-gemini/gemini-cli/pull/20853)
by @skeshive).
## Announcements: v0.32.0 - 2026-03-03
- **Generalist Agent:** The generalist agent is now enabled to improve task
delegation and routing
([#19665](https://github.com/google-gemini/gemini-cli/pull/19665) by
@joshualitt).
- **Model Steering in Workspace:** Added support for model steering directly in
the workspace
([#20343](https://github.com/google-gemini/gemini-cli/pull/20343) by
@joshualitt).
- **Plan Mode Enhancements:** Users can now open and modify plans in an external
editor, and the planning workflow has been adapted to handle complex tasks
more effectively with multi-select options
([#20348](https://github.com/google-gemini/gemini-cli/pull/20348) by @Adib234,
[#20465](https://github.com/google-gemini/gemini-cli/pull/20465) by @jerop).
- **Interactive Shell Autocompletion:** Introduced interactive shell
autocompletion for a more seamless experience
([#20082](https://github.com/google-gemini/gemini-cli/pull/20082) by
@mrpmohiburrahman).
- **Parallel Extension Loading:** Extensions are now loaded in parallel to
improve startup times
([#20229](https://github.com/google-gemini/gemini-cli/pull/20229) by
@scidomino).
## Announcements: v0.31.0 - 2026-02-27
- **Gemini 3.1 Pro Preview:** Gemini CLI now supports the new Gemini 3.1 Pro
Preview model
([#19676](https://github.com/google-gemini/gemini-cli/pull/19676) by
@sehoon38).
- **Experimental Browser Agent:** We've introduced a new experimental browser
agent to interact with web pages
([#19284](https://github.com/google-gemini/gemini-cli/pull/19284) by
@gsquared94).
- **Policy Engine Updates:** The policy engine now supports project-level
policies, MCP server wildcards, and tool annotation matching
([#18682](https://github.com/google-gemini/gemini-cli/pull/18682) by
@Abhijit-2592,
[#20024](https://github.com/google-gemini/gemini-cli/pull/20024) by @jerop).
- **Web Fetch Improvements:** We've implemented an experimental direct web fetch
feature and added rate limiting to mitigate DDoS risks
([#19557](https://github.com/google-gemini/gemini-cli/pull/19557) by @mbleigh,
[#19567](https://github.com/google-gemini/gemini-cli/pull/19567) by
@mattKorwel).
## Announcements: v0.30.0 - 2026-02-25
- **SDK & Custom Skills:** Introduced the initial SDK package, enabling dynamic
system instructions, `SessionContext` for SDK tool calls, and support for
custom skills
([#18861](https://github.com/google-gemini/gemini-cli/pull/18861) by
@mbleigh).
- **Policy Engine Enhancements:** Added a new `--policy` flag for user-defined
policies, introduced strict seatbelt profiles, and deprecated
`--allowed-tools` in favor of the policy engine
([#18500](https://github.com/google-gemini/gemini-cli/pull/18500) by
@allenhutchison).
- **UI & Themes:** Added a generic searchable list for settings and extensions,
new Solarized themes, text wrapping for markdown tables, and a clean UI toggle
prototype ([#19064](https://github.com/google-gemini/gemini-cli/pull/19064) by
@rmedranollamas).
- **Vim & Terminal Interaction:** Improved Vim support to feel more complete and
added support for Ctrl-Z terminal suspension
([#18755](https://github.com/google-gemini/gemini-cli/pull/18755) by
@ppgranger, [#18931](https://github.com/google-gemini/gemini-cli/pull/18931)
by @scidomino).
## Announcements: v0.29.0 - 2026-02-17
- **Plan Mode:** A new comprehensive planning capability with `/plan`,
`enter_plan_mode` tool, and dedicated documentation
([#17698](https://github.com/google-gemini/gemini-cli/pull/17698) by @Adib234,
[#18324](https://github.com/google-gemini/gemini-cli/pull/18324) by @jerop).
- **Gemini 3 Default:** We've removed the preview flag and enabled Gemini 3 by
default for all users
([#18414](https://github.com/google-gemini/gemini-cli/pull/18414) by
@sehoon38).
- **Extension Exploration:** New UI and settings to explore and manage
extensions more easily
([#18686](https://github.com/google-gemini/gemini-cli/pull/18686) by
@sripasg).
- **Admin Control:** Administrators can now allowlist specific MCP server
configurations
([#18311](https://github.com/google-gemini/gemini-cli/pull/18311) by
@skeshive).
## Announcements: v0.28.0 - 2026-02-10
- **IDE Support:** Gemini CLI now supports the Positron IDE
([#15047](https://github.com/google-gemini/gemini-cli/pull/15047) by
@kapsner).
- **Customization:** You can now use custom themes in extensions, and we've
implemented automatic theme switching based on your terminal's background
([#17327](https://github.com/google-gemini/gemini-cli/pull/17327) by
@spencer426, [#17976](https://github.com/google-gemini/gemini-cli/pull/17976)
by @Abhijit-2592).
- **Authentication:** We've added interactive and non-interactive consent for
OAuth, and you can now include your auth method in bug reports
([#17699](https://github.com/google-gemini/gemini-cli/pull/17699) by
@ehedlund, [#17569](https://github.com/google-gemini/gemini-cli/pull/17569) by
@erikus).
## Announcements: v0.27.0 - 2026-02-03
- **Event-Driven Architecture:** The CLI now uses a new event-driven scheduler
for tool execution, resulting in a more responsive and performant experience
([#17078](https://github.com/google-gemini/gemini-cli/pull/17078) by
@abhipatel12).
- **Enhanced User Experience:** This release includes queued tool confirmations,
and expandable large text pastes for a smoother workflow.
- **New `/rewind` Command:** Easily navigate your session history with the new
`/rewind` command
([#15720](https://github.com/google-gemini/gemini-cli/pull/15720) by
@Adib234).
- **Linux Clipboard Support:** You can now paste images on Linux with Wayland
and X11 ([#17144](https://github.com/google-gemini/gemini-cli/pull/17144) by
@devr0306).
## Announcements: v0.26.0 - 2026-01-27
- **Agents and Skills:** We've introduced a new `skill-creator` skill
([#16394](https://github.com/google-gemini/gemini-cli/pull/16394) by
@NTaylorMullen), enabled agent skills by default, and added a generalist agent
to improve task routing
([#16638](https://github.com/google-gemini/gemini-cli/pull/16638) by
@joshualitt).
- **UI/UX Improvements:** You can now "Rewind" through your conversation history
([#15717](https://github.com/google-gemini/gemini-cli/pull/15717) by
@Adib234).
- **Core and Scheduler Refactoring:** The core scheduler has been significantly
refactored to improve performance and reliability
([#16895](https://github.com/google-gemini/gemini-cli/pull/16895) by
@abhipatel12), and numerous performance and stability fixes have been
included.
## Announcements: v0.25.0 - 2026-01-20
- **Skills and Agents Improvements:** We've enhanced the `activate_skill` tool,
added a new `pr-creator` skill
([#16232](https://github.com/google-gemini/gemini-cli/pull/16232) by
[@NTaylorMullen](https://github.com/NTaylorMullen)), enabled skills by
default, improved the `cli_help` agent
([#16100](https://github.com/google-gemini/gemini-cli/pull/16100) by
[@scidomino](https://github.com/scidomino)), and added a new `/agents refresh`
command ([#16204](https://github.com/google-gemini/gemini-cli/pull/16204) by
[@joshualitt](https://github.com/joshualitt)).
- **UI/UX Refinements:** You'll notice more transparent feedback for skills
([#15954](https://github.com/google-gemini/gemini-cli/pull/15954) by
[@NTaylorMullen](https://github.com/NTaylorMullen)), the ability to switch
focus between the shell and input with Tab
([#14332](https://github.com/google-gemini/gemini-cli/pull/14332) by
[@jacob314](https://github.com/jacob314)), and dynamic terminal tab titles
([#16378](https://github.com/google-gemini/gemini-cli/pull/16378) by
[@NTaylorMullen](https://github.com/NTaylorMullen)).
- **Core Functionality & Performance:** This release includes support for
built-in agent skills
([#16045](https://github.com/google-gemini/gemini-cli/pull/16045) by
[@NTaylorMullen](https://github.com/NTaylorMullen)), refined Gemini 3 system
instructions ([#16139](https://github.com/google-gemini/gemini-cli/pull/16139)
by [@NTaylorMullen](https://github.com/NTaylorMullen)), caching for ignore
instances to improve performance
([#16185](https://github.com/google-gemini/gemini-cli/pull/16185) by
[@EricRahm](https://github.com/EricRahm)), and enhanced retry mechanisms
([#16489](https://github.com/google-gemini/gemini-cli/pull/16489) by
[@sehoon38](https://github.com/sehoon38)).
- **Bug Fixes and Stability:** We've squashed numerous bugs across the CLI,
core, and workflows, addressing issues with subagent delegation, unicode
character crashes, and sticky header regressions.
## Announcements: v0.24.0 - 2026-01-14
- **Agent Skills:** We've introduced significant advancements in Agent Skills.
This includes initial documentation and tutorials to help you get started,
alongside enhanced support for remote agents, allowing for more distributed
and powerful automation within Gemini CLI.
([#15869](https://github.com/google-gemini/gemini-cli/pull/15869) by
[@NTaylorMullen](https://github.com/NTaylorMullen)),
([#16013](https://github.com/google-gemini/gemini-cli/pull/16013) by
[@adamweidman](https://github.com/adamweidman))
- **Improved UI/UX:** The user interface has received several updates, featuring
visual indicators for hook execution, a more refined display for settings, and
the ability to use the Tab key to effortlessly switch focus between the shell
and input areas.
([#15408](https://github.com/google-gemini/gemini-cli/pull/15408) by
[@abhipatel12](https://github.com/abhipatel12)),
([#14332](https://github.com/google-gemini/gemini-cli/pull/14332) by
[@galz10](https://github.com/galz10))
- **Enhanced Security:** Security has been a major focus, with default folder
trust now set to untrusted for increased safety. The Policy Engine has been
improved to allow specific modes in user and administrator policies, and
granular allowlisting for shell commands has been implemented, providing finer
control over tool execution.
([#15943](https://github.com/google-gemini/gemini-cli/pull/15943) by
[@galz10](https://github.com/galz10)),
([#15977](https://github.com/google-gemini/gemini-cli/pull/15977) by
[@NTaylorMullen](https://github.com/NTaylorMullen))
- **Core Functionality:** This release includes a mandatory MessageBus
injection, marking Phase 3 of a hard migration to a more robust internal
communication system. We've also added support for built-in skills with the
CLI itself, and enhanced model routing to effectively utilize subagents.
([#15776](https://github.com/google-gemini/gemini-cli/pull/15776) by
[@abhipatel12](https://github.com/abhipatel12)),
([#16300](https://github.com/google-gemini/gemini-cli/pull/16300) by
[@NTaylorMullen](https://github.com/NTaylorMullen))
- **Terminal Features:** Terminal interactions are more seamless with new
features like OSC 52 paste support, along with fixes for Windows clipboard
paste issues and general improvements to pasting in Windows terminals.
([#15336](https://github.com/google-gemini/gemini-cli/pull/15336) by
[@scidomino](https://github.com/scidomino)),
([#15932](https://github.com/google-gemini/gemini-cli/pull/15932) by
[@scidomino](https://github.com/scidomino))
- **New Commands:** To manage the new features, we've added several new
commands: `/agents refresh` to update agent configurations, `/skills reload`
to refresh skill definitions, and `/skills install/uninstall` for easier
management of your Agent Skills.
([#16204](https://github.com/google-gemini/gemini-cli/pull/16204) by
[@NTaylorMullen](https://github.com/NTaylorMullen)),
([#15865](https://github.com/google-gemini/gemini-cli/pull/15865) by
[@NTaylorMullen](https://github.com/NTaylorMullen)),
([#16377](https://github.com/google-gemini/gemini-cli/pull/16377) by
[@NTaylorMullen](https://github.com/NTaylorMullen))
## Announcements: v0.23.0 - 2026-01-07
- 🎉 **Experimental Agent Skills Support in Preview:** Gemini CLI now supports
[Agent Skills](https://agentskills.io/home) in our preview builds. This is an
early preview where we’re looking for feedback!
- Install Preview: `npm install -g @google/gemini-cli@preview`
- Enable in `/settings`
- Docs:
[https://geminicli.com/docs/cli/skills/](https://geminicli.com/docs/cli/skills/)
- **Gemini CLI wrapped:** Run `npx gemini-wrapped` to visualize your usage
stats, top models, languages, and more!
- **Windows clipboard image support:** Windows users can now paste images
directly from their clipboard into the CLI using `Alt`+`V`.
([pr](https://github.com/google-gemini/gemini-cli/pull/13997) by
[@sgeraldes](https://github.com/sgeraldes))
- **Terminal background color detection:** Automatically detects your terminal's
  background color to select compatible themes and provide accessibility
  warnings.
([pr](https://github.com/google-gemini/gemini-cli/pull/15132) by
[@jacob314](https://github.com/jacob314))
- **Session logout:** Use the new `/logout` command to instantly clear
credentials and reset your authentication state for seamless account
switching. ([pr](https://github.com/google-gemini/gemini-cli/pull/13383) by
[@CN-Scars](https://github.com/CN-Scars))
## Announcements: v0.22.0 - 2025-12-22
- 🎉 **Free Tier + Gemini 3:** Free tier users now all have access to Gemini 3
  Pro & Flash. Enable in `/settings` by toggling "Preview Features" to `true`.
- 🎉 **Gemini CLI + Colab:** Gemini CLI is now pre-installed. It can be used
headlessly in notebook cells or interactively in the built-in terminal
([pic](https://imgur.com/a/G0Tn7vi))
- 🎉 **Gemini CLI Extensions:**
- **Conductor:** Planning++, Gemini works with you to build out a detailed
plan, pull in extra details as needed, ultimately to give the LLM guardrails
with artifacts. Measure twice, implement once!
`gemini extensions install https://github.com/gemini-cli-extensions/conductor`
Blog:
[https://developers.googleblog.com/conductor-introducing-context-driven-development-for-gemini-cli/](https://developers.googleblog.com/conductor-introducing-context-driven-development-for-gemini-cli/)
- **Endor Labs:** Perform code analysis, vulnerability scanning, and
dependency checks using natural language.
`gemini extensions install https://github.com/endorlabs/gemini-extension`
## Announcements: v0.21.0 - 2025-12-15
- **⚡️⚡️⚡️ Gemini 3 Flash + Gemini CLI:** Better, faster and cheaper than 2.5
Pro - and in some scenarios better than 3 Pro! For paid tiers + free tier
users who were on the wait list enable **Preview Features** in `/settings.`
- For more information:
[Gemini 3 Flash is now available in Gemini CLI](https://developers.googleblog.com/gemini-3-flash-is-now-available-in-gemini-cli/).
- 🎉 Gemini CLI Extensions:
- Rill: Utilize natural language to analyze Rill data, enabling the
exploration of metrics and trends without the need for manual queries.
`gemini extensions install https://github.com/rilldata/rill-gemini-extension`
- Browserbase: Interact with web pages, take screenshots, extract information,
and perform automated actions with atomic precision.
`gemini extensions install https://github.com/browserbase/mcp-server-browserbase`
- Quota Visibility: The `/stats` command now displays quota information for all
available models, including those not used in the current session. (@sehoon38)
- Fuzzy Setting Search: Users can now quickly find settings using fuzzy search
within the settings dialog. (@sehoon38)
- MCP Resource Support: Users can now discover, view, and search through
resources using the @ command. (@MrLesk)
- Auto-execute Simple Slash Commands: Simple slash commands are now executed
immediately on enter. (@jackwotherspoon)
## Announcements: v0.20.0 - 2025-12-01
- **Multi-file Drag & Drop:** Users can now drag and drop multiple files into
the terminal, and the CLI will automatically prefix each valid path with `@`.
([pr](https://github.com/google-gemini/gemini-cli/pull/14832) by
[@jackwotherspoon](https://github.com/jackwotherspoon))
- **Persistent "Always Allow" Policies:** Users can now save "Always Allow"
decisions for tool executions, with granular control over specific shell
commands and multi-cloud platform tools.
([pr](https://github.com/google-gemini/gemini-cli/pull/14737) by
[@allenhutchison](https://github.com/allenhutchison))
## Announcements: v0.19.0 - 2025-11-24
- 🎉 **New extensions:**
- **Eleven Labs:** Create, play, manage your audio play tracks with the Eleven
Labs Gemini CLI extension:
`gemini extensions install https://github.com/elevenlabs/elevenlabs-mcp`
- **Zed integration:** Users can now leverage Gemini 3 within the Zed
integration after enabling "Preview Features" in their CLI’s `/settings`.
([pr](https://github.com/google-gemini/gemini-cli/pull/13398) by
[@benbrandt](https://github.com/benbrandt))
- **Interactive shell:**
- **Click-to-Focus:** When "Use Alternate Buffer" setting is enabled, users
can click within the embedded shell output to focus it for input.
([pr](https://github.com/google-gemini/gemini-cli/pull/13341) by
[@galz10](https://github.com/galz10))
- **Loading phrase:** Clearly indicates when the interactive shell is awaiting
user input. ([vid](https://imgur.com/a/kjK8bUK),
[pr](https://github.com/google-gemini/gemini-cli/pull/12535) by
[@jackwotherspoon](https://github.com/jackwotherspoon))
## Announcements: v0.18.0 - 2025-11-17
- 🎉 **New extensions:**
- **Google Workspace**: Integrate Gemini CLI with your Workspace data. Write
docs, build slides, chat with others or even get your calc on in sheets:
`gemini extensions install https://github.com/gemini-cli-extensions/workspace`
- Blog:
[https://allen.hutchison.org/2025/11/19/bringing-the-office-to-the-terminal/](https://allen.hutchison.org/2025/11/19/bringing-the-office-to-the-terminal/)
- **Redis:** Manage and search data in Redis with natural language:
`gemini extensions install https://github.com/redis/mcp-redis`
- **Anomalo:** Query your data warehouse table metadata and quality status
through commands and natural language:
`gemini extensions install https://github.com/datagravity-ai/anomalo-gemini-extension`
- **Experimental permission improvements:** We are now experimenting with a new
policy engine in Gemini CLI. This allows users and administrators to create
fine-grained policy for tool calls. Currently behind a flag. See
[policy engine documentation](../reference/policy-engine.md) for more
information.
- Blog:
[https://allen.hutchison.org/2025/11/26/the-guardrails-of-autonomy/](https://allen.hutchison.org/2025/11/26/the-guardrails-of-autonomy/)
- **Gemini 3 support for paid:** Gemini 3 support has been rolled out to all API
key, Google AI Pro or Google AI Ultra (for individuals, not businesses) and
Gemini Code Assist Enterprise users. Enable it via `/settings` and toggling on
**Preview Features**.
- **Updated UI rollback:** We’ve temporarily rolled back our updated UI to give
it more time to bake. This means for a time you won’t have embedded scrolling
or mouse support. You can re-enable with `/settings` -> **Use Alternate Screen
Buffer** -> `true`.
- **Model in history:** Users can now toggle in `/settings` to display model in
their chat history. ([gif](https://imgur.com/a/uEmNKnQ),
[pr](https://github.com/google-gemini/gemini-cli/pull/13034) by
[@scidomino](https://github.com/scidomino))
- **Multi-uninstall:** Users can now uninstall multiple extensions with a single
command. ([pic](https://imgur.com/a/9Dtq8u2),
[pr](https://github.com/google-gemini/gemini-cli/pull/13016) by
[@JayadityaGit](https://github.com/JayadityaGit))
## Announcements: v0.16.0 - 2025-11-10
- **Gemini 3 + Gemini CLI:** launch 🚀🚀🚀
- **Data Commons Gemini CLI Extension** - A new Data Commons Gemini CLI
extension that lets you query open-source statistical data from
datacommons.org. **To get started, you'll need a Data Commons API key and uv
installed**. These and other details to get you started with the extension can
be found at
[https://github.com/gemini-cli-extensions/datacommons](https://github.com/gemini-cli-extensions/datacommons).
## Announcements: v0.15.0 - 2025-11-03
- **🎉 Seamless scrollable UI and mouse support:** We’ve given Gemini CLI a
major facelift to make your terminal experience smoother and much more
polished. You now get a flicker-free display with sticky headers that keep
important context visible and a stable input prompt that doesn't jump around.
We even added mouse support so you can click right where you need to type!
([gif](https://imgur.com/a/O6qc7bx),
[@jacob314](https://github.com/jacob314)).
- **Announcement:**
[https://developers.googleblog.com/en/making-the-terminal-beautiful-one-pixel-at-a-time/](https://developers.googleblog.com/en/making-the-terminal-beautiful-one-pixel-at-a-time/)
- **🎉 New partner extensions:**
- **Arize:** Seamlessly instrument AI applications with Arize AX and grant
direct access to Arize support:
`gemini extensions install https://github.com/Arize-ai/arize-tracing-assistant`
- **Chronosphere:** Retrieve logs, metrics, traces, events, and specific
entities:
`gemini extensions install https://github.com/chronosphereio/chronosphere-mcp`
- **Transmit:** Comprehensive context, validation, and automated fixes for
creating production-ready authentication and identity workflows:
`gemini extensions install https://github.com/TransmitSecurity/transmit-security-journey-builder`
- **Todo planning:** Complex questions now get broken down into todo lists that
the model can manage and check off. ([gif](https://imgur.com/a/EGDfNlZ),
[pr](https://github.com/google-gemini/gemini-cli/pull/12905) by
[@anj-s](https://github.com/anj-s))
- **Disable GitHub extensions:** Users can now prevent the installation and
loading of extensions from GitHub.
([pr](https://github.com/google-gemini/gemini-cli/pull/12838) by
[@kevinjwang1](https://github.com/kevinjwang1)).
- **Extensions restart:** Users can now explicitly restart extensions using the
`/extensions restart` command.
([pr](https://github.com/google-gemini/gemini-cli/pull/12739) by
[@jakemac53](https://github.com/jakemac53)).
- **Better Angular support:** Angular workflows should now be more seamless
([pr](https://github.com/google-gemini/gemini-cli/pull/10252) by
[@MarkTechson](https://github.com/MarkTechson)).
- **Validate command:** Users can now check that local extensions are formatted
correctly. ([pr](https://github.com/google-gemini/gemini-cli/pull/12186) by
[@kevinjwang1](https://github.com/kevinjwang1)).
## Announcements: v0.12.0 - 2025-10-27

- **🎉 New partner extensions:**
- **🤗 Hugging Face extension:** Access the Hugging Face hub.
([gif](https://drive.google.com/file/d/1LEzIuSH6_igFXq96_tWev11svBNyPJEB/view?usp=sharing&resourcekey=0-LtPTzR1woh-rxGtfPzjjfg))
`gemini extensions install https://github.com/huggingface/hf-mcp-server`
- **Monday.com extension**: Analyze your sprints, update your task boards,
etc.
([gif](https://drive.google.com/file/d/1cO0g6kY1odiBIrZTaqu5ZakaGZaZgpQv/view?usp=sharing&resourcekey=0-xEr67SIjXmAXRe1PKy7Jlw))
`gemini extensions install https://github.com/mondaycom/mcp`
- **Data Commons extension:** Query public datasets or ground responses on
data from Data Commons
([gif](https://drive.google.com/file/d/1cuj-B-vmUkeJnoBXrO_Y1CuqphYc6p-O/view?usp=sharing&resourcekey=0-0adXCXDQEd91ZZW63HbW-Q)).
`gemini extensions install https://github.com/gemini-cli-extensions/datacommons`
- **Model selection:** Choose the Gemini model for your session with `/model`.
([pic](https://imgur.com/a/ABFcWWw),
[pr](https://github.com/google-gemini/gemini-cli/pull/8940) by
[@abhipatel12](https://github.com/abhipatel12)).
- **Model routing:** Gemini CLI will now intelligently pick the best model for
the task. Simple queries will be sent to Flash while complex analytical or
creative tasks will still use the power of Pro. This ensures your quota will
last for a longer period of time. You can always opt-out of this via `/model`.
([pr](https://github.com/google-gemini/gemini-cli/pull/9262) by
[@abhipatel12](https://github.com/abhipatel12)).
- Discussion:
[https://github.com/google-gemini/gemini-cli/discussions/12375](https://github.com/google-gemini/gemini-cli/discussions/12375)
- **Codebase investigator subagent:** We now have a new built-in subagent that
will explore your workspace and resolve relevant information to improve
overall performance.
([pr](https://github.com/google-gemini/gemini-cli/pull/9988) by
[@abhipatel12](https://github.com/abhipatel12),
[pr](https://github.com/google-gemini/gemini-cli/pull/10282) by
[@silviojr](https://github.com/silviojr)).
- Enable, disable, or limit turns in `/settings`, plus advanced configs in
`settings.json` ([pic](https://imgur.com/a/yJiggNO),
[pr](https://github.com/google-gemini/gemini-cli/pull/10844) by
[@silviojr](https://github.com/silviojr)).
- **Explore extensions with `/extension`:** Users can now open the extensions
page in their default browser directly from the CLI using the
`/extension explore` command. ([pr](https://github.com/google-gemini/gemini-cli/pull/11846)
by [@JayadityaGit](https://github.com/JayadityaGit)).
- **Configurable compression:** Users can modify the context compression
threshold in `/settings` (decimal with percentage display). The default has
been made more proactive
([pr](https://github.com/google-gemini/gemini-cli/pull/12317) by
[@scidomino](https://github.com/scidomino)).
- **API key authentication:** Users can now securely enter and store their
Gemini API key via a new dialog, eliminating the need for environment
variables and repeated entry.
([pr](https://github.com/google-gemini/gemini-cli/pull/11760) by
[@galz10](https://github.com/galz10)).
- **Sequential approval:** Users can now approve multiple tool calls
sequentially during execution.
([pr](https://github.com/google-gemini/gemini-cli/pull/11593) by
[@joshualitt](https://github.com/joshualitt)).
## Announcements: v0.11.0 - 2025-10-20

- 🎉 **Gemini CLI Jules Extension:** Use Gemini CLI to orchestrate Jules. Spawn
remote workers, delegate tedious tasks, or check in on running jobs!
- Install:
`gemini extensions install https://github.com/gemini-cli-extensions/jules`
- Announcement:
[https://developers.googleblog.com/en/introducing-the-jules-extension-for-gemini-cli/](https://developers.googleblog.com/en/introducing-the-jules-extension-for-gemini-cli/)
- **Stream JSON output:** Stream real-time JSONL events with
`--output-format stream-json` to monitor AI agent progress when run
headlessly. ([gif](https://imgur.com/a/0UCE81X),
[pr](https://github.com/google-gemini/gemini-cli/pull/10883) by
[@anj-s](https://github.com/anj-s))
- **Markdown toggle:** Users can now switch between rendered and raw markdown
display using `alt+m` or `ctrl+m`. ([gif](https://imgur.com/a/lDNdLqr),
[pr](https://github.com/google-gemini/gemini-cli/pull/10383) by
[@srivatsj](https://github.com/srivatsj))
- **Queued message editing:** Users can now quickly edit queued messages by
pressing the up arrow key when the input is empty.
([gif](https://imgur.com/a/ioRslLd),
[pr](https://github.com/google-gemini/gemini-cli/pull/10392) by
[@akhil29](https://github.com/akhil29))
- **JSON web fetch**: Non-HTML content like JSON APIs or raw source code are now
properly shown to the model (previously only supported HTML)
([gif](https://imgur.com/a/Q58U4qJ),
[pr](https://github.com/google-gemini/gemini-cli/pull/11284) by
[@abhipatel12](https://github.com/abhipatel12))
- **Non-interactive MCP commands:** Users can now run MCP slash commands in
non-interactive mode `gemini "/some-mcp-prompt"`.
([pr](https://github.com/google-gemini/gemini-cli/pull/10194) by
[@capachino](https://github.com/capachino))
- **Removal of deprecated flags:** We’ve finally removed a number of deprecated
flags to cleanup Gemini CLI’s invocation profile:
- `--all-files` / `-a` in favor of `@` from within Gemini CLI.
([pr](https://github.com/google-gemini/gemini-cli/pull/11228) by
[@allenhutchison](https://github.com/allenhutchison))
- `--telemetry-*` flags in favor of
[environment variables](https://github.com/google-gemini/gemini-cli/pull/11318)
([pr](https://github.com/google-gemini/gemini-cli/pull/11318) by
[@allenhutchison](https://github.com/allenhutchison))
## Announcements: v0.10.0 - 2025-10-13
- **Polish:** The team has been heads down bug fixing and investing heavily into
polishing existing flows, tools, and interactions.
- **Interactive Shell Tool calling:** Gemini CLI can now also execute
interactive tools if needed
([pr](https://github.com/google-gemini/gemini-cli/pull/11225) by
[@galz10](https://github.com/galz10)).
- **Alt+Key support:** Enables broader support for Alt+Key keyboard shortcuts
across different terminals.
([pr](https://github.com/google-gemini/gemini-cli/pull/10767) by
[@srivatsj](https://github.com/srivatsj)).
- **Telemetry Diff stats:** Track line changes made by the model and user during
file operations via OTEL.
([pr](https://github.com/google-gemini/gemini-cli/pull/10819) by
[@jerop](https://github.com/jerop)).
## Announcements: v0.9.0 - 2025-10-06
- 🎉 **Interactive Shell:** Run interactive commands like `vim`, `rebase -i`, or
even `gemini` 😎 directly in Gemini CLI:
- Blog:
[https://developers.googleblog.com/en/say-hello-to-a-new-level-of-interactivity-in-gemini-cli/](https://developers.googleblog.com/en/say-hello-to-a-new-level-of-interactivity-in-gemini-cli/)
- **Install pre-release extensions:** Install the latest `--pre-release`
versions of extensions. Used for when an extension’s release hasn’t been
marked as "latest".
([pr](https://github.com/google-gemini/gemini-cli/pull/10752) by
[@jakemac53](https://github.com/jakemac53))
- **Simplified extension creation:** Create a new, empty extension. Templates
are no longer required.
([pr](https://github.com/google-gemini/gemini-cli/pull/10629) by
[@chrstnb](https://github.com/chrstnb))
- **OpenTelemetry GenAI metrics:** Aligns telemetry with industry-standard
semantic conventions for improved interoperability.
([spec](https://opentelemetry.io/docs/concepts/semantic-conventions/),
[pr](https://github.com/google-gemini/gemini-cli/pull/10343) by
[@jerop](https://github.com/jerop))
- **List memory files:** Quickly find the location of your long-term memory
files with `/memory list`.
([pr](https://github.com/google-gemini/gemini-cli/pull/10108) by
[@sgnagnarella](https://github.com/sgnagnarella))
## Announcements: v0.8.0 - 2025-09-29
- 🎉 **Announcing Gemini CLI Extensions** 🎉
- Completely customize your Gemini CLI experience to fit your workflow.
- Build and share your own Gemini CLI extensions with the world.
- Launching with a growing catalog of community, partner, and Google-built
extensions.
- Check extensions from
[key launch partners](https://github.com/google-gemini/gemini-cli/discussions/10718).
- Easy install:
- `gemini extensions install <URL>`
- Easy management:
- `gemini extensions install|uninstall|link`
- `gemini extensions enable|disable`
- `gemini extensions list|update|new`
- Or use commands while running with `/extensions list|update`.
- Everything you need to know:
[Now open for building: Introducing Gemini CLI extensions](https://blog.google/technology/developers/gemini-cli-extensions/).
- 🎉 **Our New Home Page & Better Documentation** 🎉
- Check out our new home page for better getting started material, reference
documentation, extensions and more!
- _Homepage:_ [https://geminicli.com](https://geminicli.com)
- ‼️ _NEW documentation:_
[https://geminicli.com/docs](https://geminicli.com/docs) (Have any
[suggestions](https://github.com/google-gemini/gemini-cli/discussions/8722)?)
- _Extensions:_
[https://geminicli.com/extensions](https://geminicli.com/extensions)
- **Non-Interactive Allowed Tools:** `--allowed-tools` will now also work in
non-interactive mode.
([pr](https://github.com/google-gemini/gemini-cli/pull/9114) by
[@mistergarrison](https://github.com/mistergarrison))
- **Terminal Title Status:** See the CLI's real-time status and thoughts
directly in the terminal window's title by setting `showStatusInTitle: true`.
([pr](https://github.com/google-gemini/gemini-cli/pull/4386) by
[@Fridayxiao](https://github.com/Fridayxiao))
- **Small features, polish, reliability & bug fixes:** A large amount of
changes, smaller features, UI updates, reliability and bug fixes + general
polish made it in this week!
## Announcements: v0.7.0 - 2025-09-22
- 🎉**Build your own Gemini CLI IDE plugin:** We've published a spec for
creating IDE plugins to enable rich context-aware experiences and native
in-editor diffing in your IDE of choice.
([pr](https://github.com/google-gemini/gemini-cli/pull/8479) by
[@skeshive](https://github.com/skeshive))
- 🎉 **Gemini CLI extensions**
- **Flutter:** An early version to help you create, build, test, and run
Flutter apps with Gemini CLI
([extension](https://github.com/gemini-cli-extensions/flutter))
- **nanobanana:** Integrate nanobanana into Gemini CLI
([extension](https://github.com/gemini-cli-extensions/nanobanana))
- **Telemetry config via environment:** Manage telemetry settings using
environment variables for a more flexible setup.
([docs](https://github.com/google-gemini/gemini-cli/blob/main/docs/cli/telemetry.md#configuration),
[pr](https://github.com/google-gemini/gemini-cli/pull/9113) by
[@jerop](https://github.com/jerop))
- **Experimental todos:** Track and display progress on complex tasks with a
managed checklist. Off by default but can be enabled via
`"useWriteTodos": true`
([pr](https://github.com/google-gemini/gemini-cli/pull/8761) by
[@anj-s](https://github.com/anj-s))
- **Share chat support for tools:** Using `/chat share` will now also render
function calls and responses in the final markdown file.
([pr](https://github.com/google-gemini/gemini-cli/pull/8693) by
[@rramkumar1](https://github.com/rramkumar1))
- **Citations:** Now enabled for all users
([pr](https://github.com/google-gemini/gemini-cli/pull/8570) by
[@scidomino](https://github.com/scidomino))
- **Custom commands in Headless Mode:** Run custom slash commands directly from
the command line in non-interactive mode: `gemini "/joke Chuck Norris"`
([pr](https://github.com/google-gemini/gemini-cli/pull/8305) by
[@capachino](https://github.com/capachino))
- **Small features, polish, reliability & bug fixes:** A large amount of
changes, smaller features, UI updates, reliability and bug fixes + general
polish made it in this week!
## Announcements: v0.6.0 - 2025-09-15
- 🎉 **Higher limits for Google AI Pro and Ultra subscribers:** We’re psyched to
finally announce that Google AI Pro and AI Ultra subscribers now get access to
significantly higher 2.5 quota limits for Gemini CLI!
- **Announcement:**
[https://blog.google/technology/developers/gemini-cli-code-assist-higher-limits/](https://blog.google/technology/developers/gemini-cli-code-assist-higher-limits/)
- 🎉**Gemini CLI Databases and BigQuery Extensions:** Connect Gemini CLI to all
of your cloud data with Gemini CLI.
- Announcement and how to get started with each of the below extensions:
[https://cloud.google.com/blog/products/databases/gemini-cli-extensions-for-google-data-cloud?e=48754805](https://cloud.google.com/blog/products/databases/gemini-cli-extensions-for-google-data-cloud?e=48754805)
- **AlloyDB:** Interact, manage and observe AlloyDB for PostgreSQL databases
([manage](https://github.com/gemini-cli-extensions/alloydb#configuration),
[observe](https://github.com/gemini-cli-extensions/alloydb-observability#configuration))
- **BigQuery:** Connect and query your BigQuery datasets or utilize a
sub-agent for contextual insights
([query](https://github.com/gemini-cli-extensions/bigquery-data-analytics#configuration),
[sub-agent](https://github.com/gemini-cli-extensions/bigquery-conversational-analytics))
- **Cloud SQL:** Interact, manage and observe Cloud SQL for PostgreSQL
([manage](https://github.com/gemini-cli-extensions/cloud-sql-postgresql#configuration), [observe](https://github.com/gemini-cli-extensions/cloud-sql-postgresql-observability#configuration)),
Cloud SQL for MySQL
([manage](https://github.com/gemini-cli-extensions/cloud-sql-mysql#configuration), [observe](https://github.com/gemini-cli-extensions/cloud-sql-mysql-observability#configuration))
and Cloud SQL for SQL Server
([manage](https://github.com/gemini-cli-extensions/cloud-sql-sqlserver#configuration), [observe](https://github.com/gemini-cli-extensions/cloud-sql-sqlserver-observability#configuration))
databases.
- **Dataplex:** Discover, manage, and govern data and AI artifacts
([extension](https://github.com/gemini-cli-extensions/dataplex#configuration))
- **Firestore:** Interact with Firestore databases, collections and documents
([extension](https://github.com/gemini-cli-extensions/firestore-native#configuration))
- **Looker:** Query data, run Looks and create dashboards
([extension](https://github.com/gemini-cli-extensions/looker#configuration))
- **MySQL:** Interact with MySQL databases
([extension](https://github.com/gemini-cli-extensions/mysql#configuration))
- **Postgres:** Interact with PostgreSQL databases
([extension](https://github.com/gemini-cli-extensions/postgres#configuration))
- **Spanner:** Interact with Spanner databases
([extension](https://github.com/gemini-cli-extensions/spanner#configuration))
- **SQL Server:** Interact with SQL Server databases
([extension](https://github.com/gemini-cli-extensions/sql-server#configuration))
- **MCP Toolbox:** Configure and load custom tools for more than 30 data
sources
([extension](https://github.com/gemini-cli-extensions/mcp-toolbox#configuration))
- **JSON output mode:** Have Gemini CLI output JSON with `--output-format json`
when invoked headlessly for easy parsing and post-processing. Includes
response, stats and errors.
([pr](https://github.com/google-gemini/gemini-cli/pull/8119) by
[@jerop](https://github.com/jerop))
- **Keybinding triggered approvals:** When you use shortcuts (`shift+y` or
`shift+tab`) to activate YOLO/auto-edit modes any pending confirmation dialogs
will now approve. ([pr](https://github.com/google-gemini/gemini-cli/pull/6665)
by [@bulkypanda](https://github.com/bulkypanda))
- **Chat sharing:** Convert the current conversation to a Markdown or JSON file
with `/chat share <file.md|file.json>`
([pr](https://github.com/google-gemini/gemini-cli/pull/8139) by
[@rramkumar1](https://github.com/rramkumar1))
- **Prompt search:** Search your prompt history using `ctrl+r`.
([pr](https://github.com/google-gemini/gemini-cli/pull/5539) by
[@Aisha630](https://github.com/Aisha630))
- **Input undo/redo:** Recover accidentally deleted text in the input prompt
using `ctrl+z` (undo) and `ctrl+shift+z` (redo).
([pr](https://github.com/google-gemini/gemini-cli/pull/4625) by
[@masiafrest](https://github.com/masiafrest))
- **Loop detection confirmation:** When loops are detected you are now presented
with a dialog to disable detection for the current session.
([pr](https://github.com/google-gemini/gemini-cli/pull/8231) by
[@SandyTao520](https://github.com/SandyTao520))
- **Direct to Google Cloud Telemetry:** Directly send telemetry to Google Cloud
for a simpler and more streamlined setup.
([pr](https://github.com/google-gemini/gemini-cli/pull/8541) by
[@jerop](https://github.com/jerop))
- **Visual Mode Indicator Revamp:** ‘shell’, 'accept edits' and 'yolo' modes now
have colors to match their impact / usage. Input box now also updates.
([shell](https://imgur.com/a/DovpVF1),
[accept-edits](https://imgur.com/a/33KDz3J),
[yolo](https://imgur.com/a/tbFwIWp),
[pr](https://github.com/google-gemini/gemini-cli/pull/8200) by
[@miguelsolorio](https://github.com/miguelsolorio))
- **Small features, polish, reliability & bug fixes:** A large amount of
changes, smaller features, UI updates, reliability and bug fixes + general
polish made it in this week!
## Announcements: v0.5.0 - 2025-09-08
- 🎉**FastMCP + Gemini CLI**🎉: Quickly install and manage your Gemini CLI MCP
servers with FastMCP ([video](https://imgur.com/a/m8QdCPh),
[pr](https://github.com/jlowin/fastmcp/pull/1709) by
[@jackwotherspoon](https://github.com/jackwotherspoon))
- Getting started:
[https://gofastmcp.com/integrations/gemini-cli](https://gofastmcp.com/integrations/gemini-cli)
- **Positional Prompt for Non-Interactive:** Seamlessly invoke Gemini CLI
headlessly via `gemini "Hello"`. Synonymous with passing `-p`.
([gif](https://imgur.com/a/hcBznpB),
[pr](https://github.com/google-gemini/gemini-cli/pull/7668) by
[@allenhutchison](https://github.com/allenhutchison))
- **Experimental Tool output truncation:** Enable truncating shell tool outputs
and saving full output to a file by setting
`"enableToolOutputTruncation": true` ([pr](https://github.com/google-gemini/gemini-cli/pull/8039)
by [@SandyTao520](https://github.com/SandyTao520))
- **Edit Tool improvements:** Gemini CLI’s ability to edit files should now be
far more capable. ([pr](https://github.com/google-gemini/gemini-cli/pull/7679)
by [@silviojr](https://github.com/silviojr))
- **Custom witty messages:** The feature you’ve all been waiting for…
Personalized witty loading messages via
`"ui": { "customWittyPhrases": ["YOLO"]}` in `settings.json`.
([pr](https://github.com/google-gemini/gemini-cli/pull/7641) by
[@JayadityaGit](https://github.com/JayadityaGit))
- **Nested .gitignore File Handling:** Nested `.gitignore` files are now
respected. ([pr](https://github.com/google-gemini/gemini-cli/pull/7645) by
[@gsquared94](https://github.com/gsquared94))
- **Enforced authentication:** System administrators can now mandate a specific
authentication method via
`"enforcedAuthType": "oauth-personal|gemini-api-key|…"` in `settings.json`.
([pr](https://github.com/google-gemini/gemini-cli/pull/6564) by
[@chrstnb](https://github.com/chrstnb))
- **A2A development-tool extension:** An RFC for an Agent2Agent
([A2A](https://a2a-protocol.org/latest/)) powered extension for developer tool
use cases.
([feedback](https://github.com/google-gemini/gemini-cli/discussions/7822),
[pr](https://github.com/google-gemini/gemini-cli/pull/7817) by
[@skeshive](https://github.com/skeshive))
- **Hands on Codelab:**
  [https://codelabs.developers.google.com/gemini-cli-hands-on](https://codelabs.developers.google.com/gemini-cli-hands-on)
- **Small features, polish, reliability & bug fixes:** A large amount of
changes, smaller features, UI updates, reliability and bug fixes + general
polish made it in this week!
## Announcements: v0.4.0 - 2025-09-01
- 🎉**Gemini CLI CloudRun and Security Integrations**🎉: Automate app deployment
and security analysis with CloudRun and Security extension integrations. Once
installed deploy your app to the cloud with `/deploy` and find and fix
security vulnerabilities with `/security:analyze`.
- Announcement and how to get started:
[https://cloud.google.com/blog/products/ai-machine-learning/automate-app-deployment-and-security-analysis-with-new-gemini-cli-extensions](https://cloud.google.com/blog/products/ai-machine-learning/automate-app-deployment-and-security-analysis-with-new-gemini-cli-extensions)
- **Experimental**
- **Edit Tool:** Give our new edit tool a try by setting
`"useSmartEdit": true` in `settings.json`!
([feedback](https://github.com/google-gemini/gemini-cli/discussions/7758),
[pr](https://github.com/google-gemini/gemini-cli/pull/6823) by
[@silviojr](https://github.com/silviojr))
- **Model talking to itself fix:** We’ve removed a model workaround that would
encourage Gemini CLI to continue conversations on your behalf. This may be
disruptive and can be disabled via `"skipNextSpeakerCheck": false` in your
`settings.json`
([feedback](https://github.com/google-gemini/gemini-cli/discussions/6666),
[pr](https://github.com/google-gemini/gemini-cli/pull/7614) by
[@SandyTao520](https://github.com/SandyTao520))
- **Prompt completion:** Get real-time AI suggestions to complete your prompts
as you type. Enable it with `"general": { "enablePromptCompletion": true }`
and share your feedback!
([gif](https://miro.medium.com/v2/resize:fit:2000/format:webp/1*hvegW7YXOg6N_beUWhTdxA.gif),
[pr](https://github.com/google-gemini/gemini-cli/pull/4691) by
[@3ks](https://github.com/3ks))
- **Footer visibility configuration:** Customize the CLI's footer look and feel
in `settings.json`
([pr](https://github.com/google-gemini/gemini-cli/pull/7419) by
[@miguelsolorio](https://github.com/miguelsolorio))
- `hideCWD`: hide current working directory.
- `hideSandboxStatus`: hide sandbox status.
- `hideModelInfo`: hide current model information.
- `hideContextSummary`: hide request context summary.
- **Citations:** For enterprise Code Assist licenses users will now see
citations in their responses by default. Enable this yourself with
`"showCitations": true`
([pr](https://github.com/google-gemini/gemini-cli/pull/7350) by
[@scidomino](https://github.com/scidomino))
- **Pro Quota Dialog:** Handle daily Pro model usage limits with an interactive
dialog that lets you immediately switch auth or fallback.
([pr](https://github.com/google-gemini/gemini-cli/pull/7094) by
[@JayadityaGit](https://github.com/JayadityaGit))
- **Custom commands @:** Embed local file or directory content directly into
your custom command prompts using `@{path}` syntax
([gif](https://miro.medium.com/v2/resize:fit:2000/format:webp/1*GosBAo2SjMfFffAnzT7ZMg.gif),
[pr](https://github.com/google-gemini/gemini-cli/pull/6716) by
[@abhipatel12](https://github.com/abhipatel12))
- **2.5 Flash Lite support:** You can now use the `gemini-2.5-flash-lite` model
for Gemini CLI via `gemini -m …`.
([gif](https://miro.medium.com/v2/resize:fit:2000/format:webp/1*P4SKwnrsyBuULoHrFqsFKQ.gif),
[pr](https://github.com/google-gemini/gemini-cli/pull/4652) by
[@psinha40898](https://github.com/psinha40898))
- **CLI streamlining:** We have deprecated a number of command line arguments in
favor of `settings.json` alternatives. We will remove these arguments in a
future release. See the PR for the full list of deprecations.
([pr](https://github.com/google-gemini/gemini-cli/pull/7360) by
[@allenhutchison](https://github.com/allenhutchison))
- **JSON session summary:** Track and save detailed CLI session statistics to a
JSON file for performance analysis with `--session-summary <file>`
([pr](https://github.com/google-gemini/gemini-cli/pull/7347) by
[@leehagoodjames](https://github.com/leehagoodjames))
- **Robust keyboard handling:** More reliable and consistent behavior for arrow
keys, special keys (Home, End, etc.), and modifier combinations across various
terminals. ([pr](https://github.com/google-gemini/gemini-cli/pull/7118) by
[@deepankarsharma](https://github.com/deepankarsharma))
- **MCP loading indicator:** Provides visual feedback during CLI initialization
when connecting to multiple servers.
([pr](https://github.com/google-gemini/gemini-cli/pull/6923) by
[@swissspidy](https://github.com/swissspidy))
- **Small features, polish, reliability & bug fixes:** A large amount of
changes, smaller features, UI updates, reliability and bug fixes + general
polish made it in this week!
================================================
FILE: docs/changelogs/latest.md
================================================
# Latest stable release: v0.34.0
Released: March 17, 2026
For most users, our latest stable release is the recommended release. Install
the latest stable version with:
```
npm install -g @google/gemini-cli
```
## Highlights
- **Plan Mode Enabled by Default**: The comprehensive planning capability is now
enabled by default, allowing for better structured task management and
execution.
- **Enhanced Sandboxing Capabilities**: Added support for native gVisor (runsc)
sandboxing as well as experimental LXC container sandboxing to provide more
robust and isolated execution environments.
- **Improved Loop Detection & Recovery**: Implemented iterative loop detection
and model feedback mechanisms to prevent the CLI from getting stuck in
repetitive actions.
- **Customizable UI Elements**: You can now configure a custom footer using the
new `/footer` command, and enjoy standardized semantic focus colors for better
history visibility.
- **Extensive Subagent Updates**: Refinements across the tracker visualization
tools, background process logging, and broader fallback support for models in
tool execution scenarios.
## What's Changed
- feat(cli): add chat resume footer on session quit by @lordshashank in
[#20667](https://github.com/google-gemini/gemini-cli/pull/20667)
- Support bold and other styles in svg snapshots by @jacob314 in
[#20937](https://github.com/google-gemini/gemini-cli/pull/20937)
- fix(core): increase A2A agent timeout to 30 minutes by @adamfweidman in
[#21028](https://github.com/google-gemini/gemini-cli/pull/21028)
- Cleanup old branches. by @jacob314 in
[#19354](https://github.com/google-gemini/gemini-cli/pull/19354)
- chore(release): bump version to 0.34.0-nightly.20260303.34f0c1538 by
@gemini-cli-robot in
[#21034](https://github.com/google-gemini/gemini-cli/pull/21034)
- feat(ui): standardize semantic focus colors and enhance history visibility by
@keithguerin in
[#20745](https://github.com/google-gemini/gemini-cli/pull/20745)
- fix: merge duplicate imports in packages/core (3/4) by @Nixxx19 in
[#20928](https://github.com/google-gemini/gemini-cli/pull/20928)
- Add extra safety checks for proto pollution by @jacob314 in
[#20396](https://github.com/google-gemini/gemini-cli/pull/20396)
- feat(core): Add tracker CRUD tools & visualization by @anj-s in
[#19489](https://github.com/google-gemini/gemini-cli/pull/19489)
- Revert "fix(ui): persist expansion in AskUser dialog when navigating options"
by @jacob314 in
[#21042](https://github.com/google-gemini/gemini-cli/pull/21042)
- Changelog for v0.33.0-preview.0 by @gemini-cli-robot in
[#21030](https://github.com/google-gemini/gemini-cli/pull/21030)
- fix: model persistence for all scenarios by @sripasg in
[#21051](https://github.com/google-gemini/gemini-cli/pull/21051)
- chore/release: bump version to 0.34.0-nightly.20260304.28af4e127 by
@gemini-cli-robot in
[#21054](https://github.com/google-gemini/gemini-cli/pull/21054)
- Consistently guard restarts against concurrent auto updates by @scidomino in
[#21016](https://github.com/google-gemini/gemini-cli/pull/21016)
- Defensive coding to reduce the risk of Maximum update depth errors by
@jacob314 in [#20940](https://github.com/google-gemini/gemini-cli/pull/20940)
- fix(cli): Polish shell autocomplete rendering to be a little more shell native
feeling. by @jacob314 in
[#20931](https://github.com/google-gemini/gemini-cli/pull/20931)
- Docs: Update plan mode docs by @jkcinouye in
[#19682](https://github.com/google-gemini/gemini-cli/pull/19682)
- fix(mcp): Notifications/tools/list_changed support not working by @jacob314 in
[#21050](https://github.com/google-gemini/gemini-cli/pull/21050)
- fix(cli): register extension lifecycle events in DebugProfiler by
@fayerman-source in
[#20101](https://github.com/google-gemini/gemini-cli/pull/20101)
- chore(dev): update vscode settings for typescriptreact by @rohit-4321 in
[#19907](https://github.com/google-gemini/gemini-cli/pull/19907)
- fix(cli): enable multi-arch docker builds for sandbox by @ru-aish in
[#19821](https://github.com/google-gemini/gemini-cli/pull/19821)
- Changelog for v0.32.0 by @gemini-cli-robot in
[#21033](https://github.com/google-gemini/gemini-cli/pull/21033)
- Changelog for v0.33.0-preview.1 by @gemini-cli-robot in
[#21058](https://github.com/google-gemini/gemini-cli/pull/21058)
- feat(core): improve @scripts/copy_files.js autocomplete to prioritize
filenames by @sehoon38 in
[#21064](https://github.com/google-gemini/gemini-cli/pull/21064)
- feat(sandbox): add experimental LXC container sandbox support by @h30s in
[#20735](https://github.com/google-gemini/gemini-cli/pull/20735)
- feat(evals): add overall pass rate row to eval nightly summary table by
@gundermanc in
[#20905](https://github.com/google-gemini/gemini-cli/pull/20905)
- feat(telemetry): include language in telemetry and fix accepted lines
computation by @gundermanc in
[#21126](https://github.com/google-gemini/gemini-cli/pull/21126)
- Changelog for v0.32.1 by @gemini-cli-robot in
[#21055](https://github.com/google-gemini/gemini-cli/pull/21055)
- feat(core): add robustness tests, logging, and metrics for CodeAssistServer
SSE parsing by @yunaseoul in
[#21013](https://github.com/google-gemini/gemini-cli/pull/21013)
- feat: add issue assignee workflow by @kartikangiras in
[#21003](https://github.com/google-gemini/gemini-cli/pull/21003)
- fix: improve error message when OAuth succeeds but project ID is required by
@Nixxx19 in [#21070](https://github.com/google-gemini/gemini-cli/pull/21070)
- feat(loop-reduction): implement iterative loop detection and model feedback by
@aishaneeshah in
[#20763](https://github.com/google-gemini/gemini-cli/pull/20763)
- chore(github): require prompt approvers for agent prompt files by @gundermanc
in [#20896](https://github.com/google-gemini/gemini-cli/pull/20896)
- Docs: Create tools reference by @jkcinouye in
[#19470](https://github.com/google-gemini/gemini-cli/pull/19470)
- fix(core, a2a-server): prevent hang during OAuth in non-interactive sessions
by @spencer426 in
[#21045](https://github.com/google-gemini/gemini-cli/pull/21045)
- chore(cli): enable deprecated settings removal by default by @yashodipmore in
[#20682](https://github.com/google-gemini/gemini-cli/pull/20682)
- feat(core): Disable fast ack helper for hints. by @joshualitt in
[#21011](https://github.com/google-gemini/gemini-cli/pull/21011)
- fix(ui): suppress redundant failure note when tool error note is shown by
@NTaylorMullen in
[#21078](https://github.com/google-gemini/gemini-cli/pull/21078)
- docs: document planning workflows with Conductor example by @jerop in
[#21166](https://github.com/google-gemini/gemini-cli/pull/21166)
- feat(release): ship esbuild bundle in npm package by @genneth in
[#19171](https://github.com/google-gemini/gemini-cli/pull/19171)
- fix(extensions): preserve symlinks in extension source path while enforcing
folder trust by @galz10 in
[#20867](https://github.com/google-gemini/gemini-cli/pull/20867)
- fix(cli): defer tool exclusions to policy engine in non-interactive mode by
@EricRahm in [#20639](https://github.com/google-gemini/gemini-cli/pull/20639)
- fix(ui): removed double padding on rendered content by @devr0306 in
[#21029](https://github.com/google-gemini/gemini-cli/pull/21029)
- fix(core): truncate excessively long lines in grep search output by
@gundermanc in
[#21147](https://github.com/google-gemini/gemini-cli/pull/21147)
- feat: add custom footer configuration via `/footer` by @jackwotherspoon in
[#19001](https://github.com/google-gemini/gemini-cli/pull/19001)
- perf(core): fix OOM crash in long-running sessions by @WizardsForgeGames in
[#19608](https://github.com/google-gemini/gemini-cli/pull/19608)
- refactor(cli): categorize built-in themes into dark/ and light/ directories by
@JayadityaGit in
[#18634](https://github.com/google-gemini/gemini-cli/pull/18634)
- fix(core): explicitly allow codebase_investigator and cli_help in read-only
mode by @Adib234 in
[#21157](https://github.com/google-gemini/gemini-cli/pull/21157)
- test: add browser agent integration tests by @kunal-10-cloud in
[#21151](https://github.com/google-gemini/gemini-cli/pull/21151)
- fix(cli): fix enabling kitty codes on Windows Terminal by @scidomino in
[#21136](https://github.com/google-gemini/gemini-cli/pull/21136)
- refactor(core): extract shared OAuth flow primitives from MCPOAuthProvider by
@SandyTao520 in
[#20895](https://github.com/google-gemini/gemini-cli/pull/20895)
- fix(ui): add partial output to cancelled shell UI by @devr0306 in
[#21178](https://github.com/google-gemini/gemini-cli/pull/21178)
- fix(cli): replace hardcoded keybinding strings with dynamic formatters by
@scidomino in [#21159](https://github.com/google-gemini/gemini-cli/pull/21159)
- DOCS: Update quota and pricing page by @g-samroberts in
[#21194](https://github.com/google-gemini/gemini-cli/pull/21194)
- feat(telemetry): implement Clearcut logging for startup statistics by
@yunaseoul in [#21172](https://github.com/google-gemini/gemini-cli/pull/21172)
- feat(triage): add area/documentation to issue triage by @g-samroberts in
[#21222](https://github.com/google-gemini/gemini-cli/pull/21222)
- Fix so shell calls are formatted by @jacob314 in
[#21237](https://github.com/google-gemini/gemini-cli/pull/21237)
- feat(cli): add native gVisor (runsc) sandboxing support by @Zheyuan-Lin in
[#21062](https://github.com/google-gemini/gemini-cli/pull/21062)
- docs: use absolute paths for internal links in plan-mode.md by @jerop in
[#21299](https://github.com/google-gemini/gemini-cli/pull/21299)
- fix(core): prevent unhandled AbortError crash during stream loop detection by
@7hokerz in [#21123](https://github.com/google-gemini/gemini-cli/pull/21123)
- fix: reorder env var redaction checks to scan values first by @kartikangiras in
[#21059](https://github.com/google-gemini/gemini-cli/pull/21059)
- fix(acp): rename --experimental-acp to --acp & remove Zed-specific references
by @skeshive in
[#21171](https://github.com/google-gemini/gemini-cli/pull/21171)
- feat(core): fallback to 2.5 models with no access for toolcalls by @sehoon38
in [#21283](https://github.com/google-gemini/gemini-cli/pull/21283)
- test(core): improve testing for API request/response parsing by @sehoon38 in
[#21227](https://github.com/google-gemini/gemini-cli/pull/21227)
- docs(links): update docs-writer skill and fix broken link by @g-samroberts in
[#21314](https://github.com/google-gemini/gemini-cli/pull/21314)
- Fix code colorizer ansi escape bug. by @jacob314 in
[#21321](https://github.com/google-gemini/gemini-cli/pull/21321)
- remove wildcard behavior on keybindings by @scidomino in
[#21315](https://github.com/google-gemini/gemini-cli/pull/21315)
- feat(acp): Add support for AI Gateway auth by @skeshive in
[#21305](https://github.com/google-gemini/gemini-cli/pull/21305)
- fix(theme): improve theme color contrast for macOS Terminal.app by @clocky in
[#21175](https://github.com/google-gemini/gemini-cli/pull/21175)
- feat (core): Implement tracker related SI changes by @anj-s in
[#19964](https://github.com/google-gemini/gemini-cli/pull/19964)
- Changelog for v0.33.0-preview.2 by @gemini-cli-robot in
[#21333](https://github.com/google-gemini/gemini-cli/pull/21333)
- Changelog for v0.33.0-preview.3 by @gemini-cli-robot in
[#21347](https://github.com/google-gemini/gemini-cli/pull/21347)
- docs: format release times as HH:MM UTC by @pavan-sh in
[#20726](https://github.com/google-gemini/gemini-cli/pull/20726)
- fix(cli): implement --all flag for extensions uninstall by @sehoon38 in
[#21319](https://github.com/google-gemini/gemini-cli/pull/21319)
- docs: fix incorrect relative links to command reference by @kanywst in
[#20964](https://github.com/google-gemini/gemini-cli/pull/20964)
- documenting ensures ripgrep by @Jatin24062005 in
[#21298](https://github.com/google-gemini/gemini-cli/pull/21298)
- fix(core): handle AbortError thrown during processTurn by @MumuTW in
[#21296](https://github.com/google-gemini/gemini-cli/pull/21296)
- docs(cli): clarify ! command output visibility in shell commands tutorial by
@MohammedADev in
[#21041](https://github.com/google-gemini/gemini-cli/pull/21041)
- fix: logic for task tracker strategy and remove tracker tools by @anj-s in
[#21355](https://github.com/google-gemini/gemini-cli/pull/21355)
- fix(partUtils): display media type and size for inline data parts by @Aboudjem
in [#21358](https://github.com/google-gemini/gemini-cli/pull/21358)
- Fix(accessibility): add screen reader support to RewindViewer by @Famous077 in
[#20750](https://github.com/google-gemini/gemini-cli/pull/20750)
- fix(hooks): propagate stopHookActive in AfterAgent retry path (#20426) by
@Aarchi-07 in [#20439](https://github.com/google-gemini/gemini-cli/pull/20439)
- fix(core): deduplicate GEMINI.md files by device/inode on case-insensitive
filesystems (#19904) by @Nixxx19 in
[#19915](https://github.com/google-gemini/gemini-cli/pull/19915)
- feat(core): add concurrency safety guidance for subagent delegation (#17753)
by @abhipatel12 in
[#21278](https://github.com/google-gemini/gemini-cli/pull/21278)
- feat(ui): dynamically generate all keybinding hints by @scidomino in
[#21346](https://github.com/google-gemini/gemini-cli/pull/21346)
- feat(core): implement unified KeychainService and migrate token storage by
@ehedlund in [#21344](https://github.com/google-gemini/gemini-cli/pull/21344)
- fix(cli): gracefully handle --resume when no sessions exist by @SandyTao520 in
[#21429](https://github.com/google-gemini/gemini-cli/pull/21429)
- fix(plan): keep approved plan during chat compression by @ruomengz in
[#21284](https://github.com/google-gemini/gemini-cli/pull/21284)
- feat(core): implement generic CacheService and optimize setupUser by @sehoon38
in [#21374](https://github.com/google-gemini/gemini-cli/pull/21374)
- Update quota and pricing documentation with subscription tiers by @srithreepo
in [#21351](https://github.com/google-gemini/gemini-cli/pull/21351)
- fix(core): append correct OTLP paths for HTTP exporters by
@sebastien-prudhomme in
[#16836](https://github.com/google-gemini/gemini-cli/pull/16836)
- Changelog for v0.33.0-preview.4 by @gemini-cli-robot in
[#21354](https://github.com/google-gemini/gemini-cli/pull/21354)
- feat(cli): implement dot-prefixing for slash command conflicts by @ehedlund in
[#20979](https://github.com/google-gemini/gemini-cli/pull/20979)
- refactor(core): standardize MCP tool naming to mcp\_ FQN format by
@abhipatel12 in
[#21425](https://github.com/google-gemini/gemini-cli/pull/21425)
- feat(cli): hide gemma settings from display and mark as experimental by
@abhipatel12 in
[#21471](https://github.com/google-gemini/gemini-cli/pull/21471)
- feat(skills): refine string-reviewer guidelines and description by @clocky in
[#20368](https://github.com/google-gemini/gemini-cli/pull/20368)
- fix(core): whitelist TERM and COLORTERM in environment sanitization by
@deadsmash07 in
[#20514](https://github.com/google-gemini/gemini-cli/pull/20514)
- fix(billing): fix overage strategy lifecycle and settings integration by
@gsquared94 in
[#21236](https://github.com/google-gemini/gemini-cli/pull/21236)
- fix: expand paste placeholders in TextInput on submit by @Jefftree in
[#19946](https://github.com/google-gemini/gemini-cli/pull/19946)
- fix(core): add in-memory cache to ChatRecordingService to prevent OOM by
@SandyTao520 in
[#21502](https://github.com/google-gemini/gemini-cli/pull/21502)
- feat(cli): overhaul thinking UI by @keithguerin in
[#18725](https://github.com/google-gemini/gemini-cli/pull/18725)
- fix(ui): unify Ctrl+O expansion hint experience across buffer modes by
@jwhelangoog in
[#21474](https://github.com/google-gemini/gemini-cli/pull/21474)
- fix(cli): correct shell height reporting by @jacob314 in
[#21492](https://github.com/google-gemini/gemini-cli/pull/21492)
- Make test suite pass when the GEMINI_SYSTEM_MD env variable or
  GEMINI_WRITE_SYSTEM_MD variable happens to be set locally. by @jacob314 in
[#21480](https://github.com/google-gemini/gemini-cli/pull/21480)
- Disallow underspecified types by @gundermanc in
[#21485](https://github.com/google-gemini/gemini-cli/pull/21485)
- refactor(cli): standardize on 'reload' verb for all components by @keithguerin
in [#20654](https://github.com/google-gemini/gemini-cli/pull/20654)
- feat(cli): Invert quota language to 'percent used' by @keithguerin in
[#20100](https://github.com/google-gemini/gemini-cli/pull/20100)
- Docs: Add documentation for notifications (experimental)(macOS) by @jkcinouye
in [#21163](https://github.com/google-gemini/gemini-cli/pull/21163)
- Code review comments as a pr by @jacob314 in
[#21209](https://github.com/google-gemini/gemini-cli/pull/21209)
- feat(cli): unify /chat and /resume command UX by @LyalinDotCom in
[#20256](https://github.com/google-gemini/gemini-cli/pull/20256)
- docs: fix typo 'allowslisted' -> 'allowlisted' in mcp-server.md by
@Gyanranjan-Priyam in
[#21665](https://github.com/google-gemini/gemini-cli/pull/21665)
- fix(core): display actual graph output in tracker_visualize tool by @anj-s in
[#21455](https://github.com/google-gemini/gemini-cli/pull/21455)
- fix(core): sanitize SSE-corrupted JSON and domain strings in error
classification by @gsquared94 in
[#21702](https://github.com/google-gemini/gemini-cli/pull/21702)
- Docs: Make documentation links relative by @diodesign in
[#21490](https://github.com/google-gemini/gemini-cli/pull/21490)
- feat(cli): expose /tools desc as explicit subcommand for discoverability by
@aworki in [#21241](https://github.com/google-gemini/gemini-cli/pull/21241)
- feat(cli): add /compact alias for /compress command by @jackwotherspoon in
[#21711](https://github.com/google-gemini/gemini-cli/pull/21711)
- feat(plan): enable Plan Mode by default by @jerop in
[#21713](https://github.com/google-gemini/gemini-cli/pull/21713)
- feat(core): Introduce `AgentLoopContext`. by @joshualitt in
[#21198](https://github.com/google-gemini/gemini-cli/pull/21198)
- fix(core): resolve symlinks for non-existent paths during validation by
@Adib234 in [#21487](https://github.com/google-gemini/gemini-cli/pull/21487)
- docs: document tool exclusion from memory via deny policy by @Abhijit-2592 in
[#21428](https://github.com/google-gemini/gemini-cli/pull/21428)
- perf(core): cache loadApiKey to reduce redundant keychain access by @sehoon38
in [#21520](https://github.com/google-gemini/gemini-cli/pull/21520)
- feat(cli): implement /upgrade command by @sehoon38 in
[#21511](https://github.com/google-gemini/gemini-cli/pull/21511)
- Feat/browser agent progress emission by @kunal-10-cloud in
[#21218](https://github.com/google-gemini/gemini-cli/pull/21218)
- fix(settings): display objects as JSON instead of [object Object] by
@Zheyuan-Lin in
[#21458](https://github.com/google-gemini/gemini-cli/pull/21458)
- Unmarshall update by @DavidAPierce in
[#21721](https://github.com/google-gemini/gemini-cli/pull/21721)
- Update mcp's list function to check for disablement. by @DavidAPierce in
[#21148](https://github.com/google-gemini/gemini-cli/pull/21148)
- robustness(core): static checks to validate history is immutable by @jacob314
in [#21228](https://github.com/google-gemini/gemini-cli/pull/21228)
- refactor(cli): better react patterns for BaseSettingsDialog by @psinha40898 in
[#21206](https://github.com/google-gemini/gemini-cli/pull/21206)
- feat(security): implement robust IP validation and safeFetch foundation by
@alisa-alisa in
[#21401](https://github.com/google-gemini/gemini-cli/pull/21401)
- feat(core): improve subagent result display by @joshualitt in
[#20378](https://github.com/google-gemini/gemini-cli/pull/20378)
- docs: fix broken markdown syntax and anchor links in /tools by @campox747 in
[#20902](https://github.com/google-gemini/gemini-cli/pull/20902)
- feat(policy): support subagent-specific policies in TOML by @akh64bit in
[#21431](https://github.com/google-gemini/gemini-cli/pull/21431)
- Add script to speed up reviewing PRs adding a worktree. by @jacob314 in
[#21748](https://github.com/google-gemini/gemini-cli/pull/21748)
- fix(core): prevent infinite recursion in symlink resolution by @Adib234 in
[#21750](https://github.com/google-gemini/gemini-cli/pull/21750)
- fix(docs): fix headless mode docs by @ame2en in
[#21287](https://github.com/google-gemini/gemini-cli/pull/21287)
- feat/redesign header compact by @jacob314 in
[#20922](https://github.com/google-gemini/gemini-cli/pull/20922)
- refactor: migrate to useKeyMatchers hook by @scidomino in
[#21753](https://github.com/google-gemini/gemini-cli/pull/21753)
- perf(cli): cache loadSettings to reduce redundant disk I/O at startup by
@sehoon38 in [#21521](https://github.com/google-gemini/gemini-cli/pull/21521)
- fix(core): resolve Windows line ending and path separation bugs across CLI by
@muhammadusman586 in
[#21068](https://github.com/google-gemini/gemini-cli/pull/21068)
- docs: fix heading formatting in commands.md and phrasing in tools-api.md by
@campox747 in [#20679](https://github.com/google-gemini/gemini-cli/pull/20679)
- refactor(ui): unify keybinding infrastructure and support string
initialization by @scidomino in
[#21776](https://github.com/google-gemini/gemini-cli/pull/21776)
- Add support for updating extension sources and names by @chrstnb in
[#21715](https://github.com/google-gemini/gemini-cli/pull/21715)
- fix(core): handle GUI editor non-zero exit codes gracefully by @reyyanxahmed
in [#20376](https://github.com/google-gemini/gemini-cli/pull/20376)
- fix(core): destroy PTY on kill() and exception to prevent fd leak by @nbardy
in [#21693](https://github.com/google-gemini/gemini-cli/pull/21693)
- fix(docs): update theme screenshots and add missing themes by @ashmod in
[#20689](https://github.com/google-gemini/gemini-cli/pull/20689)
- refactor(cli): rename 'return' key to 'enter' internally by @scidomino in
[#21796](https://github.com/google-gemini/gemini-cli/pull/21796)
- build(release): restrict npm bundling to non-stable tags by @sehoon38 in
[#21821](https://github.com/google-gemini/gemini-cli/pull/21821)
- fix(core): override toolRegistry property for sub-agent schedulers by
@gsquared94 in
[#21766](https://github.com/google-gemini/gemini-cli/pull/21766)
- fix(cli): make footer items equally spaced by @jacob314 in
[#21843](https://github.com/google-gemini/gemini-cli/pull/21843)
- docs: clarify global policy rules application in plan mode by @jerop in
[#21864](https://github.com/google-gemini/gemini-cli/pull/21864)
- fix(core): ensure correct flash model steering in plan mode implementation
phase by @jerop in
[#21871](https://github.com/google-gemini/gemini-cli/pull/21871)
- fix(core): update @a2a-js/sdk to 0.3.11 by @adamfweidman in
[#21875](https://github.com/google-gemini/gemini-cli/pull/21875)
- refactor(core): improve API response error logging when retry by @yunaseoul in
[#21784](https://github.com/google-gemini/gemini-cli/pull/21784)
- fix(ui): handle headless execution in credits and upgrade dialogs by
@gsquared94 in
[#21850](https://github.com/google-gemini/gemini-cli/pull/21850)
- fix(core): treat retryable errors with >5 min delay as terminal quota errors
by @gsquared94 in
[#21881](https://github.com/google-gemini/gemini-cli/pull/21881)
- feat(telemetry): add specific PR, issue, and custom tracking IDs for GitHub
Actions by @cocosheng-g in
[#21129](https://github.com/google-gemini/gemini-cli/pull/21129)
- feat(core): add OAuth2 Authorization Code auth provider for A2A agents by
@SandyTao520 in
[#21496](https://github.com/google-gemini/gemini-cli/pull/21496)
- feat(cli): give visibility to /tools list command in the TUI and follow the
subcommand pattern of other commands by @JayadityaGit in
[#21213](https://github.com/google-gemini/gemini-cli/pull/21213)
- Handle dirty worktrees better and warn about running scripts/review.sh on
untrusted code. by @jacob314 in
[#21791](https://github.com/google-gemini/gemini-cli/pull/21791)
- feat(policy): support auto-add to policy by default and scoped persistence by
@spencer426 in
[#20361](https://github.com/google-gemini/gemini-cli/pull/20361)
- fix(core): handle AbortError when ESC cancels tool execution by @PrasannaPal21
in [#20863](https://github.com/google-gemini/gemini-cli/pull/20863)
- fix(release): Improve Patch Release Workflow Comments: Clearer Approval
Guidance by @jerop in
[#21894](https://github.com/google-gemini/gemini-cli/pull/21894)
- docs: clarify telemetry setup and comprehensive data map by @jerop in
[#21879](https://github.com/google-gemini/gemini-cli/pull/21879)
- feat(core): add per-model token usage to stream-json output by @yongruilin in
[#21839](https://github.com/google-gemini/gemini-cli/pull/21839)
- docs: remove experimental badge from plan mode in sidebar by @jerop in
[#21906](https://github.com/google-gemini/gemini-cli/pull/21906)
- fix(cli): prevent race condition in loop detection retry by @skyvanguard in
[#17916](https://github.com/google-gemini/gemini-cli/pull/17916)
- Add behavioral evals for tracker by @anj-s in
[#20069](https://github.com/google-gemini/gemini-cli/pull/20069)
- fix(auth): update terminology to 'sign in' and 'sign out' by @clocky in
[#20892](https://github.com/google-gemini/gemini-cli/pull/20892)
- docs(mcp): standardize mcp tool fqn documentation by @abhipatel12 in
[#21664](https://github.com/google-gemini/gemini-cli/pull/21664)
- fix(ui): prevent empty tool-group border stubs after filtering by @Aaxhirrr in
[#21852](https://github.com/google-gemini/gemini-cli/pull/21852)
- make command names consistent by @scidomino in
[#21907](https://github.com/google-gemini/gemini-cli/pull/21907)
- refactor: remove agent_card_requires_auth config flag by @adamfweidman in
[#21914](https://github.com/google-gemini/gemini-cli/pull/21914)
- feat(a2a): implement standardized normalization and streaming reassembly by
@alisa-alisa in
[#21402](https://github.com/google-gemini/gemini-cli/pull/21402)
- feat(cli): enable skill activation via slash commands by @NTaylorMullen in
[#21758](https://github.com/google-gemini/gemini-cli/pull/21758)
- docs(cli): mention per-model token usage in stream-json result event by
@yongruilin in
[#21908](https://github.com/google-gemini/gemini-cli/pull/21908)
- fix(plan): prevent plan truncation in approval dialog by supporting
unconstrained heights by @Adib234 in
[#21037](https://github.com/google-gemini/gemini-cli/pull/21037)
- feat(a2a): switch from callback-based to event-driven tool scheduler by
@cocosheng-g in
[#21467](https://github.com/google-gemini/gemini-cli/pull/21467)
- feat(voice): implement speech-friendly response formatter by @ayush31010 in
[#20989](https://github.com/google-gemini/gemini-cli/pull/20989)
- feat: add pulsating blue border automation overlay to browser agent by
@kunal-10-cloud in
[#21173](https://github.com/google-gemini/gemini-cli/pull/21173)
- Add extensionRegistryURI setting to change where the registry is read from by
@kevinjwang1 in
[#20463](https://github.com/google-gemini/gemini-cli/pull/20463)
- fix: patch gaxios v7 Array.toString() stream corruption by @gsquared94 in
[#21884](https://github.com/google-gemini/gemini-cli/pull/21884)
- fix: prevent hangs in non-interactive mode and improve agent guidance by
@cocosheng-g in
[#20893](https://github.com/google-gemini/gemini-cli/pull/20893)
- Add ExtensionDetails dialog and support install by @chrstnb in
[#20845](https://github.com/google-gemini/gemini-cli/pull/20845)
- chore/release: bump version to 0.34.0-nightly.20260310.4653b126f by
@gemini-cli-robot in
[#21816](https://github.com/google-gemini/gemini-cli/pull/21816)
- Changelog for v0.33.0-preview.13 by @gemini-cli-robot in
[#21927](https://github.com/google-gemini/gemini-cli/pull/21927)
- fix(cli): stabilize prompt layout to prevent jumping when typing by
@NTaylorMullen in
[#21081](https://github.com/google-gemini/gemini-cli/pull/21081)
- fix: preserve prompt text when cancelling streaming by @Nixxx19 in
[#21103](https://github.com/google-gemini/gemini-cli/pull/21103)
- fix: robust UX for remote agent errors by @Shyam-Raghuwanshi in
[#20307](https://github.com/google-gemini/gemini-cli/pull/20307)
- feat: implement background process logging and cleanup by @galz10 in
[#21189](https://github.com/google-gemini/gemini-cli/pull/21189)
- Changelog for v0.33.0-preview.14 by @gemini-cli-robot in
[#21938](https://github.com/google-gemini/gemini-cli/pull/21938)
- fix(patch): cherry-pick 45faf4d to release/v0.34.0-preview.0-pr-22148
[CONFLICTS] by @gemini-cli-robot in
[#22174](https://github.com/google-gemini/gemini-cli/pull/22174)
- fix(patch): cherry-pick 8432bce to release/v0.34.0-preview.1-pr-22069 to patch
version v0.34.0-preview.1 and create version 0.34.0-preview.2 by
@gemini-cli-robot in
[#22205](https://github.com/google-gemini/gemini-cli/pull/22205)
- fix(patch): cherry-pick 24adacd to release/v0.34.0-preview.2-pr-22332 to patch
version v0.34.0-preview.2 and create version 0.34.0-preview.3 by
@gemini-cli-robot in
[#22391](https://github.com/google-gemini/gemini-cli/pull/22391)
- fix(patch): cherry-pick 48130eb to release/v0.34.0-preview.3-pr-22665 to patch
version v0.34.0-preview.3 and create version 0.34.0-preview.4 by
@gemini-cli-robot in
[#22719](https://github.com/google-gemini/gemini-cli/pull/22719)
**Full Changelog**:
https://github.com/google-gemini/gemini-cli/compare/v0.33.2...v0.34.0
================================================
FILE: docs/changelogs/preview.md
================================================
# Preview release: v0.35.0-preview.2
Released: March 19, 2026
Our preview release includes the latest, new, and experimental features. This
release may not be as stable as our [latest weekly release](latest.md).
To install the preview release:
```
npm install -g @google/gemini-cli@preview
```
## Highlights
- **Subagents & Architecture Enhancements**: Enabled subagents and laid the
foundation for subagent tool isolation. Added proxy routing support for remote
A2A subagents and integrated `SandboxManager` to sandbox all process-spawning
tools.
- **CLI & UI Improvements**: Introduced customizable keyboard shortcuts and
support for literal character keybindings. Added missing vim mode motions and
CJK input support. Enabled code splitting and deferred UI loading for improved
performance.
- **Context & Tools Optimization**: JIT context loading is now enabled by
default with deduplication for project memory. Introduced a model-driven
parallel tool scheduler and allowed safe tools to execute concurrently.
- **Security & Extensions**: Implemented cryptographic integrity verification
for extension updates and added a `disableAlwaysAllow` setting to prevent
auto-approvals for enhanced security.
- **Plan Mode & Web Fetch Updates**: Added an 'All the above' option for
multi-select AskUser questions in Plan Mode. Rolled out Stage 1 and Stage 2
security and consistency improvements for the `web_fetch` tool.
## What's Changed
- fix(patch): cherry-pick 4e5dfd0 to release/v0.35.0-preview.1-pr-23074 to patch
version v0.35.0-preview.1 and create version 0.35.0-preview.2 by
@gemini-cli-robot in
[#23134](https://github.com/google-gemini/gemini-cli/pull/23134)
- feat(cli): customizable keyboard shortcuts by @scidomino in
[#21945](https://github.com/google-gemini/gemini-cli/pull/21945)
- feat(core): Thread `AgentLoopContext` through core. by @joshualitt in
[#21944](https://github.com/google-gemini/gemini-cli/pull/21944)
- chore(release): bump version to 0.35.0-nightly.20260311.657f19c1f by
@gemini-cli-robot in
[#21966](https://github.com/google-gemini/gemini-cli/pull/21966)
- refactor(a2a): remove legacy CoreToolScheduler by @adamfweidman in
[#21955](https://github.com/google-gemini/gemini-cli/pull/21955)
- feat(ui): add missing vim mode motions (X, ~, r, f/F/t/T, df/dt and friends)
by @aanari in [#21932](https://github.com/google-gemini/gemini-cli/pull/21932)
- Feat/retry fetch notifications by @aishaneeshah in
[#21813](https://github.com/google-gemini/gemini-cli/pull/21813)
- fix(core): remove OAuth check from handleFallback and clean up stray file by
@sehoon38 in [#21962](https://github.com/google-gemini/gemini-cli/pull/21962)
- feat(cli): support literal character keybindings and extended Kitty protocol
keys by @scidomino in
[#21972](https://github.com/google-gemini/gemini-cli/pull/21972)
- fix(ui): clamp cursor to last char after all NORMAL mode deletes by @aanari in
[#21973](https://github.com/google-gemini/gemini-cli/pull/21973)
- test(core): add missing tests for prompts/utils.ts by @krrishverma1805-web in
[#19941](https://github.com/google-gemini/gemini-cli/pull/19941)
- fix(cli): allow scrolling keys in copy mode (Ctrl+S selection mode) by
@nsalerni in [#19933](https://github.com/google-gemini/gemini-cli/pull/19933)
- docs(cli): add custom keybinding documentation by @scidomino in
[#21980](https://github.com/google-gemini/gemini-cli/pull/21980)
- docs: fix misleading YOLO mode description in defaultApprovalMode by
@Gyanranjan-Priyam in
[#21878](https://github.com/google-gemini/gemini-cli/pull/21878)
- fix: clean up /clear and /resume by @jackwotherspoon in
[#22007](https://github.com/google-gemini/gemini-cli/pull/22007)
- fix(core)#20941: reap orphaned descendant processes on PTY abort by @manavmax
in [#21124](https://github.com/google-gemini/gemini-cli/pull/21124)
- fix(core): update language detection to use LSP 3.18 identifiers by @yunaseoul
in [#21931](https://github.com/google-gemini/gemini-cli/pull/21931)
- feat(cli): support removing keybindings via '-' prefix by @scidomino in
[#22042](https://github.com/google-gemini/gemini-cli/pull/22042)
- feat(policy): add --admin-policy flag for supplemental admin policies by
@galz10 in [#20360](https://github.com/google-gemini/gemini-cli/pull/20360)
- merge duplicate imports packages/cli/src subtask1 by @Nixxx19 in
[#22040](https://github.com/google-gemini/gemini-cli/pull/22040)
- perf(core): parallelize user quota and experiments fetching in refreshAuth by
@sehoon38 in [#21648](https://github.com/google-gemini/gemini-cli/pull/21648)
- Changelog for v0.34.0-preview.0 by @gemini-cli-robot in
[#21965](https://github.com/google-gemini/gemini-cli/pull/21965)
- Changelog for v0.33.0 by @gemini-cli-robot in
[#21967](https://github.com/google-gemini/gemini-cli/pull/21967)
- fix(core): handle EISDIR in robustRealpath on Windows by @sehoon38 in
[#21984](https://github.com/google-gemini/gemini-cli/pull/21984)
- feat(core): include initiationMethod in conversation interaction telemetry by
@yunaseoul in [#22054](https://github.com/google-gemini/gemini-cli/pull/22054)
- feat(ui): add vim yank/paste (y/p/P) with unnamed register by @aanari in
[#22026](https://github.com/google-gemini/gemini-cli/pull/22026)
- fix(core): enable numerical routing for api key users by @sehoon38 in
[#21977](https://github.com/google-gemini/gemini-cli/pull/21977)
- feat(telemetry): implement retry attempt telemetry for network related retries
by @aishaneeshah in
[#22027](https://github.com/google-gemini/gemini-cli/pull/22027)
- fix(policy): remove unnecessary escapeRegex from pattern builders by
@spencer426 in
[#21921](https://github.com/google-gemini/gemini-cli/pull/21921)
- fix(core): preserve dynamic tool descriptions on session resume by @sehoon38
in [#18835](https://github.com/google-gemini/gemini-cli/pull/18835)
- chore: allow 'gemini-3.1' in sensitive keyword linter by @scidomino in
[#22065](https://github.com/google-gemini/gemini-cli/pull/22065)
- feat(core): support custom base URL via env vars by @junaiddshaukat in
[#21561](https://github.com/google-gemini/gemini-cli/pull/21561)
- merge duplicate imports packages/cli/src subtask2 by @Nixxx19 in
[#22051](https://github.com/google-gemini/gemini-cli/pull/22051)
- fix(core): silently retry API errors up to 3 times before halting session by
@spencer426 in
[#21989](https://github.com/google-gemini/gemini-cli/pull/21989)
- feat(core): simplify subagent success UI and improve early termination display
by @abhipatel12 in
[#21917](https://github.com/google-gemini/gemini-cli/pull/21917)
- merge duplicate imports packages/cli/src subtask3 by @Nixxx19 in
[#22056](https://github.com/google-gemini/gemini-cli/pull/22056)
- fix(hooks): fix BeforeAgent/AfterAgent inconsistencies (#18514) by @krishdef7
in [#21383](https://github.com/google-gemini/gemini-cli/pull/21383)
- feat(core): implement SandboxManager interface and config schema by @galz10 in
[#21774](https://github.com/google-gemini/gemini-cli/pull/21774)
- docs: document npm deprecation warnings as safe to ignore by @h30s in
[#20692](https://github.com/google-gemini/gemini-cli/pull/20692)
- fix: remove status/need-triage from maintainer-only issues by @SandyTao520 in
[#22044](https://github.com/google-gemini/gemini-cli/pull/22044)
- fix(core): propagate subagent context to policy engine by @NTaylorMullen in
[#22086](https://github.com/google-gemini/gemini-cli/pull/22086)
- fix(cli): resolve skill uninstall failure when skill name is updated by
@NTaylorMullen in
[#22085](https://github.com/google-gemini/gemini-cli/pull/22085)
- docs(plan): clarify interactive plan editing with Ctrl+X by @Adib234 in
[#22076](https://github.com/google-gemini/gemini-cli/pull/22076)
- fix(policy): ensure user policies are loaded when policyPaths is empty by
@NTaylorMullen in
[#22090](https://github.com/google-gemini/gemini-cli/pull/22090)
- Docs: Add documentation for model steering (experimental). by @jkcinouye in
[#21154](https://github.com/google-gemini/gemini-cli/pull/21154)
- Add issue for automated changelogs by @g-samroberts in
[#21912](https://github.com/google-gemini/gemini-cli/pull/21912)
- fix(core): secure argsPattern and revert WEB_FETCH_TOOL_NAME escalation by
@spencer426 in
[#22104](https://github.com/google-gemini/gemini-cli/pull/22104)
- feat(core): differentiate User-Agent for a2a-server and ACP clients by
@bdmorgan in [#22059](https://github.com/google-gemini/gemini-cli/pull/22059)
- refactor(core): extract ExecutionLifecycleService for tool backgrounding by
@adamfweidman in
[#21717](https://github.com/google-gemini/gemini-cli/pull/21717)
- feat: Display pending and confirming tool calls by @sripasg in
[#22106](https://github.com/google-gemini/gemini-cli/pull/22106)
- feat(browser): implement input blocker overlay during automation by
@kunal-10-cloud in
[#21132](https://github.com/google-gemini/gemini-cli/pull/21132)
- fix: register themes on extension load not start by @jackwotherspoon in
[#22148](https://github.com/google-gemini/gemini-cli/pull/22148)
- feat(ui): Do not show Ultra users /upgrade hint (#22154) by @sehoon38 in
[#22156](https://github.com/google-gemini/gemini-cli/pull/22156)
- chore: remove unnecessary log for themes by @jackwotherspoon in
[#22165](https://github.com/google-gemini/gemini-cli/pull/22165)
- fix(core): resolve MCP tool FQN validation, schema export, and wildcards in
subagents by @abhipatel12 in
[#22069](https://github.com/google-gemini/gemini-cli/pull/22069)
- fix(cli): validate --model argument at startup by @JaisalJain in
[#21393](https://github.com/google-gemini/gemini-cli/pull/21393)
- fix(core): handle policy ALLOW for exit_plan_mode by @backnotprop in
[#21802](https://github.com/google-gemini/gemini-cli/pull/21802)
- feat(telemetry): add Clearcut instrumentation for AI credits billing events by
@gsquared94 in
[#22153](https://github.com/google-gemini/gemini-cli/pull/22153)
- feat(core): add google credentials provider for remote agents by @adamfweidman
in [#21024](https://github.com/google-gemini/gemini-cli/pull/21024)
- test(cli): add integration test for node deprecation warnings by @Nixxx19 in
[#20215](https://github.com/google-gemini/gemini-cli/pull/20215)
- feat(cli): allow safe tools to execute concurrently while agent is busy by
@spencer426 in
[#21988](https://github.com/google-gemini/gemini-cli/pull/21988)
- feat(core): implement model-driven parallel tool scheduler by @abhipatel12 in
[#21933](https://github.com/google-gemini/gemini-cli/pull/21933)
- update vulnerable deps by @scidomino in
[#22180](https://github.com/google-gemini/gemini-cli/pull/22180)
- fix(core): fix startup stats to use int values for timestamps and durations by
@yunaseoul in [#22201](https://github.com/google-gemini/gemini-cli/pull/22201)
- fix(core): prevent duplicate tool schemas for instantiated tools by
@abhipatel12 in
[#22204](https://github.com/google-gemini/gemini-cli/pull/22204)
- fix(core): add proxy routing support for remote A2A subagents by @adamfweidman
in [#22199](https://github.com/google-gemini/gemini-cli/pull/22199)
- fix(core/ide): add Antigravity CLI fallbacks by @apfine in
[#22030](https://github.com/google-gemini/gemini-cli/pull/22030)
- fix(browser): fix duplicate function declaration error in browser agent by
@gsquared94 in
[#22207](https://github.com/google-gemini/gemini-cli/pull/22207)
- feat(core): implement Stage 1 improvements for webfetch tool by @aishaneeshah
in [#21313](https://github.com/google-gemini/gemini-cli/pull/21313)
- Changelog for v0.34.0-preview.1 by @gemini-cli-robot in
[#22194](https://github.com/google-gemini/gemini-cli/pull/22194)
- perf(cli): enable code splitting and deferred UI loading by @sehoon38 in
[#22117](https://github.com/google-gemini/gemini-cli/pull/22117)
- fix: remove unused img.png from project root by @SandyTao520 in
[#22222](https://github.com/google-gemini/gemini-cli/pull/22222)
- docs(local model routing): add docs on how to use Gemma for local model
routing by @douglas-reid in
[#21365](https://github.com/google-gemini/gemini-cli/pull/21365)
- feat(a2a): enable native gRPC support and protocol routing by @alisa-alisa in
[#21403](https://github.com/google-gemini/gemini-cli/pull/21403)
- fix(cli): escape @ symbols on paste to prevent unintended file expansion by
@krishdef7 in [#21239](https://github.com/google-gemini/gemini-cli/pull/21239)
- feat(core): add trajectoryId to ConversationOffered telemetry by @yunaseoul in
[#22214](https://github.com/google-gemini/gemini-cli/pull/22214)
- docs: clarify that tools.core is an allowlist for ALL built-in tools by
@hobostay in [#18813](https://github.com/google-gemini/gemini-cli/pull/18813)
- docs(plan): document hooks with plan mode by @ruomengz in
[#22197](https://github.com/google-gemini/gemini-cli/pull/22197)
- Changelog for v0.33.1 by @gemini-cli-robot in
[#22235](https://github.com/google-gemini/gemini-cli/pull/22235)
- build(ci): fix false positive evals trigger on merge commits by @gundermanc in
[#22237](https://github.com/google-gemini/gemini-cli/pull/22237)
- fix(core): explicitly pass messageBus to policy engine for MCP tool saves by
@abhipatel12 in
[#22255](https://github.com/google-gemini/gemini-cli/pull/22255)
- feat(core): Fully migrate packages/core to AgentLoopContext. by @joshualitt in
[#22115](https://github.com/google-gemini/gemini-cli/pull/22115)
- feat(core): increase sub-agent turn and time limits by @bdmorgan in
[#22196](https://github.com/google-gemini/gemini-cli/pull/22196)
- feat(core): instrument file system tools for JIT context discovery by
@SandyTao520 in
[#22082](https://github.com/google-gemini/gemini-cli/pull/22082)
- refactor(ui): extract pure session browser utilities by @abhipatel12 in
[#22256](https://github.com/google-gemini/gemini-cli/pull/22256)
- fix(plan): Fix AskUser evals by @Adib234 in
[#22074](https://github.com/google-gemini/gemini-cli/pull/22074)
- fix(settings): prevent j/k navigation keys from intercepting edit buffer input
by @student-ankitpandit in
[#21865](https://github.com/google-gemini/gemini-cli/pull/21865)
- feat(skills): improve async-pr-review workflow and logging by @mattKorwel in
[#21790](https://github.com/google-gemini/gemini-cli/pull/21790)
- refactor(cli): consolidate getErrorMessage utility to core by @scidomino in
[#22190](https://github.com/google-gemini/gemini-cli/pull/22190)
- fix(core): show descriptive error messages when saving settings fails by
@afarber in [#18095](https://github.com/google-gemini/gemini-cli/pull/18095)
- docs(core): add authentication guide for remote subagents by @adamfweidman in
[#22178](https://github.com/google-gemini/gemini-cli/pull/22178)
- docs: overhaul subagents documentation and add /agents command by @abhipatel12
in [#22345](https://github.com/google-gemini/gemini-cli/pull/22345)
- refactor(ui): extract SessionBrowser static ui components by @abhipatel12 in
[#22348](https://github.com/google-gemini/gemini-cli/pull/22348)
- test: add Object.create context regression test and tool confirmation
integration test by @gsquared94 in
[#22356](https://github.com/google-gemini/gemini-cli/pull/22356)
- feat(tracker): return TodoList display for tracker tools by @anj-s in
[#22060](https://github.com/google-gemini/gemini-cli/pull/22060)
- feat(agent): add allowed domain restrictions for browser agent by
@cynthialong0-0 in
[#21775](https://github.com/google-gemini/gemini-cli/pull/21775)
- chore/release: bump version to 0.35.0-nightly.20260313.bb060d7a9 by
@gemini-cli-robot in
[#22251](https://github.com/google-gemini/gemini-cli/pull/22251)
- Move keychain fallback to keychain service by @chrstnb in
[#22332](https://github.com/google-gemini/gemini-cli/pull/22332)
- feat(core): integrate SandboxManager to sandbox all process-spawning tools by
@galz10 in [#22231](https://github.com/google-gemini/gemini-cli/pull/22231)
- fix(cli): support CJK input and full Unicode scalar values in terminal
protocols by @scidomino in
[#22353](https://github.com/google-gemini/gemini-cli/pull/22353)
- Promote stable tests. by @gundermanc in
[#22253](https://github.com/google-gemini/gemini-cli/pull/22253)
- feat(tracker): add tracker policy by @anj-s in
[#22379](https://github.com/google-gemini/gemini-cli/pull/22379)
- feat(security): add disableAlwaysAllow setting to disable auto-approvals by
@galz10 in [#21941](https://github.com/google-gemini/gemini-cli/pull/21941)
- Revert "fix(cli): validate --model argument at startup" by @sehoon38 in
[#22378](https://github.com/google-gemini/gemini-cli/pull/22378)
- fix(mcp): handle equivalent root resource URLs in OAuth validation by @galz10
in [#20231](https://github.com/google-gemini/gemini-cli/pull/20231)
- fix(core): use session-specific temp directory for task tracker by @anj-s in
[#22382](https://github.com/google-gemini/gemini-cli/pull/22382)
- Fix issue where config was undefined. by @gundermanc in
[#22397](https://github.com/google-gemini/gemini-cli/pull/22397)
- fix(core): deduplicate project memory when JIT context is enabled by
@SandyTao520 in
[#22234](https://github.com/google-gemini/gemini-cli/pull/22234)
- feat(prompts): implement Topic-Action-Summary model for verbosity reduction by
@Abhijit-2592 in
[#21503](https://github.com/google-gemini/gemini-cli/pull/21503)
- fix(core): fix manual deletion of subagent histories by @abhipatel12 in
[#22407](https://github.com/google-gemini/gemini-cli/pull/22407)
- Add registry var by @kevinjwang1 in
[#22224](https://github.com/google-gemini/gemini-cli/pull/22224)
- Add ModelDefinitions to ModelConfigService by @kevinjwang1 in
[#22302](https://github.com/google-gemini/gemini-cli/pull/22302)
- fix(cli): improve command conflict handling for skills by @NTaylorMullen in
[#21942](https://github.com/google-gemini/gemini-cli/pull/21942)
- fix(core): merge user settings with extension-provided MCP servers by
@abhipatel12 in
[#22484](https://github.com/google-gemini/gemini-cli/pull/22484)
- fix(core): skip discovery for incomplete MCP configs and resolve merge race
condition by @abhipatel12 in
[#22494](https://github.com/google-gemini/gemini-cli/pull/22494)
- fix(automation): harden stale PR closer permissions and maintainer detection
by @bdmorgan in
[#22558](https://github.com/google-gemini/gemini-cli/pull/22558)
- fix(automation): evaluate staleness before checking protected labels by
@bdmorgan in [#22561](https://github.com/google-gemini/gemini-cli/pull/22561)
- feat(agent): replace the runtime npx for browser agent chrome devtool mcp with
pre-built bundle by @cynthialong0-0 in
[#22213](https://github.com/google-gemini/gemini-cli/pull/22213)
- perf: optimize TrackerService dependency checks by @anj-s in
[#22384](https://github.com/google-gemini/gemini-cli/pull/22384)
- docs(policy): remove trailing space from commandPrefix examples by @kawasin73
in [#22264](https://github.com/google-gemini/gemini-cli/pull/22264)
- fix(a2a-server): resolve unsafe assignment lint errors by @ehedlund in
[#22661](https://github.com/google-gemini/gemini-cli/pull/22661)
- fix: Adjust ToolGroupMessage filtering to hide Confirming and show Canceled
tool calls. by @sripasg in
[#22230](https://github.com/google-gemini/gemini-cli/pull/22230)
- Disallow Object.create() and reflect. by @gundermanc in
[#22408](https://github.com/google-gemini/gemini-cli/pull/22408)
- Guard pro model usage by @sehoon38 in
[#22665](https://github.com/google-gemini/gemini-cli/pull/22665)
- refactor(core): Creates AgentSession abstraction for consolidated agent
interface. by @mbleigh in
[#22270](https://github.com/google-gemini/gemini-cli/pull/22270)
- docs(changelog): remove internal commands from release notes by
@jackwotherspoon in
[#22529](https://github.com/google-gemini/gemini-cli/pull/22529)
- feat: enable subagents by @abhipatel12 in
[#22386](https://github.com/google-gemini/gemini-cli/pull/22386)
- feat(extensions): implement cryptographic integrity verification for extension
updates by @ehedlund in
[#21772](https://github.com/google-gemini/gemini-cli/pull/21772)
- feat(tracker): polish UI sorting and formatting by @anj-s in
[#22437](https://github.com/google-gemini/gemini-cli/pull/22437)
- Changelog for v0.34.0-preview.2 by @gemini-cli-robot in
[#22220](https://github.com/google-gemini/gemini-cli/pull/22220)
- fix(core): fix three JIT context bugs in read_file, read_many_files, and
memoryDiscovery by @SandyTao520 in
[#22679](https://github.com/google-gemini/gemini-cli/pull/22679)
- refactor(core): introduce InjectionService with source-aware injection and
backend-native background completions by @adamfweidman in
[#22544](https://github.com/google-gemini/gemini-cli/pull/22544)
- Linux sandbox bubblewrap by @DavidAPierce in
[#22680](https://github.com/google-gemini/gemini-cli/pull/22680)
- feat(core): increase thought signature retry resilience by @bdmorgan in
[#22202](https://github.com/google-gemini/gemini-cli/pull/22202)
- feat(core): implement Stage 2 security and consistency improvements for
web_fetch by @aishaneeshah in
[#22217](https://github.com/google-gemini/gemini-cli/pull/22217)
- refactor(core): replace positional execute params with ExecuteOptions bag by
@adamfweidman in
[#22674](https://github.com/google-gemini/gemini-cli/pull/22674)
- feat(config): enable JIT context loading by default by @SandyTao520 in
[#22736](https://github.com/google-gemini/gemini-cli/pull/22736)
- fix(config): ensure discoveryMaxDirs is passed to global config during
initialization by @kevin-ramdass in
[#22744](https://github.com/google-gemini/gemini-cli/pull/22744)
- fix(plan): allowlist get_internal_docs in Plan Mode by @Adib234 in
[#22668](https://github.com/google-gemini/gemini-cli/pull/22668)
- Changelog for v0.34.0-preview.3 by @gemini-cli-robot in
[#22393](https://github.com/google-gemini/gemini-cli/pull/22393)
- feat(core): add foundation for subagent tool isolation by @akh64bit in
[#22708](https://github.com/google-gemini/gemini-cli/pull/22708)
- fix(core): handle surrogate pairs in truncateString by @sehoon38 in
[#22754](https://github.com/google-gemini/gemini-cli/pull/22754)
- fix(cli): override j/k navigation in settings dialog to fix search input
conflict by @sehoon38 in
[#22800](https://github.com/google-gemini/gemini-cli/pull/22800)
- feat(plan): add 'All the above' option to multi-select AskUser questions by
@Adib234 in [#22365](https://github.com/google-gemini/gemini-cli/pull/22365)
- docs: distribute package-specific GEMINI.md context to each package by
@SandyTao520 in
[#22734](https://github.com/google-gemini/gemini-cli/pull/22734)
- fix(cli): clean up stale pasted placeholder metadata after word/line deletions
by @Jomak-x in
[#20375](https://github.com/google-gemini/gemini-cli/pull/20375)
- refactor(core): align JIT memory placement with tiered context model by
@SandyTao520 in
[#22766](https://github.com/google-gemini/gemini-cli/pull/22766)
- Linux sandbox seccomp by @DavidAPierce in
[#22815](https://github.com/google-gemini/gemini-cli/pull/22815)
**Full Changelog**:
https://github.com/google-gemini/gemini-cli/compare/v0.34.0-preview.4...v0.35.0-preview.2
================================================
FILE: docs/cli/checkpointing.md
================================================
# Checkpointing
The Gemini CLI includes a Checkpointing feature that automatically saves a
snapshot of your project's state before any file modifications are made by
AI-powered tools. This lets you safely experiment with and apply code changes,
knowing you can instantly revert back to the state before the tool was run.
## How it works
When you approve a tool that modifies the file system (like `write_file` or
`replace`), the CLI automatically creates a "checkpoint." This checkpoint
includes:
1. **A Git snapshot:** A commit is made in a special, shadow Git repository
   located in your home directory (`~/.gemini/history/<project_hash>`). This
snapshot captures the complete state of your project files at that moment.
It does **not** interfere with your own project's Git repository.
2. **Conversation history:** The entire conversation you've had with the agent
up to that point is saved.
3. **The tool call:** The specific tool call that was about to be executed is
also stored.
If you want to undo the change or simply go back, you can use the `/restore`
command. Restoring a checkpoint will:
- Revert all files in your project to the state captured in the snapshot.
- Restore the conversation history in the CLI.
- Re-propose the original tool call, allowing you to run it again, modify it, or
simply ignore it.
All checkpoint data, including the Git snapshot and conversation history, is
stored locally on your machine. The Git snapshot is stored in the shadow
repository while the conversation history and tool calls are saved in a JSON
file in your project's temporary directory, typically located at
`~/.gemini/tmp/<project_hash>/checkpoints`.
## Enabling the feature
The Checkpointing feature is disabled by default. To enable it, you need to edit
your `settings.json` file.
> [!CAUTION]
> The `--checkpointing` command-line flag was removed in version
> 0.11.0. Checkpointing can now only be enabled through the `settings.json`
> configuration file.
Add the following key to your `settings.json`:
```json
{
"general": {
"checkpointing": {
"enabled": true
}
}
}
```
## Using the `/restore` command
Once enabled, checkpoints are created automatically. To manage them, you use the
`/restore` command.
### List available checkpoints
To see a list of all saved checkpoints for the current project, simply run:
```
/restore
```
The CLI will display a list of available checkpoint files. These file names are
typically composed of a timestamp, the name of the file being modified, and the
name of the tool that was about to be run (e.g.,
`2025-06-22T10-00-00_000Z-my-file.txt-write_file`).
### Restore a specific checkpoint
To restore your project to a specific checkpoint, use the checkpoint file from
the list:
```
/restore <checkpoint_file>
```
For example:
```
/restore 2025-06-22T10-00-00_000Z-my-file.txt-write_file
```
After running the command, your files and conversation will be immediately
restored to the state they were in when the checkpoint was created, and the
original tool prompt will reappear.
================================================
FILE: docs/cli/cli-reference.md
================================================
# Gemini CLI cheatsheet
This page provides a reference for commonly used Gemini CLI commands, options,
and parameters.
## CLI commands
| Command | Description | Example |
| ---------------------------------- | ---------------------------------- | ------------------------------------------------------------ |
| `gemini` | Start interactive REPL | `gemini` |
| `gemini -p "query"` | Query non-interactively | `gemini -p "summarize README.md"` |
| `gemini "query"` | Query and continue interactively | `gemini "explain this project"` |
| `cat file \| gemini` | Process piped content | `cat logs.txt \| gemini` `Get-Content logs.txt \| gemini` |
| `gemini -i "query"` | Execute and continue interactively | `gemini -i "What is the purpose of this project?"` |
| `gemini -r "latest"` | Continue most recent session | `gemini -r "latest"` |
| `gemini -r "latest" "query"` | Continue session with a new prompt | `gemini -r "latest" "Check for type errors"` |
| `gemini -r "<session_id>" "query"` | Resume session by ID | `gemini -r "abc123" "Finish this PR"` |
| `gemini update` | Update to latest version | `gemini update` |
| `gemini extensions` | Manage extensions | See [Extensions Management](#extensions-management) |
| `gemini mcp` | Configure MCP servers | See [MCP Server Management](#mcp-server-management) |
### Positional arguments
| Argument | Type | Description |
| -------- | ----------------- | ---------------------------------------------------------------------------------------------------------- |
| `query` | string (variadic) | Positional prompt. Defaults to interactive mode in a TTY. Use `-p/--prompt` for non-interactive execution. |
## Interactive commands
These commands are available within the interactive REPL.
| Command | Description |
| -------------------- | ---------------------------------------- |
| `/skills reload` | Reload discovered skills from disk |
| `/agents reload` | Reload the agent registry |
| `/commands reload` | Reload custom slash commands |
| `/memory reload` | Reload context files (e.g., `GEMINI.md`) |
| `/mcp reload` | Restart and reload MCP servers |
| `/extensions reload` | Reload all active extensions |
| `/help` | Show help for all commands |
| `/quit` | Exit the interactive session |
## CLI Options
| Option | Alias | Type | Default | Description |
| -------------------------------- | ----- | ------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `--debug` | `-d` | boolean | `false` | Run in debug mode with verbose logging |
| `--version` | `-v` | - | - | Show CLI version number and exit |
| `--help` | `-h` | - | - | Show help information |
| `--model` | `-m` | string | `auto` | Model to use. See [Model Selection](#model-selection) for available values. |
| `--prompt` | `-p` | string | - | Prompt text. Appended to stdin input if provided. Forces non-interactive mode. |
| `--prompt-interactive` | `-i` | string | - | Execute prompt and continue in interactive mode |
| `--sandbox` | `-s` | boolean | `false` | Run in a sandboxed environment for safer execution |
| `--approval-mode` | - | string | `default` | Approval mode for tool execution. Choices: `default`, `auto_edit`, `yolo` |
| `--yolo` | `-y` | boolean | `false` | **Deprecated.** Auto-approve all actions. Use `--approval-mode=yolo` instead. |
| `--experimental-acp` | - | boolean | - | Start in ACP (Agent Client Protocol) mode. **Experimental feature.** |
| `--experimental-zed-integration` | - | boolean | - | Run in Zed editor integration mode. **Experimental feature.** |
| `--allowed-mcp-server-names` | - | array | - | Allowed MCP server names (comma-separated or multiple flags) |
| `--allowed-tools` | - | array | - | **Deprecated.** Use the [Policy Engine](../reference/policy-engine.md) instead. Tools that are allowed to run without confirmation (comma-separated or multiple flags) |
| `--extensions` | `-e` | array | - | List of extensions to use. If not provided, all extensions are enabled (comma-separated or multiple flags) |
| `--list-extensions` | `-l` | boolean | - | List all available extensions and exit |
| `--resume` | `-r` | string | - | Resume a previous session. Use `"latest"` for most recent or index number (e.g. `--resume 5`) |
| `--list-sessions` | - | boolean | - | List available sessions for the current project and exit |
| `--delete-session` | - | string | - | Delete a session by index number (use `--list-sessions` to see available sessions) |
| `--include-directories` | - | array | - | Additional directories to include in the workspace (comma-separated or multiple flags) |
| `--screen-reader` | - | boolean | - | Enable screen reader mode for accessibility |
| `--output-format` | `-o` | string | `text` | The format of the CLI output. Choices: `text`, `json`, `stream-json` |
## Model selection
The `--model` (or `-m`) flag lets you specify which Gemini model to use. You can
use either model aliases (user-friendly names) or concrete model names.
### Model aliases
These are convenient shortcuts that map to specific models:
| Alias | Resolves To | Description |
| ------------ | ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------- |
| `auto` | `gemini-2.5-pro` or `gemini-3-pro-preview` | **Default.** Resolves to the preview model if preview features are enabled, otherwise resolves to the standard pro model. |
| `pro` | `gemini-2.5-pro` or `gemini-3-pro-preview` | For complex reasoning tasks. Uses preview model if enabled. |
| `flash` | `gemini-2.5-flash` | Fast, balanced model for most tasks. |
| `flash-lite` | `gemini-2.5-flash-lite` | Fastest model for simple tasks. |
## Extensions management
| Command | Description | Example |
| -------------------------------------------------- | -------------------------------------------- | ------------------------------------------------------------------------------ |
| `gemini extensions install <source>` | Install extension from Git URL or local path | `gemini extensions install https://github.com/user/my-extension` |
| `gemini extensions install <source> --ref <ref>` | Install from specific branch/tag/commit | `gemini extensions install https://github.com/user/my-extension --ref develop` |
| `gemini extensions install <source> --auto-update` | Install with auto-update enabled | `gemini extensions install https://github.com/user/my-extension --auto-update` |
| `gemini extensions uninstall <name>` | Uninstall one or more extensions | `gemini extensions uninstall my-extension` |
| `gemini extensions list` | List all installed extensions | `gemini extensions list` |
| `gemini extensions update <name>` | Update a specific extension | `gemini extensions update my-extension` |
| `gemini extensions update --all` | Update all extensions | `gemini extensions update --all` |
| `gemini extensions enable <name>` | Enable an extension | `gemini extensions enable my-extension` |
| `gemini extensions disable <name>` | Disable an extension | `gemini extensions disable my-extension` |
| `gemini extensions link <path>` | Link local extension for development | `gemini extensions link /path/to/extension` |
| `gemini extensions new <path>` | Create new extension from template | `gemini extensions new ./my-extension` |
| `gemini extensions validate <path>` | Validate extension structure | `gemini extensions validate ./my-extension` |
See [Extensions Documentation](../extensions/index.md) for more details.
## MCP server management
| Command | Description | Example |
| ------------------------------------------------------------- | ------------------------------- | ---------------------------------------------------------------------------------------------------- |
| `gemini mcp add <name> <command>` | Add stdio-based MCP server | `gemini mcp add github npx -y @modelcontextprotocol/server-github` |
| `gemini mcp add <name> <url> --transport http` | Add HTTP-based MCP server | `gemini mcp add api-server http://localhost:3000 --transport http` |
| `gemini mcp add <name> <command> --env KEY=value` | Add with environment variables | `gemini mcp add slack node server.js --env SLACK_TOKEN=xoxb-xxx` |
| `gemini mcp add <name> <command> --scope user` | Add with user scope | `gemini mcp add db node db-server.js --scope user` |
| `gemini mcp add <name> <command> --include-tools tool1,tool2` | Add with specific tools | `gemini mcp add github npx -y @modelcontextprotocol/server-github --include-tools list_repos,get_pr` |
| `gemini mcp remove <name>` | Remove an MCP server | `gemini mcp remove github` |
| `gemini mcp list` | List all configured MCP servers | `gemini mcp list` |
See [MCP Server Integration](../tools/mcp-server.md) for more details.
## Skills management
| Command | Description | Example |
| -------------------------------- | ------------------------------------- | ------------------------------------------------- |
| `gemini skills list` | List all discovered agent skills | `gemini skills list` |
| `gemini skills install <source>` | Install skill from Git, path, or file | `gemini skills install https://github.com/u/repo` |
| `gemini skills link <path>` | Link local agent skills via symlink | `gemini skills link /path/to/my-skills` |
| `gemini skills uninstall <name>` | Uninstall an agent skill | `gemini skills uninstall my-skill` |
| `gemini skills enable <name>` | Enable an agent skill | `gemini skills enable my-skill` |
| `gemini skills disable <name>` | Disable an agent skill | `gemini skills disable my-skill` |
| `gemini skills enable --all` | Enable all skills | `gemini skills enable --all` |
| `gemini skills disable --all` | Disable all skills | `gemini skills disable --all` |
See [Agent Skills Documentation](./skills.md) for more details.
================================================
FILE: docs/cli/creating-skills.md
================================================
# Creating Agent Skills
This guide provides an overview of how to create your own Agent Skills to extend
the capabilities of Gemini CLI.
## Getting started: The `skill-creator` skill
The recommended way to create a new skill is to use the built-in `skill-creator`
skill. To use it, ask Gemini CLI to create a new skill for you.
**Example prompt:**
> "create a new skill called 'code-reviewer'"
Gemini CLI will then use the `skill-creator` to generate the skill:
1. Generate a new directory for your skill (e.g., `my-new-skill/`).
2. Create a `SKILL.md` file with the necessary YAML frontmatter (`name` and
`description`).
3. Create the standard resource directories: `scripts/`, `references/`, and
`assets/`.
## Manual skill creation
If you prefer to create skills manually:
1. **Create a directory** for your skill (e.g., `my-new-skill/`).
2. **Create a `SKILL.md` file** inside the new directory.
To add additional resources that support the skill, refer to the skill
structure.
## Skill structure
A skill is a directory containing a `SKILL.md` file at its root.
### Folder structure
While a `SKILL.md` file is the only required component, we recommend the
following structure for organizing your skill's resources:
```text
my-skill/
├── SKILL.md (Required) Instructions and metadata
├── scripts/ (Optional) Executable scripts
├── references/ (Optional) Static documentation
└── assets/ (Optional) Templates and other resources
```
### `SKILL.md` file
The `SKILL.md` file is the core of your skill. This file uses YAML frontmatter
for metadata and Markdown for instructions. For example:
```markdown
---
name: code-reviewer
description:
Use this skill to review code. It supports both local changes and remote Pull
Requests.
---
# Code Reviewer
This skill guides the agent in conducting thorough code reviews.
## Workflow
### 1. Determine Review Target
- **Remote PR**: If the user gives a PR number or URL, target that remote PR.
- **Local Changes**: If changes are local... ...
```
- **`name`**: A unique identifier for the skill. This should match the directory
name.
- **`description`**: A description of what the skill does and when Gemini should
use it.
- **Body**: The Markdown body of the file contains the instructions that guide
the agent's behavior when the skill is active.
================================================
FILE: docs/cli/custom-commands.md
================================================
# Custom commands
Custom commands let you save and reuse your favorite or most frequently used
prompts as personal shortcuts within Gemini CLI. You can create commands that
are specific to a single project or commands that are available globally across
all your projects, streamlining your workflow and ensuring consistency.
## File locations and precedence
Gemini CLI discovers commands from two locations, loaded in a specific order:
1. **User commands (global):** Located in `~/.gemini/commands/`. These commands
are available in any project you are working on.
2. **Project commands (local):** Located in
   `<project>/.gemini/commands/`. These commands are specific to the
   current project and can be checked into version control to be shared with
   your team.
If a command in the project directory has the same name as a command in the user
directory, the **project command will always be used.** This allows projects to
override global commands with project-specific versions.
## Naming and namespacing
The name of a command is determined by its file path relative to its `commands`
directory. Subdirectories are used to create namespaced commands, with the path
separator (`/` or `\`) being converted to a colon (`:`).
- A file at `~/.gemini/commands/test.toml` becomes the command `/test`.
- A file at `<project>/.gemini/commands/git/commit.toml` becomes the namespaced
  command `/git:commit`.
> [!TIP]
> After creating or modifying `.toml` command files, run
> `/commands reload` to pick up your changes without restarting the CLI.
## TOML file format (v1)
Your command definition files must be written in the TOML format and use the
`.toml` file extension.
### Required fields
- `prompt` (String): The prompt that will be sent to the Gemini model when the
command is executed. This can be a single-line or multi-line string.
### Optional fields
- `description` (String): A brief, one-line description of what the command
does. This text will be displayed next to your command in the `/help` menu.
**If you omit this field, a generic description will be generated from the
filename.**
## Handling arguments
Custom commands support two powerful methods for handling arguments. The CLI
automatically chooses the correct method based on the content of your command's
`prompt`.
### 1. Context-aware injection with `{{args}}`
If your `prompt` contains the special placeholder `{{args}}`, the CLI will
replace that placeholder with the text the user typed after the command name.
The behavior of this injection depends on where it is used:
**A. Raw injection (outside shell commands)**
When used in the main body of the prompt, the arguments are injected exactly as
the user typed them.
**Example (`git/fix.toml`):**
```toml
# Invoked via: /git:fix "Button is misaligned"
description = "Generates a fix for a given issue."
prompt = "Please provide a code fix for the issue described here: {{args}}."
```
The model receives:
`Please provide a code fix for the issue described here: "Button is misaligned".`
**B. Using arguments in shell commands (inside `!{...}` blocks)**
When you use `{{args}}` inside a shell injection block (`!{...}`), the arguments
are automatically **shell-escaped** before replacement. This allows you to
safely pass arguments to shell commands, ensuring the resulting command is
syntactically correct and secure while preventing command injection
vulnerabilities.
**Example (`/grep-code.toml`):**
```toml
prompt = """
Please summarize the findings for the pattern `{{args}}`.
Search Results:
!{grep -r {{args}} .}
"""
```
When you run `/grep-code It's complicated`:
1. The CLI sees `{{args}}` used both outside and inside `!{...}`.
2. Outside: The first `{{args}}` is replaced raw with `It's complicated`.
3. Inside: The second `{{args}}` is replaced with the escaped version (e.g., on
Linux: `"It\'s complicated"`).
4. The command executed is `grep -r "It's complicated" .`.
5. The CLI prompts you to confirm this exact, secure command before execution.
6. The final prompt is sent.
### 2. Default argument handling
If your `prompt` does **not** contain the special placeholder `{{args}}`, the
CLI uses a default behavior for handling arguments.
If you provide arguments to the command (e.g., `/mycommand arg1`), the CLI will
append the full command you typed to the end of the prompt, separated by two
newlines. This allows the model to see both the original instructions and the
specific arguments you just provided.
If you do **not** provide any arguments (e.g., `/mycommand`), the prompt is sent
to the model exactly as it is, with nothing appended.
**Example (`changelog.toml`):**
This example shows how to create a robust command by defining a role for the
model, explaining where to find the user's input, and specifying the expected
format and behavior.
```toml
# In: <project>/.gemini/commands/changelog.toml
# Invoked via: /changelog 1.2.0 added "Support for default argument parsing."
description = "Adds a new entry to the project's CHANGELOG.md file."
prompt = """
# Task: Update Changelog
You are an expert maintainer of this software project. A user has invoked a command to add a new entry to the changelog.
**The user's raw command is appended below your instructions.**
Your task is to parse the `<version>`, `<type>`, and `<message>` from their input and use the `write_file` tool to correctly update the `CHANGELOG.md` file.
## Expected Format
The command follows this format: `/changelog <version> <type> <message>`
- `<type>` must be one of: "added", "changed", "fixed", "removed".
## Behavior
1. Read the `CHANGELOG.md` file.
2. Find the section for the specified `<version>`.
3. Add the `<message>` under the correct `<type>` heading.
4. If the version or type section doesn't exist, create it.
5. Adhere strictly to the "Keep a Changelog" format.
"""
```
When you run `/changelog 1.2.0 added "New feature"`, the final text sent to the
model will be the original prompt followed by two newlines and the command you
typed.
### 3. Executing shell commands with `!{...}`
You can make your commands dynamic by executing shell commands directly within
your `prompt` and injecting their output. This is ideal for gathering context
from your local environment, like reading file content or checking the status of
Git.
When a custom command attempts to execute a shell command, Gemini CLI will now
prompt you for confirmation before proceeding. This is a security measure to
ensure that only intended commands can be run.
**How it works:**
1. **Inject commands:** Use the `!{...}` syntax.
2. **Argument substitution:** If `{{args}}` is present inside the block, it is
automatically shell-escaped (see
[Context-Aware Injection](#1-context-aware-injection-with-args) above).
3. **Robust parsing:** The parser correctly handles complex shell commands that
include nested braces, such as JSON payloads. The content inside `!{...}`
must have balanced braces (`{` and `}`). If you need to execute a command
containing unbalanced braces, consider wrapping it in an external script
file and calling the script within the `!{...}` block.
4. **Security check and confirmation:** The CLI performs a security check on
the final, resolved command (after arguments are escaped and substituted). A
dialog will appear showing the exact command(s) to be executed.
5. **Execution and error reporting:** The command is executed. If the command
fails, the output injected into the prompt will include the error messages
(stderr) followed by a status line, e.g.,
`[Shell command exited with code 1]`. This helps the model understand the
context of the failure.
**Example (`git/commit.toml`):**
This command gets the staged git diff and uses it to ask the model to write a
commit message.
````toml
# In: <project>/.gemini/commands/git/commit.toml
# Invoked via: /git:commit
description = "Generates a Git commit message based on staged changes."
# The prompt uses !{...} to execute the command and inject its output.
prompt = """
Please generate a Conventional Commit message based on the following git diff:
```diff
!{git diff --staged}
```
"""
````
When you run `/git:commit`, the CLI first executes `git diff --staged`, then
replaces `!{git diff --staged}` with the output of that command before sending
the final, complete prompt to the model.
### 4. Injecting file content with `@{...}`
You can directly embed the content of a file or a directory listing into your
prompt using the `@{...}` syntax. This is useful for creating commands that
operate on specific files.
**How it works:**
- **File injection**: `@{path/to/file.txt}` is replaced by the content of
`file.txt`.
- **Multimodal support**: If the path points to a supported image (e.g., PNG,
JPEG), PDF, audio, or video file, it will be correctly encoded and injected as
multimodal input. Other binary files are handled gracefully and skipped.
- **Directory listing**: `@{path/to/dir}` is traversed and each file present
within the directory and all subdirectories is inserted into the prompt. This
respects `.gitignore` and `.geminiignore` if enabled.
- **Workspace-aware**: The command searches for the path in the current
directory and any other workspace directories. Absolute paths are allowed if
they are within the workspace.
- **Processing order**: File content injection with `@{...}` is processed
_before_ shell commands (`!{...}`) and argument substitution (`{{args}}`).
- **Parsing**: The parser requires the content inside `@{...}` (the path) to
have balanced braces (`{` and `}`).
**Example (`review.toml`):**
This command injects the content of a _fixed_ best practices file
(`docs/best-practices.md`) and uses the user's arguments to provide context for
the review.
```toml
# In: <project>/.gemini/commands/review.toml
# Invoked via: /review FileCommandLoader.ts
description = "Reviews the provided context using a best practice guide."
prompt = """
You are an expert code reviewer.
Your task is to review {{args}}.
Use the following best practices when providing your review:
@{docs/best-practices.md}
"""
```
When you run `/review FileCommandLoader.ts`, the `@{docs/best-practices.md}`
placeholder is replaced by the content of that file, and `{{args}}` is replaced
by the text you provided, before the final prompt is sent to the model.
---
## Example: A "Pure Function" refactoring command
Let's create a global command that asks the model to refactor a piece of code.
**1. Create the file and directories:**
First, ensure the user commands directory exists, then create a `refactor`
subdirectory for organization and the final TOML file.
**macOS/Linux**
```bash
mkdir -p ~/.gemini/commands/refactor
touch ~/.gemini/commands/refactor/pure.toml
```
**Windows (PowerShell)**
```powershell
New-Item -ItemType Directory -Force -Path "$env:USERPROFILE\.gemini\commands\refactor"
New-Item -ItemType File -Force -Path "$env:USERPROFILE\.gemini\commands\refactor\pure.toml"
```
**2. Add the content to the file:**
Open `~/.gemini/commands/refactor/pure.toml` in your editor and add the
following content. We are including the optional `description` for best
practice.
```toml
# In: ~/.gemini/commands/refactor/pure.toml
# This command will be invoked via: /refactor:pure
description = "Asks the model to refactor the current context into a pure function."
prompt = """
Please analyze the code I've provided in the current context.
Refactor it into a pure function.
Your response should include:
1. The refactored, pure function code block.
2. A brief explanation of the key changes you made and why they contribute to purity.
"""
```
**3. Run the command:**
That's it! You can now run your command in the CLI. First, you might add a file
to the context, and then invoke your command:
```
> @my-messy-function.js
> /refactor:pure
```
Gemini CLI will then execute the multi-line prompt defined in your TOML file.
================================================
FILE: docs/cli/enterprise.md
================================================
# Gemini CLI for the enterprise
This document outlines configuration patterns and best practices for deploying
and managing Gemini CLI in an enterprise environment. By leveraging system-level
settings, administrators can enforce security policies, manage tool access, and
ensure a consistent experience for all users.
> [!WARNING]
> The patterns described in this document are intended to help
> administrators create a more controlled and secure environment for using
> Gemini CLI. However, they should not be considered a foolproof security
> boundary. A determined user with sufficient privileges on their local machine
> may still be able to circumvent these configurations. These measures are
> designed to prevent accidental misuse and enforce corporate policy in a
> managed environment, not to defend against a malicious actor with local
> administrative rights.
## Centralized configuration: The system settings file
The most powerful tools for enterprise administration are the system-wide
settings files. These files allow you to define a baseline configuration
(`system-defaults.json`) and a set of overrides (`settings.json`) that apply to
all users on a machine. For a complete overview of configuration options, see
the [Configuration documentation](../reference/configuration.md).
Settings are merged from four files. The precedence order for single-value
settings (like `theme`) is:
1. System Defaults (`system-defaults.json`)
2. User Settings (`~/.gemini/settings.json`)
3. Workspace Settings (`<project>/.gemini/settings.json`)
4. System Overrides (`settings.json`)
This means the System Overrides file has the final say. For settings that are
arrays (`includeDirectories`) or objects (`mcpServers`), the values are merged.
**Example of merging and precedence:**
Here is how settings from different levels are combined.
- **System defaults `system-defaults.json`:**
```json
{
"ui": {
"theme": "default-corporate-theme"
},
"context": {
"includeDirectories": ["/etc/gemini-cli/common-context"]
}
}
```
- **User `settings.json` (`~/.gemini/settings.json`):**
```json
{
"ui": {
"theme": "user-preferred-dark-theme"
},
"mcpServers": {
"corp-server": {
"command": "/usr/local/bin/corp-server-dev"
},
"user-tool": {
"command": "npm start --prefix ~/tools/my-tool"
}
},
"context": {
"includeDirectories": ["~/gemini-context"]
}
}
```
- **Workspace `settings.json` (`<project>/.gemini/settings.json`):**
```json
{
"ui": {
"theme": "project-specific-light-theme"
},
"mcpServers": {
"project-tool": {
"command": "npm start"
}
},
"context": {
"includeDirectories": ["./project-context"]
}
}
```
- **System overrides `settings.json`:**
```json
{
"ui": {
"theme": "system-enforced-theme"
},
"mcpServers": {
"corp-server": {
"command": "/usr/local/bin/corp-server-prod"
}
},
"context": {
"includeDirectories": ["/etc/gemini-cli/global-context"]
}
}
```
This results in the following merged configuration:
- **Final merged configuration:**
```json
{
"ui": {
"theme": "system-enforced-theme"
},
"mcpServers": {
"corp-server": {
"command": "/usr/local/bin/corp-server-prod"
},
"user-tool": {
"command": "npm start --prefix ~/tools/my-tool"
},
"project-tool": {
"command": "npm start"
}
},
"context": {
"includeDirectories": [
"/etc/gemini-cli/common-context",
"~/gemini-context",
"./project-context",
"/etc/gemini-cli/global-context"
]
}
}
```
**Why:**
- **`theme`**: The value from the system overrides (`system-enforced-theme`) is
used, as it has the highest precedence.
- **`mcpServers`**: The objects are merged. The `corp-server` definition from
the system overrides takes precedence over the user's definition. The unique
`user-tool` and `project-tool` are included.
- **`includeDirectories`**: The arrays are concatenated in the order of System
Defaults, User, Workspace, and then System Overrides.
- **Location**:
- **Linux**: `/etc/gemini-cli/settings.json`
- **Windows**: `C:\ProgramData\gemini-cli\settings.json`
- **macOS**: `/Library/Application Support/GeminiCli/settings.json`
- The path can be overridden using the `GEMINI_CLI_SYSTEM_SETTINGS_PATH`
environment variable.
- **Control**: This file should be managed by system administrators and
protected with appropriate file permissions to prevent unauthorized
modification by users.
By using the system settings file, you can enforce the security and
configuration patterns described below.
### Enforcing system settings with a wrapper script
While the `GEMINI_CLI_SYSTEM_SETTINGS_PATH` environment variable provides
flexibility, a user could potentially override it to point to a different
settings file, bypassing the centrally managed configuration. To mitigate this,
enterprises can deploy a wrapper script or alias that ensures the environment
variable is always set to the corporate-controlled path.
This approach ensures that no matter how the user calls the `gemini` command,
the enterprise settings are always loaded with the highest precedence.
**Example wrapper script:**
Administrators can create a script named `gemini` and place it in a directory
that appears earlier in the user's `PATH` than the actual Gemini CLI binary
(e.g., `/usr/local/bin/gemini`).
```bash
#!/bin/bash
# Enforce the path to the corporate system settings file.
# This ensures that the company's configuration is always applied.
export GEMINI_CLI_SYSTEM_SETTINGS_PATH="/etc/gemini-cli/settings.json"
# Find the original gemini executable.
# This is a simple example; a more robust solution might be needed
# depending on the installation method.
REAL_GEMINI_PATH=$(type -aP gemini | grep -v "^$(type -P gemini)$" | head -n 1)
if [ -z "$REAL_GEMINI_PATH" ]; then
echo "Error: The original 'gemini' executable was not found." >&2
exit 1
fi
# Pass all arguments to the real Gemini CLI executable.
exec "$REAL_GEMINI_PATH" "$@"
```
By deploying this script, the `GEMINI_CLI_SYSTEM_SETTINGS_PATH` is set within
the script's environment, and the `exec` command replaces the script process
with the actual Gemini CLI process, which inherits the environment variable.
This makes it significantly more difficult for a user to bypass the enforced
settings.
**PowerShell Profile (Windows alternative):**
On Windows, administrators can achieve similar results by adding the environment
variable to the system-wide or user-specific PowerShell profile:
```powershell
Add-Content -Path $PROFILE -Value '$env:GEMINI_CLI_SYSTEM_SETTINGS_PATH="C:\ProgramData\gemini-cli\settings.json"'
```
## User isolation in shared environments
In shared compute environments (like ML experiment runners or shared build
servers), you can isolate Gemini CLI state by overriding the user's home
directory.
By default, Gemini CLI stores configuration and history in `~/.gemini`. You can
use the `GEMINI_CLI_HOME` environment variable to point to a unique directory
for a specific user or job. The CLI will create a `.gemini` folder inside the
specified path.
**macOS/Linux**
```bash
# Isolate state for a specific job
export GEMINI_CLI_HOME="/tmp/gemini-job-123"
gemini
```
**Windows (PowerShell)**
```powershell
# Isolate state for a specific job
$env:GEMINI_CLI_HOME="C:\temp\gemini-job-123"
gemini
```
## Restricting tool access
You can significantly enhance security by controlling which tools the Gemini
model can use. This is achieved through the `tools.core` setting and the
[Policy Engine](../reference/policy-engine.md). For a list of available tools,
see the [Tools reference](../reference/tools.md).
### Allowlisting with `coreTools`
The most secure approach is to explicitly add the tools and commands that users
are permitted to execute to an allowlist. This prevents the use of any tool not
on the approved list.
**Example:** Allow only safe, read-only file operations and listing files.
```json
{
"tools": {
"core": ["ReadFileTool", "GlobTool", "ShellTool(ls)"]
}
}
```
### Blocklisting with `excludeTools` (Deprecated)
> **Deprecated:** Use the [Policy Engine](../reference/policy-engine.md) for
> more robust control.
Alternatively, you can add specific tools that are considered dangerous in your
environment to a blocklist.
**Example:** Prevent the use of the shell tool for removing files.
```json
{
"tools": {
"exclude": ["ShellTool(rm -rf)"]
}
}
```
> [!WARNING]
> Blocklisting with `excludeTools` is less secure than
> allowlisting with `coreTools`, as it relies on blocking known-bad commands,
> and clever users may find ways to bypass simple string-based blocks.
> **Allowlisting is the recommended approach.**
### Disabling YOLO mode
To ensure that users cannot bypass the confirmation prompt for tool execution,
you can disable YOLO mode at the policy level. This adds a critical layer of
safety, as it prevents the model from executing tools without explicit user
approval.
**Example:** Force all tool executions to require user confirmation.
```json
{
"security": {
"disableYoloMode": true
}
}
```
This setting is highly recommended in an enterprise environment to prevent
unintended tool execution.
## Managing custom tools (MCP servers)
If your organization uses custom tools via
[Model-Context Protocol (MCP) servers](../tools/mcp-server.md), it is crucial to
understand how server configurations are managed to apply security policies
effectively.
### How MCP server configurations are merged
Gemini CLI loads `settings.json` files from three levels: System, Workspace, and
User. When it comes to the `mcpServers` object, these configurations are
**merged**:
1. **Merging:** The lists of servers from all three levels are combined into a
single list.
2. **Precedence:** If a server with the **same name** is defined at multiple
levels (e.g., a server named `corp-api` exists in both system and user
settings), the definition from the highest-precedence level is used. The
order of precedence is: **System > Workspace > User**.
This means a user **cannot** override the definition of a server that is already
defined in the system-level settings. However, they **can** add new servers with
unique names.
### Enforcing a catalog of tools
The security of your MCP tool ecosystem depends on a combination of defining the
canonical servers and adding their names to an allowlist.
### Restricting tools within an MCP server
For even greater security, especially when dealing with third-party MCP servers,
you can restrict which specific tools from a server are exposed to the model.
This is done using the `includeTools` and `excludeTools` properties within a
server's definition. This allows you to use a subset of tools from a server
without allowing potentially dangerous ones.
Following the principle of least privilege, it is highly recommended to use
`includeTools` to create an allowlist of only the necessary tools.
**Example:** Only allow the `code-search` and `get-ticket-details` tools from a
third-party MCP server, even if the server offers other tools like
`delete-ticket`.
```json
{
"mcp": {
"allowed": ["third-party-analyzer"]
},
"mcpServers": {
"third-party-analyzer": {
"command": "/usr/local/bin/start-3p-analyzer.sh",
"includeTools": ["code-search", "get-ticket-details"]
}
}
}
```
#### More secure pattern: Define and add to allowlist in system settings
To create a secure, centrally-managed catalog of tools, the system administrator
**must** do both of the following in the system-level `settings.json` file:
1. **Define the full configuration** for every approved server in the
`mcpServers` object. This ensures that even if a user defines a server with
the same name, the secure system-level definition will take precedence.
2. **Add the names** of those servers to an allowlist using the `mcp.allowed`
setting. This is a critical security step that prevents users from running
any servers that are not on this list. If this setting is omitted, the CLI
will merge and allow any server defined by the user.
**Example system `settings.json`:**
1. Add the _names_ of all approved servers to an allowlist. This will prevent
users from adding their own servers.
2. Provide the canonical _definition_ for each server on the allowlist.
```json
{
"mcp": {
"allowed": ["corp-data-api", "source-code-analyzer"]
},
"mcpServers": {
"corp-data-api": {
"command": "/usr/local/bin/start-corp-api.sh",
"timeout": 5000
},
"source-code-analyzer": {
"command": "/usr/local/bin/start-analyzer.sh"
}
}
}
```
This pattern is more secure because it uses both definition and an allowlist.
Any server a user defines will either be overridden by the system definition (if
it has the same name) or blocked because its name is not in the `mcp.allowed`
list.
### Less secure pattern: Omitting the allowlist
If the administrator defines the `mcpServers` object but fails to also specify
the `mcp.allowed` allowlist, users may add their own servers.
**Example system `settings.json`:**
This configuration defines servers but does not enforce the allowlist. The
administrator has NOT included the `mcp.allowed` setting.
```json
{
"mcpServers": {
"corp-data-api": {
"command": "/usr/local/bin/start-corp-api.sh"
}
}
}
```
In this scenario, a user can add their own server in their local
`settings.json`. Because there is no `mcp.allowed` list to filter the merged
results, the user's server will be added to the list of available tools and
allowed to run.
## Enforcing sandboxing for security
To mitigate the risk of potentially harmful operations, you can enforce the use
of sandboxing for all tool execution. The sandbox isolates tool execution in a
containerized environment.
**Example:** Force all tool execution to happen within a Docker sandbox.
```json
{
"tools": {
"sandbox": "docker"
}
}
```
You can also specify a custom, hardened Docker image for the sandbox by building
a custom `sandbox.Dockerfile` as described in the
[Sandboxing documentation](./sandbox.md).
## Controlling network access via proxy
In corporate environments with strict network policies, you can configure Gemini
CLI to route all outbound traffic through a corporate proxy. This can be set via
an environment variable, but it can also be enforced for custom tools via the
`mcpServers` configuration.
**Example (for an MCP server):**
```json
{
"mcpServers": {
"proxied-server": {
"command": "node",
"args": ["mcp_server.js"],
"env": {
"HTTP_PROXY": "http://proxy.example.com:8080",
"HTTPS_PROXY": "http://proxy.example.com:8080"
}
}
}
}
```
## Telemetry and auditing
For auditing and monitoring purposes, you can configure Gemini CLI to send
telemetry data to a central location. This allows you to track tool usage and
other events. For more information, see the
[telemetry documentation](./telemetry.md).
**Example:** Enable telemetry and send it to a local OTLP collector. If
`otlpEndpoint` is not specified, it defaults to `http://localhost:4317`.
```json
{
"telemetry": {
"enabled": true,
"target": "gcp",
"logPrompts": false
}
}
```
> [!NOTE]
> Ensure that `logPrompts` is set to `false` in an enterprise setting to
> avoid collecting potentially sensitive information from user prompts.
## Authentication
You can enforce a specific authentication method for all users by setting the
`enforcedAuthType` in the system-level `settings.json` file. This prevents users
from choosing a different authentication method. See the
[Authentication docs](../get-started/authentication.md) for more details.
**Example:** Enforce the use of Google login for all users.
```json
{
"enforcedAuthType": "oauth-personal"
}
```
If a user has a different authentication method configured, they will be
prompted to switch to the enforced method. In non-interactive mode, the CLI will
exit with an error if the configured authentication method does not match the
enforced one.
### Restricting logins to corporate domains
For enterprises using Google Workspace, you can enforce that users only
authenticate with their corporate Google accounts. This is a network-level
control that is configured on a proxy server, not within Gemini CLI itself. It
works by intercepting authentication requests to Google and adding a special
HTTP header.
This policy prevents users from logging in with personal Gmail accounts or other
non-corporate Google accounts.
For detailed instructions, see the Google Workspace Admin Help article on
[blocking access to consumer accounts](https://support.google.com/a/answer/1668854?hl=en#zippy=%2Cstep-choose-a-web-proxy-server%2Cstep-configure-the-network-to-block-certain-accounts).
The general steps are as follows:
1. **Intercept Requests**: Configure your web proxy to intercept all requests
to `google.com`.
2. **Add HTTP Header**: For each intercepted request, add the
`X-GoogApps-Allowed-Domains` HTTP header.
3. **Specify Domains**: The value of the header should be a comma-separated
list of your approved Google Workspace domain names.
**Example header:**
```
X-GoogApps-Allowed-Domains: my-corporate-domain.com, secondary-domain.com
```
When this header is present, Google's authentication service will only allow
logins from accounts belonging to the specified domains.
## Putting it all together: example system `settings.json`
Here is an example of a system `settings.json` file that combines several of the
patterns discussed above to create a secure, controlled environment for Gemini
CLI.
```json
{
"tools": {
"sandbox": "docker",
"core": [
"ReadFileTool",
"GlobTool",
"ShellTool(ls)",
"ShellTool(cat)",
"ShellTool(grep)"
]
},
"mcp": {
"allowed": ["corp-tools"]
},
"mcpServers": {
"corp-tools": {
"command": "/opt/gemini-tools/start.sh",
"timeout": 5000
}
},
"telemetry": {
"enabled": true,
"target": "gcp",
"otlpEndpoint": "https://telemetry-prod.example.com:4317",
"logPrompts": false
},
"advanced": {
"bugCommand": {
"urlTemplate": "https://servicedesk.example.com/new-ticket?title={title}&details={info}"
}
},
"privacy": {
"usageStatisticsEnabled": false
}
}
```
This configuration:
- Forces all tool execution into a Docker sandbox.
- Strictly uses an allowlist for a small set of safe shell commands and file
tools.
- Defines and allows a single corporate MCP server for custom tools.
- Enables telemetry for auditing, without logging prompt content.
- Redirects the `/bug` command to an internal ticketing system.
- Disables general usage statistics collection.
================================================
FILE: docs/cli/gemini-ignore.md
================================================
# Ignoring files
This document provides an overview of the Gemini Ignore (`.geminiignore`)
feature of the Gemini CLI.
The Gemini CLI includes the ability to automatically ignore files, similar to
`.gitignore` (used by Git) and `.aiexclude` (used by Gemini Code Assist). Adding
paths to your `.geminiignore` file will exclude them from tools that support
this feature, although they will still be visible to other services (such as
Git).
## How it works
When you add a path to your `.geminiignore` file, tools that respect this file
will exclude matching files and directories from their operations. For example,
when you use the `@` command to share files, any paths in your `.geminiignore`
file will be automatically excluded.
For the most part, `.geminiignore` follows the conventions of `.gitignore`
files:
- Blank lines and lines starting with `#` are ignored.
- Standard glob patterns are supported (such as `*`, `?`, and `[]`).
- Putting a `/` at the end will only match directories.
- Putting a `/` at the beginning anchors the path relative to the
`.geminiignore` file.
- `!` negates a pattern.
You can update your `.geminiignore` file at any time. To apply the changes, you
must restart your Gemini CLI session.
## How to use `.geminiignore`
To enable `.geminiignore`:
1. Create a file named `.geminiignore` in the root of your project directory.
To add a file or directory to `.geminiignore`:
1. Open your `.geminiignore` file.
2. Add the path or file you want to ignore, for example: `/archive/` or
`apikeys.txt`.
### `.geminiignore` examples
You can use `.geminiignore` to ignore directories and files:
```
# Exclude your /packages/ directory and all subdirectories
/packages/
# Exclude your apikeys.txt file
apikeys.txt
```
You can use wildcards in your `.geminiignore` file with `*`:
```
# Exclude all .md files
*.md
```
Finally, you can exclude files and directories from exclusion with `!`:
```
# Exclude all .md files except README.md
*.md
!README.md
```
To remove paths from your `.geminiignore` file, delete the relevant lines.
================================================
FILE: docs/cli/gemini-md.md
================================================
# Provide context with GEMINI.md files
Context files, which use the default name `GEMINI.md`, are a powerful feature
for providing instructional context to the Gemini model. You can use these files
to give project-specific instructions, define a persona, or provide coding style
guides to make the AI's responses more accurate and tailored to your needs.
Instead of repeating instructions in every prompt, you can define them once in a
context file.
## Understand the context hierarchy
The CLI uses a hierarchical system to source context. It loads various context
files from several locations, concatenates the contents of all found files, and
sends them to the model with every prompt. The CLI loads files in the following
order:
1. **Global context file:**
- **Location:** `~/.gemini/GEMINI.md` (in your user home directory).
- **Scope:** Provides default instructions for all your projects.
2. **Environment and workspace context files:**
- **Location:** The CLI searches for `GEMINI.md` files in your configured
workspace directories and their parent directories.
- **Scope:** Provides context relevant to the projects you are currently
working on.
3. **Just-in-time (JIT) context files:**
- **Location:** When a tool accesses a file or directory, the CLI
automatically scans for `GEMINI.md` files in that directory and its
ancestors up to a trusted root.
- **Scope:** Lets the model discover highly specific instructions for
particular components only when they are needed.
The CLI footer displays the number of loaded context files, which gives you a
quick visual cue of the active instructional context.
### Example `GEMINI.md` file
Here is an example of what you can include in a `GEMINI.md` file at the root of
a TypeScript project:
```markdown
# Project: My TypeScript Library
## General Instructions
- When you generate new TypeScript code, follow the existing coding style.
- Ensure all new functions and classes have JSDoc comments.
- Prefer functional programming paradigms where appropriate.
## Coding Style
- Use 2 spaces for indentation.
- Prefix interface names with `I` (for example, `IUserService`).
- Always use strict equality (`===` and `!==`).
```
## Manage context with the `/memory` command
You can interact with the loaded context files by using the `/memory` command.
- **`/memory show`**: Displays the full, concatenated content of the current
hierarchical memory. This lets you inspect the exact instructional context
being provided to the model.
- **`/memory reload`**: Forces a re-scan and reload of all `GEMINI.md` files
from all configured locations.
- **`/memory add <text>`**: Appends your text to your global
  `~/.gemini/GEMINI.md` file. This lets you add persistent memories on the fly.
## Modularize context with imports
You can break down large `GEMINI.md` files into smaller, more manageable
components by importing content from other files using the `@file.md` syntax.
This feature supports both relative and absolute paths.
**Example `GEMINI.md` with imports:**
```markdown
# Main GEMINI.md file
This is the main content.
@./components/instructions.md
More content here.
@../shared/style-guide.md
```
For more details, see the [Memory Import Processor](../reference/memport.md)
documentation.
## Customize the context file name
While `GEMINI.md` is the default filename, you can configure this in your
`settings.json` file. To specify a different name or a list of names, use the
`context.fileName` property.
**Example `settings.json`:**
```json
{
"context": {
"fileName": ["AGENTS.md", "CONTEXT.md", "GEMINI.md"]
}
}
```
## Next steps
- Learn about [Ignoring files](./gemini-ignore.md) to exclude content from the
context system.
- Explore the [Memory tool](../tools/memory.md) to save persistent memories.
- See how to use [Custom commands](./custom-commands.md) to automate common
prompts.
================================================
FILE: docs/cli/generation-settings.md
================================================
# Advanced Model Configuration
This guide details the Model Configuration system within the Gemini CLI.
Designed for researchers, AI quality engineers, and advanced users, this system
provides a rigorous framework for managing generative model hyperparameters and
behaviors.
> **Warning**: This is a power-user feature. Configuration values are passed
> directly to the model provider with minimal validation. Incorrect settings
> (e.g., incompatible parameter combinations) may result in runtime errors from
> the API.
## 1. System Overview
The Model Configuration system (`ModelConfigService`) enables deterministic
control over model generation. It decouples the requested model identifier
(e.g., a CLI flag or agent request) from the underlying API configuration. This
allows for:
- **Precise Hyperparameter Tuning**: Direct control over `temperature`, `topP`,
`thinkingBudget`, and other SDK-level parameters.
- **Environment-Specific Behavior**: Distinct configurations for different
operating contexts (e.g., testing vs. production).
- **Agent-Scoped Customization**: Applying specific settings only when a
particular agent is active.
The system operates on two core primitives: **Aliases** and **Overrides**.
## 2. Configuration Primitives
These settings are located under the `modelConfigs` key in your configuration
file.
### Aliases (`customAliases`)
Aliases are named, reusable configuration presets. Users should define their own
aliases (or override system defaults) in the `customAliases` map.
- **Inheritance**: An alias can `extends` another alias (including system
defaults like `chat-base`), inheriting its `modelConfig`. Child aliases can
overwrite or augment inherited settings.
- **Abstract Aliases**: An alias is not required to specify a concrete `model`
if it serves purely as a base for other aliases.
**Example Hierarchy**:
```json
"modelConfigs": {
"customAliases": {
"base": {
"modelConfig": {
"generateContentConfig": { "temperature": 0.0 }
}
},
"chat-base": {
"extends": "base",
"modelConfig": {
"generateContentConfig": { "temperature": 0.7 }
}
}
}
}
```
### Overrides (`overrides`)
Overrides are conditional rules that inject configuration based on the runtime
context. They are evaluated dynamically for each model request.
- **Match Criteria**: Overrides apply when the request context matches the
specified `match` properties.
- `model`: Matches the requested model name or alias.
- `overrideScope`: Matches the distinct scope of the request (typically the
agent name, e.g., `codebaseInvestigator`).
**Example Override**:
```json
"modelConfigs": {
"overrides": [
{
"match": {
"overrideScope": "codebaseInvestigator"
},
"modelConfig": {
"generateContentConfig": { "temperature": 0.1 }
}
}
]
}
```
## 3. Resolution Strategy
The `ModelConfigService` resolves the final configuration through a two-step
process:
### Step 1: Alias Resolution
The requested model string is looked up in the merged map of system `aliases`
and user `customAliases`.
1. If found, the system recursively resolves the `extends` chain.
2. Settings are merged from parent to child (child wins).
3. This results in a base `ResolvedModelConfig`.
4. If not found, the requested string is treated as the raw model name.
### Step 2: Override Application
The system evaluates the `overrides` list against the request context (`model`
and `overrideScope`).
1. **Filtering**: All matching overrides are identified.
2. **Sorting**: Matches are prioritized by **specificity** (the number of
matched keys in the `match` object).
- Specific matches (e.g., `model` + `overrideScope`) override broad matches
(e.g., `model` only).
- Tie-breaking: If specificity is equal, the order of definition in the
`overrides` array is preserved (last one wins).
3. **Merging**: The configurations from the sorted overrides are merged
sequentially onto the base configuration.
## 4. Configuration Reference
The configuration follows the `ModelConfigServiceConfig` interface.
### `ModelConfig` Object
Defines the actual parameters for the model.
| Property | Type | Description |
| :---------------------- | :------- | :----------------------------------------------------------------- |
| `model` | `string` | The identifier of the model to be called (e.g., `gemini-2.5-pro`). |
| `generateContentConfig` | `object` | The configuration object passed to the `@google/genai` SDK. |
### `GenerateContentConfig` (Common Parameters)
Directly maps to the SDK's `GenerateContentConfig`. Common parameters include:
- **`temperature`**: (`number`) Controls output randomness. Lower values (0.0)
are deterministic; higher values (>0.7) are creative.
- **`topP`**: (`number`) Nucleus sampling probability.
- **`maxOutputTokens`**: (`number`) Limit on generated response length.
- **`thinkingConfig`**: (`object`) Configuration for models with reasoning
capabilities (e.g., `thinkingBudget`, `includeThoughts`).
## 5. Practical Examples
### Defining a Deterministic Baseline
Create an alias for tasks requiring high precision, extending the standard chat
configuration but enforcing zero temperature.
```json
"modelConfigs": {
"customAliases": {
"precise-mode": {
"extends": "chat-base",
"modelConfig": {
"generateContentConfig": {
"temperature": 0.0,
"topP": 1.0
}
}
}
}
}
```
### Agent-Specific Parameter Injection
Enforce extended thinking budgets for a specific agent without altering the
global default, e.g. for the `codebaseInvestigator`.
```json
"modelConfigs": {
"overrides": [
{
"match": {
"overrideScope": "codebaseInvestigator"
},
"modelConfig": {
"generateContentConfig": {
"thinkingConfig": { "thinkingBudget": 4096 }
}
}
}
]
}
```
### Experimental Model Evaluation
Route traffic for a specific alias to a preview model for A/B testing, without
changing client code.
```json
"modelConfigs": {
"overrides": [
{
"match": {
"model": "gemini-2.5-pro"
},
"modelConfig": {
"model": "gemini-2.5-pro-experimental-001"
}
}
]
}
```
================================================
FILE: docs/cli/headless.md
================================================
# Headless mode reference
Headless mode provides a programmatic interface to Gemini CLI, returning
structured text or JSON output without an interactive terminal UI.
## Technical reference
Headless mode is triggered when the CLI is run in a non-TTY environment or when
providing a query with the `-p` (or `--prompt`) flag.
### Output formats
You can specify the output format using the `--output-format` flag.
#### JSON output
Returns a single JSON object containing the response and usage statistics.
- **Schema:**
- `response`: (string) The model's final answer.
- `stats`: (object) Token usage and API latency metrics.
- `error`: (object, optional) Error details if the request failed.
#### Streaming JSON output
Returns a stream of newline-delimited JSON (JSONL) events.
- **Event types:**
- `init`: Session metadata (session ID, model).
- `message`: User and assistant message chunks.
- `tool_use`: Tool call requests with arguments.
- `tool_result`: Output from executed tools.
- `error`: Non-fatal warnings and system errors.
- `result`: Final outcome with aggregated statistics and per-model token usage
breakdowns.
## Exit codes
The CLI returns standard exit codes to indicate the result of the headless
execution:
- `0`: Success.
- `1`: General error or API failure.
- `42`: Input error (invalid prompt or arguments).
- `53`: Turn limit exceeded.
## Next steps
- Follow the [Automation tutorial](./tutorials/automation.md) for practical
scripting examples.
- See the [CLI reference](./cli-reference.md) for all available flags.
================================================
FILE: docs/cli/model-routing.md
================================================
# Model routing
Gemini CLI includes a model routing feature that automatically switches to a
fallback model in case of a model failure. This feature is enabled by default
and provides resilience when the primary model is unavailable.
## How it works
Model routing is managed by the `ModelAvailabilityService`, which monitors model
health and automatically routes requests to available models based on defined
policies.
1. **Model failure:** If the currently selected model fails (e.g., due to quota
or server errors), the CLI will initiate the fallback process.
2. **User consent:** Depending on the failure and the model's policy, the CLI
may prompt you to switch to a fallback model (by default always prompts
you).
Some internal utility calls (such as prompt completion and classification)
use a silent fallback chain for `gemini-2.5-flash-lite` and will fall back
to `gemini-2.5-flash` and `gemini-2.5-pro` without prompting or changing the
configured model.
3. **Model switch:** If approved, or if the policy allows for silent fallback,
the CLI will use an available fallback model for the current turn or the
remainder of the session.
### Local Model Routing (Experimental)
Gemini CLI supports using a local model for routing decisions. When configured,
Gemini CLI will use a locally-running **Gemma** model to make routing decisions
(instead of sending routing decisions to a hosted model). This feature can help
reduce costs associated with hosted model usage while offering similar routing
decision latency and quality.
In order to use this feature, the local Gemma model **must** be served behind a
Gemini API and accessible via HTTP at an endpoint configured in `settings.json`.
For more details on how to configure local model routing, see
[Local Model Routing](../core/local-model-routing.md).
### Model selection precedence
The model used by Gemini CLI is determined by the following order of precedence:
1. **`--model` command-line flag:** A model specified with the `--model` flag
when launching the CLI will always be used.
2. **`GEMINI_MODEL` environment variable:** If the `--model` flag is not used,
the CLI will use the model specified in the `GEMINI_MODEL` environment
variable.
3. **`model.name` in `settings.json`:** If neither of the above are set, the
model specified in the `model.name` property of your `settings.json` file
will be used.
4. **Local model (experimental):** If the Gemma local model router is enabled
in your `settings.json` file, the CLI will use the local Gemma model
(instead of Gemini models) to route the request to an appropriate model.
5. **Default model:** If none of the above are set, the default model will be
   used. The default model is `auto`.
================================================
FILE: docs/cli/model-steering.md
================================================
# Model steering (experimental)
Model steering lets you provide real-time guidance and feedback to Gemini CLI
while it is actively executing a task. This lets you correct course, add missing
context, or skip unnecessary steps without having to stop and restart the agent.
> [!NOTE]
> This is an experimental feature currently under active development and
> may need to be enabled under `/settings`.
Model steering is particularly useful during complex [Plan Mode](./plan-mode.md)
workflows or long-running subagent executions where you want to ensure the agent
stays on the right track.
## Enabling model steering
Model steering is an experimental feature and is disabled by default. You can
enable it using the `/settings` command or by updating your `settings.json`
file.
1. Type `/settings` in the Gemini CLI.
2. Search for **Model Steering**.
3. Set the value to **true**.
Alternatively, add the following to your `settings.json`:
```json
{
"experimental": {
"modelSteering": true
}
}
```
## Using model steering
When model steering is enabled, Gemini CLI treats any text you type while the
agent is working as a steering hint.
1. Start a task (for example, "Refactor the database service").
2. While the agent is working (the spinner is visible), type your feedback in
the input box.
3. Press **Enter**.
Gemini CLI acknowledges your hint with a brief message and injects it directly
into the model's context for the very next turn. The model then re-evaluates its
current plan and adjusts its actions accordingly.
### Common use cases
You can use steering hints to guide the model in several ways:
- **Correcting a path:** "Actually, the utilities are in `src/common/utils`."
- **Skipping a step:** "Skip the unit tests for now and just focus on the
implementation."
- **Adding context:** "The `User` type is defined in `packages/core/types.ts`."
- **Redirecting the effort:** "Stop searching the codebase and start drafting
the plan now."
- **Handling ambiguity:** "Use the existing `Logger` class instead of creating a
new one."
## How it works
When you submit a steering hint, Gemini CLI performs the following actions:
1. **Immediate acknowledgment:** It uses a small, fast model to generate a
one-sentence acknowledgment so you know your hint was received.
2. **Context injection:** It prepends an internal instruction to your hint that
tells the main agent to:
- Re-evaluate the active plan.
- Classify the update (for example, as a new task or extra context).
- Apply minimal-diff changes to affected tasks.
3. **Real-time update:** The hint is delivered to the agent at the beginning of
its next turn, ensuring the most immediate course correction possible.
## Next steps
- Tackle complex tasks with [Plan Mode](./plan-mode.md).
- Build custom [Agent Skills](./skills.md).
================================================
FILE: docs/cli/model.md
================================================
# Gemini CLI model selection (`/model` command)
Select your Gemini CLI model. The `/model` command lets you configure the model
used by Gemini CLI, giving you more control over your results. Use **Pro**
models for complex tasks and reasoning, **Flash** models for high speed results,
or the (recommended) **Auto** setting to choose the best model for your tasks.
> [!NOTE]
> The `/model` command (and the `--model` flag) does not override the
> model used by sub-agents. Consequently, even when using the `/model` command
> you may see other models used in your model usage reports.
## How to use the `/model` command
Use the following command in Gemini CLI:
```
/model
```
Running this command will open a dialog with your options:
| Option | Description | Models |
| ----------------- | -------------------------------------------------------------- | -------------------------------------------- |
| Auto (Gemini 3) | Let the system choose the best Gemini 3 model for your task. | gemini-3-pro-preview, gemini-3-flash-preview |
| Auto (Gemini 2.5) | Let the system choose the best Gemini 2.5 model for your task. | gemini-2.5-pro, gemini-2.5-flash |
| Manual | Select a specific model. | Any available model. |
We recommend selecting one of the above **Auto** options. However, you can
select **Manual** to select a specific model from those available.
You can also use the `--model` flag to specify a particular Gemini model on
startup. For more details, refer to the
[configuration documentation](../reference/configuration.md).
Changes to these settings will be applied to all subsequent interactions with
Gemini CLI.
## Best practices for model selection
- **Default to Auto.** For most users, the _Auto_ option provides a
  balance between speed and performance, automatically selecting the correct
  model based on the complexity of the task. Example: Developing a web
  application could include a mix of complex tasks (building architecture and
  scaffolding the project) and simple tasks (generating CSS).
- **Switch to Pro if you aren't getting the results you want.** If you think you
need your model to be a little "smarter," you can manually select Pro. Pro
will provide you with the highest levels of reasoning and creativity. Example:
A complex or multi-stage debugging task.
- **Switch to Flash or Flash-Lite if you need faster results.** If you need a
simple response quickly, Flash or Flash-Lite is the best option. Example:
Converting a JSON object to a YAML string.
================================================
FILE: docs/cli/notifications.md
================================================
# Notifications (experimental)
Gemini CLI can send system notifications to alert you when a session completes
or when it needs your attention, such as when it's waiting for you to approve a
tool call.
> [!NOTE]
> This is an experimental feature currently under active development and
> may need to be enabled under `/settings`.
Notifications are particularly useful when running long-running tasks or using
[Plan Mode](./plan-mode.md), letting you switch to other windows while Gemini
CLI works in the background.
## Requirements
Currently, system notifications are only supported on macOS.
### Terminal support
The CLI uses the OSC 9 terminal escape sequence to trigger system notifications.
This is supported by several modern terminal emulators. If your terminal does
not support OSC 9 notifications, Gemini CLI falls back to a system alert sound
to get your attention.
## Enable notifications
Notifications are disabled by default. You can enable them using the `/settings`
command or by updating your `settings.json` file.
1. Open the settings dialog by typing `/settings` in an interactive session.
2. Navigate to the **General** category.
3. Toggle the **Enable Notifications** setting to **On**.
Alternatively, add the following to your `settings.json`:
```json
{
"general": {
"enableNotifications": true
}
}
```
## Types of notifications
Gemini CLI sends notifications for the following events:
- **Action required:** Triggered when the model is waiting for user input or
tool approval. This helps you know when the CLI has paused and needs you to
intervene.
- **Session complete:** Triggered when a session finishes successfully. This is
useful for tracking the completion of automated tasks.
## Next steps
- Start planning with [Plan Mode](./plan-mode.md).
- Configure your experience with other [settings](./settings.md).
================================================
FILE: docs/cli/plan-mode.md
================================================
# Plan Mode
Plan Mode is a read-only environment for architecting robust solutions before
implementation. With Plan Mode, you can:
- **Research:** Explore the project in a read-only state to prevent accidental
changes.
- **Design:** Understand problems, evaluate trade-offs, and choose a solution.
- **Plan:** Align on an execution strategy before any code is modified.
Plan Mode is enabled by default. You can manage this setting using the
`/settings` command.
## How to enter Plan Mode
Plan Mode integrates seamlessly into your workflow, letting you switch between
planning and execution as needed.
You can either configure Gemini CLI to start in Plan Mode by default or enter
Plan Mode manually during a session.
### Launch in Plan Mode
To start Gemini CLI directly in Plan Mode by default:
1. Use the `/settings` command.
2. Set **Default Approval Mode** to `Plan`.
To launch Gemini CLI in Plan Mode once:
1. Use `gemini --approval-mode=plan` when launching Gemini CLI.
### Enter Plan Mode manually
To start Plan Mode while using Gemini CLI:
- **Keyboard shortcut:** Press `Shift+Tab` to cycle through approval modes
(`Default` -> `Auto-Edit` -> `Plan`). Plan Mode is automatically removed from
the rotation when Gemini CLI is actively processing or showing confirmation
dialogs.
- **Command:** Type `/plan` in the input box.
- **Natural Language:** Ask Gemini CLI to "start a plan for...". Gemini CLI
calls the
[`enter_plan_mode`](../tools/planning.md#1-enter_plan_mode-enterplanmode) tool
to switch modes. This tool is not available when Gemini CLI is in
[YOLO mode](../reference/configuration.md#command-line-arguments).
## How to use Plan Mode
Plan Mode lets you collaborate with Gemini CLI to design a solution before
Gemini CLI takes action.
1. **Provide a goal:** Start by describing what you want to achieve. Gemini CLI
will then enter Plan Mode (if it's not already) to research the task.
2. **Review research and provide input:** As Gemini CLI analyzes your codebase,
it may ask you questions or present different implementation options using
[`ask_user`](../tools/ask-user.md). Provide your preferences to help guide
the design.
3. **Review the plan:** Once Gemini CLI has a proposed strategy, it creates a
detailed implementation plan as a Markdown file in your plans directory.
- **View:** You can open and read this file to understand the proposed
changes.
- **Edit:** Press `Ctrl+X` to open the plan directly in your configured
external editor.
4. **Approve or iterate:** Gemini CLI will present the finalized plan for your
approval.
- **Approve:** If you're satisfied with the plan, approve it to start the
implementation immediately: **Yes, automatically accept edits** or **Yes,
manually accept edits**.
- **Iterate:** If the plan needs adjustments, provide feedback in the input
box or [edit the plan file directly](#collaborative-plan-editing). Gemini
CLI will refine the strategy and update the plan.
- **Cancel:** You can cancel your plan with `Esc`.
For more complex or specialized planning tasks, you can
[customize the planning workflow with skills](#custom-planning-with-skills).
### Collaborative plan editing
You can collaborate with Gemini CLI by making direct changes or leaving comments
in the implementation plan. This is often faster and more precise than
describing complex changes in natural language.
1. **Open the plan:** Press `Ctrl+X` when Gemini CLI presents a plan for
review.
2. **Edit or comment:** The plan opens in your configured external editor (for
example, VS Code or Vim). You can:
- **Modify steps:** Directly reorder, delete, or rewrite implementation
steps.
- **Leave comments:** Add inline questions or feedback (for example, "Wait,
shouldn't we use the existing `Logger` class here?").
3. **Save and close:** Save your changes and close the editor.
4. **Review and refine:** Gemini CLI automatically detects the changes, reviews
your comments, and adjusts the implementation strategy. It then presents the
refined plan for your final approval.
## How to exit Plan Mode
You can exit Plan Mode at any time, whether you have finalized a plan or want to
switch back to another mode.
- **Approve a plan:** When Gemini CLI presents a finalized plan, approving it
automatically exits Plan Mode and starts the implementation.
- **Keyboard shortcut:** Press `Shift+Tab` to cycle to the desired mode.
- **Natural language:** Ask Gemini CLI to "exit plan mode" or "stop planning."
## Tool Restrictions
Plan Mode enforces strict safety policies to prevent accidental changes.
These are the only allowed tools:
- **FileSystem (Read):**
[`read_file`](../tools/file-system.md#2-read_file-readfile),
[`list_directory`](../tools/file-system.md#1-list_directory-readfolder),
[`glob`](../tools/file-system.md#4-glob-findfiles)
- **Search:** [`grep_search`](../tools/file-system.md#5-grep_search-searchtext),
[`google_web_search`](../tools/web-search.md),
[`get_internal_docs`](../tools/internal-docs.md)
- **Research Subagents:**
[`codebase_investigator`](../core/subagents.md#codebase-investigator),
[`cli_help`](../core/subagents.md#cli-help-agent)
- **Interaction:** [`ask_user`](../tools/ask-user.md)
- **MCP tools (Read):** Read-only [MCP tools](../tools/mcp-server.md) (for
example, `github_read_issue`, `postgres_read_schema`) are allowed.
- **Planning (Write):**
  [`write_file`](../tools/file-system.md#3-write_file-writefile) and
  [`replace`](../tools/file-system.md#6-replace-edit) only allowed for `.md`
  files in the `~/.gemini/tmp/<project_hash>/plans/` directory or your
  [custom plans directory](#custom-plan-directory-and-policies).
- **Memory:** [`save_memory`](../tools/memory.md)
- **Skills:** [`activate_skill`](../cli/skills.md) (allows loading specialized
instructions and resources in a read-only manner)
## Customization and best practices
Plan Mode is secure by default, but you can adapt it to fit your specific
workflows. You can customize how Gemini CLI plans by using skills, adjusting
safety policies, changing where plans are stored, or adding hooks.
### Custom planning with skills
You can use [Agent Skills](../cli/skills.md) to customize how Gemini CLI
approaches planning for specific types of tasks. When a skill is activated
during Plan Mode, its specialized instructions and procedural workflows will
guide the research, design, and planning phases.
For example:
- A **"Database Migration"** skill could ensure the plan includes data safety
checks and rollback strategies.
- A **"Security Audit"** skill could prompt Gemini CLI to look for specific
vulnerabilities during codebase exploration.
- A **"Frontend Design"** skill could guide Gemini CLI to use specific UI
components and accessibility standards in its proposal.
To use a skill in Plan Mode, you can explicitly ask Gemini CLI to "use the
`<skill-name>` skill to plan..." or Gemini CLI may autonomously activate it
based on the task description.
### Custom policies
Plan Mode's default tool restrictions are managed by the
[policy engine](../reference/policy-engine.md) and defined in the built-in
`plan.toml` file. The built-in policy (Tier 1) enforces the read-only state,
but you can customize these rules by creating your own policies in your
`~/.gemini/policies/` directory (Tier 2).
#### Global vs. mode-specific rules
As described in the
[policy engine documentation](../reference/policy-engine.md#approval-modes), any
rule that does not explicitly specify `modes` is considered "always active" and
will apply to Plan Mode as well.
If you want a rule to apply to other modes but _not_ to Plan Mode, you must
explicitly specify the target modes. For example, to allow `npm test` in default
and Auto-Edit modes but not in Plan Mode:
```toml
[[rule]]
toolName = "run_shell_command"
commandPrefix = "npm test"
decision = "allow"
priority = 100
# By omitting "plan", this rule will not be active in Plan Mode.
modes = ["default", "autoEdit"]
```
#### Example: Automatically approve read-only MCP tools
By default, read-only MCP tools require user confirmation in Plan Mode. You can
use `toolAnnotations` and the `mcpName` wildcard to customize this behavior for
your specific environment.
`~/.gemini/policies/mcp-read-only.toml`
```toml
[[rule]]
mcpName = "*"
toolAnnotations = { readOnlyHint = true }
decision = "allow"
priority = 100
modes = ["plan"]
```
For more information on how the policy engine works, see the
[policy engine](../reference/policy-engine.md) docs.
#### Example: Allow git commands in Plan Mode
This rule lets you check the repository status and see changes while in Plan
Mode.
`~/.gemini/policies/git-research.toml`
```toml
[[rule]]
toolName = "run_shell_command"
commandPrefix = ["git status", "git diff"]
decision = "allow"
priority = 100
modes = ["plan"]
```
#### Example: Enable custom subagents in Plan Mode
Built-in research [subagents](../core/subagents.md) like
[`codebase_investigator`](../core/subagents.md#codebase-investigator) and
[`cli_help`](../core/subagents.md#cli-help-agent) are enabled by default in Plan
Mode. You can enable additional
[custom subagents](../core/subagents.md#creating-custom-subagents) by adding a
rule to your policy.
`~/.gemini/policies/research-subagents.toml`
```toml
[[rule]]
toolName = "my_custom_subagent"
decision = "allow"
priority = 100
modes = ["plan"]
```
Tell Gemini CLI it can use these tools in your prompt, for example: _"You can
check ongoing changes in git."_
### Custom plan directory and policies
By default, planning artifacts are stored in a managed temporary directory
outside your project: `~/.gemini/tmp/<project_hash>/plans/`.
You can configure a custom directory for plans in your `settings.json`. For
example, to store plans in a `.gemini/plans` directory within your project:
```json
{
"general": {
"plan": {
"directory": ".gemini/plans"
}
}
}
```
To maintain the safety of Plan Mode, user-configured paths for the plans
directory are restricted to the project root. This ensures that custom planning
locations defined within a project's workspace cannot be used to escape and
overwrite sensitive files elsewhere. Any user-configured directory must reside
within the project boundary.
Using a custom directory requires updating your
[policy engine](../reference/policy-engine.md) configurations to allow
`write_file` and `replace` in that specific location. For example, to allow
writing to the `.gemini/plans` directory within your project, create a policy
file at `~/.gemini/policies/plan-custom-directory.toml`:
```toml
[[rule]]
toolName = ["write_file", "replace"]
decision = "allow"
priority = 100
modes = ["plan"]
# Adjust the pattern to match your custom directory.
# This example matches any .md file in a .gemini/plans directory within the project.
argsPattern = "\"file_path\":\"[^\"]+[\\\\/]+\\.gemini[\\\\/]+plans[\\\\/]+[\\w-]+\\.md\""
```
### Using hooks with Plan Mode
You can use the [hook system](../hooks/writing-hooks.md) to automate parts of
the planning workflow or enforce additional checks when Gemini CLI transitions
into or out of Plan Mode.
Hooks such as `BeforeTool` or `AfterTool` can be configured to intercept the
`enter_plan_mode` and `exit_plan_mode` tool calls.
> [!WARNING] When hooks are triggered by **tool executions**, they do **not**
> run when you manually toggle Plan Mode using the `/plan` command or the
> `Shift+Tab` keyboard shortcut. If you need hooks to execute on mode changes,
> ensure the transition is initiated by the agent (e.g., by asking "start a plan
> for...").
#### Example: Archive approved plans to GCS (`AfterTool`)
If your organizational policy requires a record of all execution plans, you can
use an `AfterTool` hook to securely copy the plan artifact to Google Cloud
Storage whenever Gemini CLI exits Plan Mode to start the implementation.
**`.gemini/hooks/archive-plan.sh`:**
```bash
#!/usr/bin/env bash
# Extract the plan path from the tool input JSON
plan_path=$(jq -r '.tool_input.plan_path // empty')
if [ -f "$plan_path" ]; then
# Generate a unique filename using a timestamp
filename="$(date +%s)_$(basename "$plan_path")"
# Upload the plan to GCS in the background so it doesn't block the CLI
gsutil cp "$plan_path" "gs://my-audit-bucket/gemini-plans/$filename" > /dev/null 2>&1 &
fi
# AfterTool hooks should generally allow the flow to continue
echo '{"decision": "allow"}'
```
To register this `AfterTool` hook, add it to your `settings.json`:
```json
{
"hooks": {
"AfterTool": [
{
"matcher": "exit_plan_mode",
"hooks": [
{
"name": "archive-plan",
"type": "command",
"command": "./.gemini/hooks/archive-plan.sh"
}
]
}
]
}
}
```
## Commands
- **`/plan copy`**: Copy the currently approved plan to your clipboard.
## Planning workflows
Plan Mode provides building blocks for structured research and design. These are
implemented as [extensions](../extensions/index.md) using core planning tools
like [`enter_plan_mode`](../tools/planning.md#1-enter_plan_mode-enterplanmode),
[`exit_plan_mode`](../tools/planning.md#2-exit_plan_mode-exitplanmode), and
[`ask_user`](../tools/ask-user.md).
### Built-in planning workflow
The built-in planner uses an adaptive workflow to analyze your project, consult
you on trade-offs via [`ask_user`](../tools/ask-user.md), and draft a plan for
your approval.
### Custom planning workflows
You can install or create specialized planners to suit your workflow.
#### Conductor
[Conductor] is designed for spec-driven development. It organizes work into
"tracks" and stores persistent artifacts in your project's `conductor/`
directory:
- **Automate transitions:** Switches to read-only mode via
[`enter_plan_mode`](../tools/planning.md#1-enter_plan_mode-enterplanmode).
- **Streamline decisions:** Uses [`ask_user`](../tools/ask-user.md) for
architectural choices.
- **Maintain project context:** Stores artifacts in the project directory using
[custom plan directory and policies](#custom-plan-directory-and-policies).
- **Handoff execution:** Transitions to implementation via
[`exit_plan_mode`](../tools/planning.md#2-exit_plan_mode-exitplanmode).
#### Build your own
Since Plan Mode is built on modular building blocks, you can develop your own
custom planning workflow as an [extension](../extensions/index.md). By
leveraging core tools and [custom policies](#custom-policies), you can define
how Gemini CLI researches and stores plans for your specific domain.
To build a custom planning workflow, you can use:
- **Tool usage:** Use core tools like
[`enter_plan_mode`](../tools/planning.md#1-enter_plan_mode-enterplanmode),
[`ask_user`](../tools/ask-user.md), and
[`exit_plan_mode`](../tools/planning.md#2-exit_plan_mode-exitplanmode) to
manage the research and design process.
- **Customization:** Set your own storage locations and policy rules using
[custom plan directories](#custom-plan-directory-and-policies) and
[custom policies](#custom-policies).
> [!TIP]
> Use [Conductor] as a reference when building your own custom
> planning workflow.
By using Plan Mode as its execution environment, your custom methodology can
enforce read-only safety during the design phase while benefiting from
high-reasoning model routing.
## Automatic Model Routing
When using an [auto model](../reference/configuration.md#model), Gemini CLI
automatically optimizes [model routing](../cli/telemetry.md#model-routing) based
on the current phase of your task:
1. **Planning Phase:** While in Plan Mode, the CLI routes requests to a
high-reasoning **Pro** model to ensure robust architectural decisions and
high-quality plans.
2. **Implementation Phase:** Once a plan is approved and you exit Plan Mode,
the CLI detects the existence of the approved plan and automatically
switches to a high-speed **Flash** model. This provides a faster, more
responsive experience during the implementation of the plan.
This behavior is enabled by default to provide the best balance of quality and
performance. You can disable this automatic switching in your settings:
```json
{
"general": {
"plan": {
"modelRouting": false
}
}
}
```
## Cleanup
By default, Gemini CLI automatically cleans up old session data, including all
associated plan files and task trackers.
- **Default behavior:** Sessions (and their plans) are retained for **30 days**.
- **Configuration:** You can customize this behavior via the `/settings` command
(search for **Session Retention**) or in your `settings.json` file. See
[session retention](../cli/session-management.md#session-retention) for more
details.
Manual deletion also removes all associated artifacts:
- **Command Line:** Use `gemini --delete-session <index-or-id>`.
- **Session Browser:** Run `/resume`, navigate to a session, and press `x`.
If you use a [custom plans directory](#custom-plan-directory-and-policies),
those files are not automatically deleted and must be managed manually.
## Non-interactive execution
When running Gemini CLI in non-interactive environments (such as headless
scripts or CI/CD pipelines), Plan Mode optimizes for automated workflows:
- **Automatic transitions:** The policy engine automatically approves the
`enter_plan_mode` and `exit_plan_mode` tools without prompting for user
confirmation.
- **Automated implementation:** When exiting Plan Mode to execute the plan,
Gemini CLI automatically switches to
[YOLO mode](../reference/policy-engine.md#approval-modes) instead of the
standard Default mode. This allows the CLI to execute the implementation steps
automatically without hanging on interactive tool approvals.
**Example:**
```bash
gemini --approval-mode plan -p "Analyze telemetry and suggest improvements"
```
[`plan.toml`]:
https://github.com/google-gemini/gemini-cli/blob/main/packages/core/src/policy/policies/plan.toml
[Conductor]: https://github.com/gemini-cli-extensions/conductor
[open an issue]: https://github.com/google-gemini/gemini-cli/issues
================================================
FILE: docs/cli/rewind.md
================================================
# Rewind
The `/rewind` command lets you go back to a previous state in your conversation
and, optionally, revert any file changes made by the AI during those
interactions. This is a powerful tool for undoing mistakes, exploring different
approaches, or simply cleaning up your session history.
## Usage
To use the rewind feature, simply type `/rewind` into the input prompt and press
**Enter**.
Alternatively, you can use the keyboard shortcut: **Press `Esc` twice**.
## Interface
When you trigger a rewind, an interactive list of your previous interactions
appears.
1. **Select interaction:** Use the **Up/Down arrow keys** to navigate through
the list. The most recent interactions are at the bottom.
2. **Preview:** As you select an interaction, you'll see a preview of the user
prompt and, if applicable, the number of files changed during that step.
3. **Confirm selection:** Press **Enter** on the interaction you want to rewind
back to.
4. **Action selection:** After selecting an interaction, you'll be presented
with a confirmation dialog with up to three options:
- **Rewind conversation and revert code changes:** Reverts both the chat
history and the file modifications to the state before the selected
interaction.
- **Rewind conversation:** Only reverts the chat history. File changes are
kept.
- **Revert code changes:** Only reverts the file modifications. The chat
history is kept.
- **Do nothing (esc):** Cancels the rewind operation.
If no code changes were made since the selected point, the options related to
reverting code changes will be hidden.
## Key considerations
- **Destructive action:** Rewinding is a destructive action for your current
session history and potentially your files. Use it with care.
- **Agent awareness:** When you rewind the conversation, the AI model loses all
memory of the interactions that were removed. If you only revert code changes,
you may need to inform the model that the files have changed.
- **Manual edits:** Rewinding only affects file changes made by the AI's edit
tools. It does **not** undo manual edits you've made or changes triggered by
the shell tool (`!`).
- **Compression:** Rewind works across chat compression points by reconstructing
the history from stored session data.
================================================
FILE: docs/cli/sandbox.md
================================================
# Sandboxing in the Gemini CLI
This document provides a guide to sandboxing in the Gemini CLI, including
prerequisites, quickstart, and configuration.
## Prerequisites
Before using sandboxing, you need to install and set up the Gemini CLI:
```bash
npm install -g @google/gemini-cli
```
To verify the installation:
```bash
gemini --version
```
## Overview of sandboxing
Sandboxing isolates potentially dangerous operations (such as shell commands or
file modifications) from your host system, providing a security barrier between
AI operations and your environment.
The benefits of sandboxing include:
- **Security**: Prevent accidental system damage or data loss.
- **Isolation**: Limit file system access to project directory.
- **Consistency**: Ensure reproducible environments across different systems.
- **Safety**: Reduce risk when working with untrusted code or experimental
commands.
## Sandboxing methods
Your ideal method of sandboxing may differ depending on your platform and your
preferred container solution.
### 1. macOS Seatbelt (macOS only)
Lightweight, built-in sandboxing using `sandbox-exec`.
**Default profile**: `permissive-open` - restricts writes outside project
directory but allows most other operations.
### 2. Container-based (Docker/Podman)
Cross-platform sandboxing with complete process isolation.
**Note**: Requires building the sandbox image locally or using a published image
from your organization's registry.
### 3. Windows Native Sandbox (Windows only)
... **Troubleshooting and Side Effects:**
The Windows Native sandbox uses the `icacls` command to set a "Low Mandatory
Level" on files and directories it needs to write to.
- **Persistence**: These integrity level changes are persistent on the
filesystem. Even after the sandbox session ends, files created or modified by
the sandbox will retain their "Low" integrity level.
- **Manual Reset**: If you need to reset the integrity level of a file or
directory, you can use:
```powershell
icacls "C:\path\to\dir" /setintegritylevel Medium
```
- **System Folders**: The sandbox manager automatically skips setting integrity
levels on system folders (like `C:\Windows`) for safety.
### 4. gVisor / runsc (Linux only)
Strongest isolation available: runs containers inside a user-space kernel via
[gVisor](https://github.com/google/gvisor). gVisor intercepts all container
system calls and handles them in a sandboxed kernel written in Go, providing a
strong security barrier between AI operations and the host OS.
**Prerequisites:**
- Linux (gVisor supports Linux only)
- Docker installed and running
- gVisor/runsc runtime configured
When you set `sandbox: "runsc"`, Gemini CLI runs
`docker run --runtime=runsc ...` to execute containers with gVisor isolation.
runsc is not auto-detected; you must specify it explicitly (e.g.
`GEMINI_SANDBOX=runsc` or `sandbox: "runsc"`).
To set up runsc:
1. Install the runsc binary.
2. Configure the Docker daemon to use the runsc runtime.
3. Verify the installation.
### 5. LXC/LXD (Linux only, experimental)
Full-system container sandboxing using LXC/LXD. Unlike Docker/Podman, LXC
containers run a complete Linux system with `systemd`, `snapd`, and other system
services. This is ideal for tools that don't work in standard Docker containers,
such as Snapcraft and Rockcraft.
**Prerequisites**:
- Linux only.
- LXC/LXD must be installed (`snap install lxd` or `apt install lxd`).
- A container must be created and running before starting Gemini CLI. Gemini
does **not** create the container automatically.
**Quick setup**:
```bash
# Initialize LXD (first time only)
lxd init --auto
# Create and start an Ubuntu container
lxc launch ubuntu:24.04 gemini-sandbox
# Enable LXC sandboxing
export GEMINI_SANDBOX=lxc
gemini -p "build the project"
```
**Custom container name**:
```bash
export GEMINI_SANDBOX=lxc
export GEMINI_SANDBOX_IMAGE=my-snapcraft-container
gemini -p "build the snap"
```
**Limitations**:
- Linux only (LXC is not available on macOS or Windows).
- The container must already exist and be running.
- The workspace directory is bind-mounted into the container at the same
absolute path — the path must be writable inside the container.
- Used with tools like Snapcraft or Rockcraft that require a full system.
## Quickstart
```bash
# Enable sandboxing with command flag
gemini -s -p "analyze the code structure"
```
**Use environment variable**
**macOS/Linux**
```bash
export GEMINI_SANDBOX=true
gemini -p "run the test suite"
```
**Windows (PowerShell)**
```powershell
$env:GEMINI_SANDBOX="true"
gemini -p "run the test suite"
```
**Configure in settings.json**
```json
{
"tools": {
"sandbox": "docker"
}
}
```
## Configuration
### Enable sandboxing (in order of precedence)
1. **Command flag**: `-s` or `--sandbox`
2. **Environment variable**:
`GEMINI_SANDBOX=true|docker|podman|sandbox-exec|runsc|lxc`
3. **Settings file**: `"sandbox": true` in the `tools` object of your
`settings.json` file (e.g., `{"tools": {"sandbox": true}}`).
### macOS Seatbelt profiles
Built-in profiles (set via `SEATBELT_PROFILE` env var):
- `permissive-open` (default): Write restrictions, network allowed
- `permissive-proxied`: Write restrictions, network via proxy
- `restrictive-open`: Strict restrictions, network allowed
- `restrictive-proxied`: Strict restrictions, network via proxy
- `strict-open`: Read and write restrictions, network allowed
- `strict-proxied`: Read and write restrictions, network via proxy
### Custom sandbox flags
For container-based sandboxing, you can inject custom flags into the `docker` or
`podman` command using the `SANDBOX_FLAGS` environment variable. This is useful
for advanced configurations, such as disabling security features for specific
use cases.
**Example (Podman)**:
To disable SELinux labeling for volume mounts, you can set the following:
**macOS/Linux**
```bash
export SANDBOX_FLAGS="--security-opt label=disable"
```
**Windows (PowerShell)**
```powershell
$env:SANDBOX_FLAGS="--security-opt label=disable"
```
Multiple flags can be provided as a space-separated string:
**macOS/Linux**
```bash
export SANDBOX_FLAGS="--flag1 --flag2=value"
```
**Windows (PowerShell)**
```powershell
$env:SANDBOX_FLAGS="--flag1 --flag2=value"
```
## Linux UID/GID handling
The sandbox automatically handles user permissions on Linux. Override these
permissions with:
**macOS/Linux**
```bash
export SANDBOX_SET_UID_GID=true # Force host UID/GID
export SANDBOX_SET_UID_GID=false # Disable UID/GID mapping
```
**Windows (PowerShell)**
```powershell
$env:SANDBOX_SET_UID_GID="true" # Force host UID/GID
$env:SANDBOX_SET_UID_GID="false" # Disable UID/GID mapping
```
## Troubleshooting
### Common issues
**"Operation not permitted"**
- Operation requires access outside sandbox.
- Try more permissive profile or add mount points.
**Missing commands**
- Add to custom Dockerfile.
- Install via `sandbox.bashrc`.
**Network issues**
- Check sandbox profile allows network.
- Verify proxy configuration.
### Debug mode
```bash
DEBUG=1 gemini -s -p "debug command"
```
> [!NOTE]
> If you have `DEBUG=true` in a project's `.env` file, it won't affect
> gemini-cli due to automatic exclusion. Use `.gemini/.env` files for
> gemini-cli specific debug settings.
### Inspect sandbox
```bash
# Check environment
gemini -s -p "run shell command: env | grep SANDBOX"
# List mounts
gemini -s -p "run shell command: mount | grep workspace"
```
## Security notes
- Sandboxing reduces but doesn't eliminate all risks.
- Use the most restrictive profile that allows your work.
- Container overhead is minimal after first build.
- GUI applications may not work in sandboxes.
## Related documentation
- [Configuration](../reference/configuration.md): Full configuration options.
- [Commands](../reference/commands.md): Available commands.
- [Troubleshooting](../resources/troubleshooting.md): General troubleshooting.
================================================
FILE: docs/cli/session-management.md
================================================
# Session management
Session management saves your conversation history so you can resume your work
where you left off. Use these features to review past interactions, manage
history across different projects, and configure how long data is retained.
## Automatic saving
Your session history is recorded automatically as you interact with the model.
This background process ensures your work is preserved even if you interrupt a
session.
- **What is saved:** The complete conversation history, including:
- Your prompts and the model's responses.
- All tool executions (inputs and outputs).
- Token usage statistics (input, output, cached, etc.).
- Assistant thoughts and reasoning summaries (when available).
- **Location:** Sessions are stored in `~/.gemini/tmp/<project_hash>/chats/`,
  where `<project_hash>` is a unique identifier based on your project's root
  directory.
- **Scope:** Sessions are project-specific. Switching directories to a different
project switches to that project's session history.
## Resuming sessions
You can resume a previous session to continue the conversation with all prior
context restored. Resuming is supported both through command-line flags and an
interactive browser.
### From the command line
When starting Gemini CLI, use the `--resume` (or `-r`) flag to load existing
sessions.
- **Resume latest:**
```bash
gemini --resume
```
This immediately loads the most recent session.
- **Resume by index:** List available sessions first (see
[Listing sessions](#listing-sessions)), then use the index number:
```bash
gemini --resume 1
```
- **Resume by ID:** You can also provide the full session UUID:
```bash
gemini --resume a1b2c3d4-e5f6-7890-abcd-ef1234567890
```
### From the interactive interface
While the CLI is running, use the `/resume` slash command to open the **Session
Browser**:
```text
/resume
```
When typing `/resume` (or `/chat`) in slash completion, commands are grouped
under titled separators:
- `-- auto --` (session browser)
- `list` is selectable and opens the session browser
- `-- checkpoints --` (manual tagged checkpoint commands)
Unique prefixes such as `/resum` and `/cha` resolve to the same grouped menu.
The Session Browser provides an interactive interface where you can perform the
following actions:
- **Browse:** Scroll through a list of your past sessions.
- **Preview:** See details like the session date, message count, and the first
user prompt.
- **Search:** Press `/` to enter search mode, then type to filter sessions by ID
or content.
- **Select:** Press **Enter** to resume the selected session.
- **Esc:** Press **Esc** to exit the Session Browser.
### Manual chat checkpoints
For named branch points inside a session, use chat checkpoints:
```text
/resume save decision-point
/resume list
/resume resume decision-point
```
Compatibility aliases:
- `/chat ...` works for the same commands.
- `/resume checkpoints ...` also remains supported during migration.
## Managing sessions
You can list and delete sessions to keep your history organized and manage disk
space.
### Listing sessions
To see a list of all available sessions for the current project from the command
line, use the `--list-sessions` flag:
```bash
gemini --list-sessions
```
Output example:
```text
Available sessions for this project (3):
1. Fix bug in auth (2 days ago) [a1b2c3d4]
2. Refactor database schema (5 hours ago) [e5f67890]
3. Update documentation (Just now) [abcd1234]
```
### Deleting sessions
You can remove old or unwanted sessions to free up space or declutter your
history.
**From the command line:** Use the `--delete-session` flag with an index or ID:
```bash
gemini --delete-session 2
```
**From the Session Browser:**
1. Open the browser with `/resume`.
2. Navigate to the session you want to remove.
3. Press **x**.
## Configuration
You can configure how Gemini CLI manages your session history in your
`settings.json` file. These settings let you control retention policies and
session lengths.
### Session retention
By default, Gemini CLI automatically cleans up old session data to prevent your
history from growing indefinitely. When a session is deleted, Gemini CLI also
removes all associated data, including implementation plans, task trackers, tool
outputs, and activity logs.
The default policy is to **retain sessions for 30 days**.
#### Configuration
You can customize these policies using the `/settings` command or by manually
editing your `settings.json` file:
```json
{
"general": {
"sessionRetention": {
"enabled": true,
"maxAge": "30d",
"maxCount": 50
}
}
}
```
- **`enabled`**: (boolean) Master switch for session cleanup. Defaults to
`true`.
- **`maxAge`**: (string) Duration to keep sessions (for example, "24h", "7d",
"4w"). Sessions older than this are deleted. Defaults to `"30d"`.
- **`maxCount`**: (number) Maximum number of sessions to retain. The oldest
sessions exceeding this count are deleted. Defaults to undefined (unlimited).
- **`minRetention`**: (string) Minimum retention period (safety limit). Defaults
to `"1d"`. Sessions newer than this period are never deleted by automatic
cleanup.
### Session limits
You can limit the length of individual sessions to prevent context windows from
becoming too large and expensive.
```json
{
"model": {
"maxSessionTurns": 100
}
}
```
- **`maxSessionTurns`**: (number) The maximum number of turns (user and model
exchanges) allowed in a single session. Set to `-1` for unlimited (default).
**Behavior when limit is reached:**
- **Interactive mode:** The CLI shows an informational message and stops
sending requests to the model. You must manually start a new session.
- **Non-interactive mode:** The CLI exits with an error.
## Next steps
- Explore the [Memory tool](../tools/memory.md) to save persistent information
across sessions.
- Learn how to [Checkpoint](./checkpointing.md) your session state.
- Check out the [CLI reference](./cli-reference.md) for all command-line flags.
================================================
FILE: docs/cli/settings.md
================================================
# Gemini CLI settings (`/settings` command)
Control your Gemini CLI experience with the `/settings` command. The `/settings`
command opens a dialog to view and edit all your Gemini CLI settings, including
your UI experience, keybindings, and accessibility features.
Your Gemini CLI settings are stored in a `settings.json` file. In addition to
using the `/settings` command, you can also edit them in one of the following
locations:
- **User settings**: `~/.gemini/settings.json`
- **Workspace settings**: `your-project/.gemini/settings.json`
> [!IMPORTANT]
> Workspace settings override user settings.
## Settings reference
Here is a list of all the available settings, grouped by category and ordered as
they appear in the UI.
### General
| UI Label | Setting | Description | Default |
| ----------------------- | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
| Vim Mode | `general.vimMode` | Enable Vim keybindings | `false` |
| Default Approval Mode | `general.defaultApprovalMode` | The default approval mode for tool execution. 'default' prompts for approval, 'auto_edit' auto-approves edit tools, and 'plan' is read-only mode. YOLO mode (auto-approve all actions) can only be enabled via command line (--yolo or --approval-mode=yolo). | `"default"` |
| Enable Auto Update | `general.enableAutoUpdate` | Enable automatic updates. | `true` |
| Enable Notifications | `general.enableNotifications` | Enable run-event notifications for action-required prompts and session completion. Currently macOS only. | `false` |
| Plan Directory | `general.plan.directory` | The directory where planning artifacts are stored. If not specified, defaults to the system temporary directory. | `undefined` |
| Plan Model Routing | `general.plan.modelRouting` | Automatically switch between Pro and Flash models based on Plan Mode status. Uses Pro for the planning phase and Flash for the implementation phase. | `true` |
| Retry Fetch Errors | `general.retryFetchErrors` | Retry on "exception TypeError: fetch failed sending request" errors. | `true` |
| Max Chat Model Attempts | `general.maxAttempts` | Maximum number of attempts for requests to the main chat model. Cannot exceed 10. | `10` |
| Debug Keystroke Logging | `general.debugKeystrokeLogging` | Enable debug logging of keystrokes to the console. | `false` |
| Enable Session Cleanup | `general.sessionRetention.enabled` | Enable automatic session cleanup | `true` |
| Keep chat history | `general.sessionRetention.maxAge` | Automatically delete chats older than this time period (e.g., "30d", "7d", "24h", "1w") | `"30d"` |
### Output
| UI Label | Setting | Description | Default |
| ------------- | --------------- | ------------------------------------------------------ | -------- |
| Output Format | `output.format` | The format of the CLI output. Can be `text` or `json`. | `"text"` |
### UI
| UI Label | Setting | Description | Default |
| ------------------------------------ | -------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- |
| Auto Theme Switching | `ui.autoThemeSwitching` | Automatically switch between default light and dark themes based on terminal background color. | `true` |
| Terminal Background Polling Interval | `ui.terminalBackgroundPollingInterval` | Interval in seconds to poll the terminal background color. | `60` |
| Hide Window Title | `ui.hideWindowTitle` | Hide the window title bar | `false` |
| Inline Thinking | `ui.inlineThinkingMode` | Display model thinking inline: off or full. | `"off"` |
| Show Thoughts in Title | `ui.showStatusInTitle` | Show Gemini CLI model thoughts in the terminal window title during the working phase | `false` |
| Dynamic Window Title | `ui.dynamicWindowTitle` | Update the terminal window title with current status icons (Ready: ◇, Action Required: ✋, Working: ✦) | `true` |
| Show Home Directory Warning | `ui.showHomeDirectoryWarning` | Show a warning when running Gemini CLI in the home directory. | `true` |
| Show Compatibility Warnings | `ui.showCompatibilityWarnings` | Show warnings about terminal or OS compatibility issues. | `true` |
| Hide Tips | `ui.hideTips` | Hide helpful tips in the UI | `false` |
| Escape Pasted @ Symbols | `ui.escapePastedAtSymbols` | When enabled, @ symbols in pasted text are escaped to prevent unintended @path expansion. | `false` |
| Show Shortcuts Hint | `ui.showShortcutsHint` | Show the "? for shortcuts" hint above the input. | `true` |
| Hide Banner | `ui.hideBanner` | Hide the application banner | `false` |
| Hide Context Summary | `ui.hideContextSummary` | Hide the context summary (GEMINI.md, MCP servers) above the input. | `false` |
| Hide CWD | `ui.footer.hideCWD` | Hide the current working directory in the footer. | `false` |
| Hide Sandbox Status | `ui.footer.hideSandboxStatus` | Hide the sandbox status indicator in the footer. | `false` |
| Hide Model Info | `ui.footer.hideModelInfo` | Hide the model name and context usage in the footer. | `false` |
| Hide Context Window Percentage | `ui.footer.hideContextPercentage` | Hides the context window usage percentage. | `true` |
| Hide Footer | `ui.hideFooter` | Hide the footer from the UI | `false` |
| Show Memory Usage | `ui.showMemoryUsage` | Display memory usage information in the UI | `false` |
| Show Line Numbers | `ui.showLineNumbers` | Show line numbers in the chat. | `true` |
| Show Citations | `ui.showCitations` | Show citations for generated text in the chat. | `false` |
| Show Model Info In Chat | `ui.showModelInfoInChat` | Show the model name in the chat for each model turn. | `false` |
| Show User Identity | `ui.showUserIdentity` | Show the signed-in user's identity (e.g. email) in the UI. | `true` |
| Use Alternate Screen Buffer | `ui.useAlternateBuffer` | Use an alternate screen buffer for the UI, preserving shell history. | `false` |
| Use Background Color | `ui.useBackgroundColor` | Whether to use background colors in the UI. | `true` |
| Incremental Rendering | `ui.incrementalRendering` | Enable incremental rendering for the UI. This option will reduce flickering but may cause rendering artifacts. Only supported when useAlternateBuffer is enabled. | `true` |
| Show Spinner | `ui.showSpinner` | Show the spinner during operations. | `true` |
| Loading Phrases | `ui.loadingPhrases` | What to show while the model is working: tips, witty comments, both, or nothing. | `"tips"` |
| Error Verbosity | `ui.errorVerbosity` | Controls whether recoverable errors are hidden (low) or fully shown (full). | `"low"` |
| Screen Reader Mode | `ui.accessibility.screenReader` | Render output in plain-text to be more screen reader accessible | `false` |
### IDE
| UI Label | Setting | Description | Default |
| -------- | ------------- | ---------------------------- | ------- |
| IDE Mode | `ide.enabled` | Enable IDE integration mode. | `false` |
### Billing
| UI Label | Setting | Description | Default |
| ---------------- | ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| Overage Strategy | `billing.overageStrategy` | How to handle quota exhaustion when AI credits are available. 'ask' prompts each time, 'always' automatically uses credits, 'never' disables credit usage. | `"ask"` |
### Model
| UI Label | Setting | Description | Default |
| ----------------------------- | ---------------------------- | -------------------------------------------------------------------------------------- | ----------- |
| Model | `model.name` | The Gemini model to use for conversations. | `undefined` |
| Max Session Turns             | `model.maxSessionTurns`      | Maximum number of user/model/tool turns allowed in a single session. -1 means unlimited.       | `-1`        |
| Context Compression Threshold | `model.compressionThreshold` | The fraction of context usage at which to trigger context compression (e.g. 0.2, 0.3). | `0.5` |
| Disable Loop Detection | `model.disableLoopDetection` | Disable automatic detection and prevention of infinite loops. | `false` |
| Skip Next Speaker Check | `model.skipNextSpeakerCheck` | Skip the next speaker check. | `true` |
### Context
| UI Label | Setting | Description | Default |
| ------------------------------------ | ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| Memory Discovery Max Dirs | `context.discoveryMaxDirs` | Maximum number of directories to search for memory. | `200` |
| Load Memory From Include Directories | `context.loadMemoryFromIncludeDirectories` | Controls how /memory reload loads GEMINI.md files. When true, include directories are scanned; when false, only the current directory is used. | `false` |
| Respect .gitignore | `context.fileFiltering.respectGitIgnore` | Respect .gitignore files when searching. | `true` |
| Respect .geminiignore | `context.fileFiltering.respectGeminiIgnore` | Respect .geminiignore files when searching. | `true` |
| Enable Recursive File Search | `context.fileFiltering.enableRecursiveFileSearch` | Enable recursive file search functionality when completing @ references in the prompt. | `true` |
| Enable Fuzzy Search | `context.fileFiltering.enableFuzzySearch` | Enable fuzzy search when searching for files. | `true` |
| Custom Ignore File Paths | `context.fileFiltering.customIgnoreFilePaths` | Additional ignore file paths to respect. These files take precedence over .geminiignore and .gitignore. Files earlier in the array take precedence over files later in the array, e.g. the first file takes precedence over the second one. | `[]` |
### Tools
| UI Label | Setting | Description | Default |
| -------------------------------- | ------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| Sandbox Allowed Paths | `tools.sandboxAllowedPaths` | List of additional paths that the sandbox is allowed to access. | `[]` |
| Sandbox Network Access | `tools.sandboxNetworkAccess` | Whether the sandbox is allowed to access the network. | `false` |
| Enable Interactive Shell | `tools.shell.enableInteractiveShell` | Use node-pty for an interactive shell experience. Fallback to child_process still applies. | `true` |
| Show Color | `tools.shell.showColor` | Show color in shell output. | `false` |
| Use Ripgrep | `tools.useRipgrep` | Use ripgrep for file content search instead of the fallback implementation. Provides faster search performance. | `true` |
| Tool Output Truncation Threshold | `tools.truncateToolOutputThreshold` | Maximum characters to show when truncating large tool outputs. Set to 0 or negative to disable truncation. | `40000` |
| Disable LLM Correction | `tools.disableLLMCorrection` | Disable LLM-based error correction for edit tools. When enabled, tools will fail immediately if exact string matches are not found, instead of attempting to self-correct. | `true` |
### Security
| UI Label | Setting | Description | Default |
| ------------------------------------- | ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- |
| Tool Sandboxing | `security.toolSandboxing` | Experimental tool-level sandboxing (implementation in progress). | `false` |
| Disable YOLO Mode | `security.disableYoloMode` | Disable YOLO mode, even if enabled by a flag. | `false` |
| Disable Always Allow | `security.disableAlwaysAllow` | Disable "Always allow" options in tool confirmation dialogs. | `false` |
| Allow Permanent Tool Approval | `security.enablePermanentToolApproval` | Enable the "Allow for all future sessions" option in tool confirmation dialogs. | `false` |
| Auto-add to Policy by Default | `security.autoAddToPolicyByDefault` | When enabled, the "Allow for all future sessions" option becomes the default choice for low-risk tools in trusted workspaces. | `false` |
| Blocks extensions from Git | `security.blockGitExtensions` | Blocks installing and loading extensions from Git. | `false` |
| Extension Source Regex Allowlist | `security.allowedExtensions` | List of Regex patterns for allowed extensions. If nonempty, only extensions that match the patterns in this list are allowed. Overrides the blockGitExtensions setting. | `[]` |
| Folder Trust | `security.folderTrust.enabled` | Setting to track whether Folder trust is enabled. | `true` |
| Enable Environment Variable Redaction | `security.environmentVariableRedaction.enabled` | Enable redaction of environment variables that may contain secrets. | `false` |
| Enable Context-Aware Security | `security.enableConseca` | Enable the context-aware security checker. This feature uses an LLM to dynamically generate and enforce security policies for tool use based on your prompt, providing an additional layer of protection against unintended actions. | `false` |
### Advanced
| UI Label | Setting | Description | Default |
| --------------------------------- | ------------------------------ | --------------------------------------------- | ------- |
| Auto Configure Max Old Space Size | `advanced.autoConfigureMemory` | Automatically configure Node.js memory limits | `false` |
### Experimental
| UI Label | Setting | Description | Default |
| -------------------------- | ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| Enable Tool Output Masking | `experimental.toolOutputMasking.enabled` | Enables tool output masking to save tokens. | `true` |
| Use OSC 52 Paste | `experimental.useOSC52Paste` | Use OSC 52 for pasting. This may be more robust than the default system when using remote terminal sessions (if your terminal is configured to allow it). | `false` |
| Use OSC 52 Copy | `experimental.useOSC52Copy` | Use OSC 52 for copying. This may be more robust than the default system when using remote terminal sessions (if your terminal is configured to allow it). | `false` |
| Plan | `experimental.plan` | Enable Plan Mode. | `true` |
| Model Steering | `experimental.modelSteering` | Enable model steering (user hints) to guide the model during tool execution. | `false` |
| Direct Web Fetch | `experimental.directWebFetch` | Enable web fetch behavior that bypasses LLM summarization. | `false` |
| Memory Manager Agent | `experimental.memoryManager` | Replace the built-in save_memory tool with a memory manager subagent that supports adding, removing, de-duplicating, and organizing memories. | `false` |
| Topic & Update Narration | `experimental.topicUpdateNarration` | Enable the experimental Topic & Update communication model for reduced chattiness and structured progress reporting. | `false` |
### Skills
| UI Label | Setting | Description | Default |
| ------------------- | ---------------- | -------------------- | ------- |
| Enable Agent Skills | `skills.enabled` | Enable Agent Skills. | `true` |
### HooksConfig
| UI Label | Setting | Description | Default |
| ------------------ | --------------------------- | -------------------------------------------------------------------------------- | ------- |
| Enable Hooks | `hooksConfig.enabled` | Canonical toggle for the hooks system. When disabled, no hooks will be executed. | `true` |
| Hook Notifications | `hooksConfig.notifications` | Show visual indicators when hooks are executing. | `true` |
================================================
FILE: docs/cli/skills.md
================================================
# Agent Skills
Agent Skills allow you to extend Gemini CLI with specialized expertise,
procedural workflows, and task-specific resources. Based on the
[Agent Skills](https://agentskills.io) open standard, a "skill" is a
self-contained directory that packages instructions and assets into a
discoverable capability.
## Overview
Unlike general context files ([`GEMINI.md`](./gemini-md.md)), which provide
persistent workspace-wide background, Skills represent **on-demand expertise**.
This allows Gemini to maintain a vast library of specialized capabilities—such
as security auditing, cloud deployments, or codebase migrations—without
cluttering the model's immediate context window.
Gemini autonomously decides when to employ a skill based on your request and the
skill's description. When a relevant skill is identified, the model "pulls in"
the full instructions and resources required to complete the task using the
`activate_skill` tool.
## Key Benefits
- **Shared Expertise:** Package complex workflows (like a specific team's PR
review process) into a folder that anyone can use.
- **Repeatable Workflows:** Ensure complex multi-step tasks are performed
consistently by providing a procedural framework.
- **Resource Bundling:** Include scripts, templates, or example data alongside
instructions so the agent has everything it needs.
- **Progressive Disclosure:** Only skill metadata (name and description) is
loaded initially. Detailed instructions and resources are only disclosed when
the model explicitly activates the skill, saving context tokens.
## Skill Discovery Tiers
Gemini CLI discovers skills from three primary locations:
1. **Workspace Skills**: Located in `.gemini/skills/` or the `.agents/skills/`
alias. Workspace skills are typically committed to version control and
shared with the team.
2. **User Skills**: Located in `~/.gemini/skills/` or the `~/.agents/skills/`
alias. These are personal skills available across all your workspaces.
3. **Extension Skills**: Skills bundled within installed
[extensions](../extensions/index.md).
**Precedence:** If multiple skills share the same name, higher-precedence
locations override lower ones: **Workspace > User > Extension**.
Within the same tier (user or workspace), the `.agents/skills/` alias takes
precedence over the `.gemini/skills/` directory. This generic alias provides an
intuitive path for managing agent-specific expertise that remains compatible
across different AI agent tools.
## Managing Skills
### In an Interactive Session
Use the `/skills` slash command to view and manage available expertise:
- `/skills list` (default): Shows all discovered skills and their status.
- `/skills link <path>`: Links agent skills from a local directory via symlink.
- `/skills disable <name>`: Prevents a specific skill from being used.
- `/skills enable <name>`: Re-enables a disabled skill.
- `/skills reload`: Refreshes the list of discovered skills from all tiers.
> [!NOTE]
> `/skills disable` and `/skills enable` default to the `user` scope. Use
> `--scope workspace` to manage workspace-specific settings.
### From the Terminal
The `gemini skills` command provides management utilities:
```bash
# List all discovered skills
gemini skills list
# Link agent skills from a local directory via symlink
# Discovers skills (SKILL.md or */SKILL.md) and creates symlinks in ~/.gemini/skills
# (or ~/.agents/skills)
gemini skills link /path/to/my-skills-repo
# Link to the workspace scope (.gemini/skills or .agents/skills)
gemini skills link /path/to/my-skills-repo --scope workspace
# Install a skill from a Git repository, local directory, or zipped skill file (.skill)
# Uses the user scope by default (~/.gemini/skills or ~/.agents/skills)
gemini skills install https://github.com/user/repo.git
gemini skills install /path/to/local/skill
gemini skills install /path/to/local/my-expertise.skill
# Install a specific skill from a monorepo or subdirectory using --path
gemini skills install https://github.com/my-org/my-skills.git --path skills/frontend-design
# Install to the workspace scope (.gemini/skills or .agents/skills)
gemini skills install /path/to/skill --scope workspace
# Uninstall a skill by name
gemini skills uninstall my-expertise --scope workspace
# Enable a skill (globally)
gemini skills enable my-expertise
# Disable a skill. Can use --scope to specify workspace or user (defaults to workspace)
gemini skills disable my-expertise --scope workspace
```
## How it Works
1. **Discovery**: At the start of a session, Gemini CLI scans the discovery
tiers and injects the name and description of all enabled skills into the
system prompt.
2. **Activation**: When Gemini identifies a task matching a skill's
description, it calls the `activate_skill` tool.
3. **Consent**: You will see a confirmation prompt in the UI detailing the
skill's name, purpose, and the directory path it will gain access to.
4. **Injection**: Upon your approval:
- The `SKILL.md` body and folder structure is added to the conversation
history.
- The skill's directory is added to the agent's allowed file paths, granting
it permission to read any bundled assets.
5. **Execution**: The model proceeds with the specialized expertise active. It
is instructed to prioritize the skill's procedural guidance within reason.
### Skill activation
Once a skill is activated (typically by Gemini identifying a task that matches
the skill's description and your approval), its specialized instructions and
resources are loaded into the agent's context. A skill remains active and its
guidance is prioritized for the duration of the session.
## Creating your own skills
To create your own skills, see the [Create Agent Skills](./creating-skills.md)
guide.
================================================
FILE: docs/cli/system-prompt.md
================================================
# System Prompt Override (GEMINI_SYSTEM_MD)
The core system instructions that guide Gemini CLI can be completely replaced
with your own Markdown file. This feature is controlled via the
`GEMINI_SYSTEM_MD` environment variable.
## Overview
The `GEMINI_SYSTEM_MD` variable instructs the CLI to use an external Markdown
file for its system prompt, completely overriding the built-in default. This is
a full replacement, not a merge. If you use a custom file, none of the original
core instructions will apply unless you include them yourself.
This feature is intended for advanced users who need to enforce strict,
project-specific behavior or create a customized persona.
> [!TIP]
> You can export the current default system prompt to a file first, review
> it, and then selectively modify or replace it (see
> [“Export the default prompt”](#export-the-default-prompt-recommended)).
## How to enable
You can set the environment variable temporarily in your shell, or persist it
via a `.gemini/.env` file. See
[Persisting Environment Variables](../get-started/authentication.md#persisting-environment-variables).
- Use the project default path (`.gemini/system.md`):
- `GEMINI_SYSTEM_MD=true` or `GEMINI_SYSTEM_MD=1`
- The CLI reads `./.gemini/system.md` (relative to your current project
directory).
- Use a custom file path:
- `GEMINI_SYSTEM_MD=/absolute/path/to/my-system.md`
- Relative paths are supported and resolved from the current working
directory.
- Tilde expansion is supported (e.g., `~/my-system.md`).
- Disable the override (use built‑in prompt):
- `GEMINI_SYSTEM_MD=false` or `GEMINI_SYSTEM_MD=0` or unset the variable.
If the override is enabled but the target file does not exist, the CLI will
error with: `missing system prompt file '<path>'`.
## Quick examples
- One‑off session using a project file:
- `GEMINI_SYSTEM_MD=1 gemini`
- Persist for a project using `.gemini/.env`:
- Create `.gemini/system.md`, then add to `.gemini/.env`:
- `GEMINI_SYSTEM_MD=1`
- Use a custom file under your home directory:
- `GEMINI_SYSTEM_MD=~/prompts/SYSTEM.md gemini`
## UI indicator
When `GEMINI_SYSTEM_MD` is active, the CLI shows a `|⌐■_■|` indicator in the UI
to signal custom system‑prompt mode.
## Variable Substitution
When using a custom system prompt file, you can use the following variables to
dynamically include built-in content:
- `${AgentSkills}`: Injects a complete section (including header) of all
available agent skills.
- `${SubAgents}`: Injects a complete section (including header) of available
sub-agents.
- `${AvailableTools}`: Injects a bulleted list of all currently enabled tool
names.
- Tool Name Variables: Injects the actual name of a tool using the pattern:
`${toolName}_ToolName` (e.g., `${write_file_ToolName}`,
`${run_shell_command_ToolName}`).
This pattern is generated dynamically for all available tools.
### Example
```markdown
# Custom System Prompt
You are a helpful assistant. ${AgentSkills}
${SubAgents}
## Tooling
The following tools are available to you: ${AvailableTools}
You can use ${write_file_ToolName} to save logs.
```
## Export the default prompt (recommended)
Before overriding, export the current default prompt so you can review required
safety and workflow rules.
- Write the built‑in prompt to the project default path:
- `GEMINI_WRITE_SYSTEM_MD=1 gemini`
- Or write to a custom path:
- `GEMINI_WRITE_SYSTEM_MD=~/prompts/DEFAULT_SYSTEM.md gemini`
This creates the file and writes the current built‑in system prompt to it.
## Best practices: SYSTEM.md vs GEMINI.md
- SYSTEM.md (firmware):
- Non‑negotiable operational rules: safety, tool‑use protocols, approvals, and
mechanics that keep the CLI reliable.
- Stable across tasks and projects (or per project when needed).
- GEMINI.md (strategy):
- Persona, goals, methodologies, and project/domain context.
- Evolves per task; relies on SYSTEM.md for safe execution.
Keep SYSTEM.md minimal but complete for safety and tool operation. Keep
GEMINI.md focused on high‑level guidance and project specifics.
## Troubleshooting
- Error: `missing system prompt file '…'`
- Ensure the referenced path exists and is readable.
- For `GEMINI_SYSTEM_MD=1|true`, create `./.gemini/system.md` in your project.
- Override not taking effect
- Confirm the variable is loaded (use `.gemini/.env` or export in your shell).
- Paths are resolved from the current working directory; try an absolute path.
- Restore defaults
- Unset `GEMINI_SYSTEM_MD` or set it to `0`/`false`.
================================================
FILE: docs/cli/telemetry.md
================================================
# Observability with OpenTelemetry
Observability is the key to turning experimental AI into reliable software.
Gemini CLI provides built-in support for OpenTelemetry, transforming every agent
interaction into a rich stream of logs, metrics, and traces. This three-pillar
approach gives you the high-fidelity visibility needed to understand agent
behavior, optimize performance, and ensure reliability across your entire
workflow.
Whether you are debugging a complex tool interaction locally or monitoring
enterprise-wide usage in the cloud, Gemini CLI's observability system provides
the actionable intelligence needed to move from "black box" AI to predictable,
high-performance systems.
## OpenTelemetry integration
Gemini CLI integrates with **[OpenTelemetry]**, a vendor-neutral,
industry-standard observability framework.
The observability system provides:
- Universal compatibility: Export to any OpenTelemetry backend (Google Cloud,
Jaeger, Prometheus, Datadog, etc.).
- Standardized data: Use consistent formats and collection methods across your
toolchain.
- Future-proof integration: Connect with existing and future observability
infrastructure.
- No vendor lock-in: Switch between backends without changing your
instrumentation.
[OpenTelemetry]: https://opentelemetry.io/
## Configuration
You control telemetry behavior through the `.gemini/settings.json` file.
Environment variables can override these settings.
| Setting | Environment Variable | Description | Values | Default |
| -------------- | -------------------------------- | --------------------------------------------------- | ----------------- | ----------------------- |
| `enabled` | `GEMINI_TELEMETRY_ENABLED` | Enable or disable telemetry | `true`/`false` | `false` |
| `target` | `GEMINI_TELEMETRY_TARGET` | Where to send telemetry data | `"gcp"`/`"local"` | `"local"` |
| `otlpEndpoint` | `GEMINI_TELEMETRY_OTLP_ENDPOINT` | OTLP collector endpoint | URL string | `http://localhost:4317` |
| `otlpProtocol` | `GEMINI_TELEMETRY_OTLP_PROTOCOL` | OTLP transport protocol | `"grpc"`/`"http"` | `"grpc"` |
| `outfile` | `GEMINI_TELEMETRY_OUTFILE` | Save telemetry to file (overrides `otlpEndpoint`) | file path | - |
| `logPrompts` | `GEMINI_TELEMETRY_LOG_PROMPTS` | Include prompts in telemetry logs | `true`/`false` | `true` |
| `useCollector` | `GEMINI_TELEMETRY_USE_COLLECTOR` | Use external OTLP collector (advanced) | `true`/`false` | `false` |
| `useCliAuth` | `GEMINI_TELEMETRY_USE_CLI_AUTH` | Use CLI credentials for telemetry (GCP target only) | `true`/`false` | `false` |
| - | `GEMINI_CLI_SURFACE` | Optional custom label for traffic reporting | string | - |
**Note on boolean environment variables:** For boolean settings like `enabled`,
setting the environment variable to `true` or `1` enables the feature.
For detailed configuration information, see the
[Configuration guide](../reference/configuration.md).
## Google Cloud telemetry
You can export telemetry data directly to Google Cloud Trace, Cloud Monitoring,
and Cloud Logging.
### Prerequisites
You must complete several setup steps before enabling Google Cloud telemetry.
1. Set your Google Cloud project ID:
- To send telemetry to a separate project:
**macOS/Linux**
```bash
export OTLP_GOOGLE_CLOUD_PROJECT="your-telemetry-project-id"
```
**Windows (PowerShell)**
```powershell
$env:OTLP_GOOGLE_CLOUD_PROJECT="your-telemetry-project-id"
```
- To send telemetry to the same project as inference:
**macOS/Linux**
```bash
export GOOGLE_CLOUD_PROJECT="your-project-id"
```
**Windows (PowerShell)**
```powershell
$env:GOOGLE_CLOUD_PROJECT="your-project-id"
```
2. Authenticate with Google Cloud using one of these methods:
- **Method A: Application Default Credentials (ADC)**: Use this method for
service accounts or standard `gcloud` authentication.
- For user accounts:
```bash
gcloud auth application-default login
```
- For service accounts:
**macOS/Linux**
```bash
export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your/service-account.json"
```
**Windows (PowerShell)**
```powershell
$env:GOOGLE_APPLICATION_CREDENTIALS="C:\path\to\your\service-account.json"
```
   - **Method B: CLI Auth** (Direct export only): Simplest method for local
users. Gemini CLI uses the same OAuth credentials you used for login. To
enable this, set `useCliAuth: true` in your `.gemini/settings.json`:
```json
{
"telemetry": {
"enabled": true,
"target": "gcp",
"useCliAuth": true
}
}
```
> [!NOTE]
> This setting requires **Direct export** (in-process exporters)
> and cannot be used when `useCollector` is `true`. If both are enabled,
> telemetry will be disabled.
3. Ensure your account or service account has these IAM roles:
- Cloud Trace Agent
- Monitoring Metric Writer
- Logs Writer
4. Enable the required Google Cloud APIs:
```bash
gcloud services enable \
cloudtrace.googleapis.com \
monitoring.googleapis.com \
logging.googleapis.com \
--project="$OTLP_GOOGLE_CLOUD_PROJECT"
```
### Direct export
We recommend using direct export to send telemetry directly to Google Cloud
services.
1. Enable telemetry in `.gemini/settings.json`:
```json
{
"telemetry": {
"enabled": true,
"target": "gcp"
}
}
```
2. Run Gemini CLI and send prompts.
3. View logs, metrics, and traces in the Google Cloud Console. See
[View Google Cloud telemetry](#view-google-cloud-telemetry) for details.
### View Google Cloud telemetry
After you enable telemetry and run Gemini CLI, you can view your data in the
Google Cloud Console.
- **Logs:** [Logs Explorer](https://console.cloud.google.com/logs/)
- **Metrics:**
[Metrics Explorer](https://console.cloud.google.com/monitoring/metrics-explorer)
- **Traces:** [Trace Explorer](https://console.cloud.google.com/traces/list)
For detailed information on how to use these tools, see the following official
Google Cloud documentation:
- [View and analyze logs with Logs Explorer](https://cloud.google.com/logging/docs/view/logs-explorer-interface)
- [Create charts with Metrics Explorer](https://cloud.google.com/monitoring/charts/metrics-explorer)
- [Find and explore traces](https://cloud.google.com/trace/docs/finding-traces)
#### Monitoring dashboards
Gemini CLI provides a pre-configured
[Google Cloud Monitoring](https://cloud.google.com/monitoring) dashboard to
visualize your telemetry.
Find this dashboard under **Google Cloud Monitoring Dashboard Templates** as
"**Gemini CLI Monitoring**".



To learn more, see
[Instant insights: Gemini CLI’s pre-configured monitoring dashboards](https://cloud.google.com/blog/topics/developers-practitioners/instant-insights-gemini-clis-new-pre-configured-monitoring-dashboards/).
## Local telemetry
You can capture telemetry data locally for development and debugging. We
recommend using file-based output for local development.
1. Enable telemetry in `.gemini/settings.json`:
```json
{
"telemetry": {
"enabled": true,
"target": "local",
"outfile": ".gemini/telemetry.log"
}
}
```
2. Run Gemini CLI and send prompts.
3. View logs and metrics in `.gemini/telemetry.log`.
For advanced local telemetry setups (such as Jaeger or Genkit), see the
[Local development guide](../local-development.md#viewing-traces).
## Client identification
Gemini CLI includes identifiers in its `User-Agent` header to help you
differentiate and report on API traffic from different environments (for
example, identifying calls from Gemini Code Assist versus a standard terminal).
### Automatic identification
Most integrated environments are identified automatically without additional
configuration. The identifier is included as a prefix to the `User-Agent` and as
a "surface" tag in the parenthetical metadata.
| Environment | User-Agent Prefix | Surface Tag |
| :---------------------------------- | :--------------------------- | :---------- |
| **Gemini Code Assist (Agent Mode)** | `GeminiCLI-a2a-server` | `vscode` |
| **Zed (via ACP)** | `GeminiCLI-acp-zed` | `zed` |
| **Xcode (via ACP)**                  | `GeminiCLI-acp-xcode`        | `xcode`     |
| **IntelliJ IDEA (via ACP)** | `GeminiCLI-acp-intellijidea` | `jetbrains` |
| **Standard Terminal** | `GeminiCLI` | `terminal` |
**Example User-Agent:**
`GeminiCLI-a2a-server/0.34.0/gemini-pro (linux; x64; vscode)`
### Custom identification
You can provide a custom identifier for your own scripts or automation by
setting the `GEMINI_CLI_SURFACE` environment variable. This is useful for
tracking specific internal tools or distribution channels in your GCP logs.
**macOS/Linux**
```bash
export GEMINI_CLI_SURFACE="my-custom-tool"
```
**Windows (PowerShell)**
```powershell
$env:GEMINI_CLI_SURFACE="my-custom-tool"
```
When set, the value appears at the end of the `User-Agent` parenthetical:
`GeminiCLI/0.34.0/gemini-pro (linux; x64; my-custom-tool)`
## Logs, metrics, and traces
This section describes the structure of logs, metrics, and traces generated by
Gemini CLI.
Gemini CLI includes `session.id`, `installation.id`, `active_approval_mode`, and
`user.email` (when authenticated) as common attributes on all data.
### Logs
Logs provide timestamped records of specific events. Gemini CLI logs events
across several categories.
#### Sessions
Session logs capture startup configuration and prompt submissions.
##### `gemini_cli.config`
Emitted at startup with the CLI configuration.
Attributes
- `model` (string)
- `embedding_model` (string)
- `sandbox_enabled` (boolean)
- `core_tools_enabled` (string)
- `approval_mode` (string)
- `api_key_enabled` (boolean)
- `vertex_ai_enabled` (boolean)
- `log_user_prompts_enabled` (boolean)
- `file_filtering_respect_git_ignore` (boolean)
- `debug_mode` (boolean)
- `mcp_servers` (string)
- `mcp_servers_count` (int)
- `mcp_tools` (string)
- `mcp_tools_count` (int)
- `output_format` (string)
- `extensions` (string)
- `extension_ids` (string)
- `extensions_count` (int)
- `auth_type` (string)
- `github_workflow_name` (string, optional)
- `github_repository_hash` (string, optional)
- `github_event_name` (string, optional)
- `github_pr_number` (string, optional)
- `github_issue_number` (string, optional)
- `github_custom_tracking_id` (string, optional)
##### `gemini_cli.user_prompt`
Emitted when you submit a prompt.
Attributes
- `prompt_length` (int)
- `prompt_id` (string)
- `prompt` (string; excluded if `telemetry.logPrompts` is `false`)
- `auth_type` (string)
#### Approval mode
These logs track changes to and usage of different approval modes.
##### Lifecycle
##### `approval_mode_switch`
Logs when you change the approval mode.
Attributes
- `from_mode` (string)
- `to_mode` (string)
##### `approval_mode_duration`
Records time spent in an approval mode.
Attributes
- `mode` (string)
- `duration_ms` (int)
##### Execution
##### `plan_execution`
Logs when you execute a plan and switch from plan mode to active execution.
Attributes
- `approval_mode` (string)
#### Tools
Tool logs capture executions, truncation, and edit behavior.
##### `gemini_cli.tool_call`
Emitted for each tool (function) call.
Attributes
- `function_name` (string)
- `function_args` (string)
- `duration_ms` (int)
- `success` (boolean)
- `decision` (string: "accept", "reject", "auto_accept", or "modify")
- `error` (string, optional)
- `error_type` (string, optional)
- `prompt_id` (string)
- `tool_type` (string: "native" or "mcp")
- `mcp_server_name` (string, optional)
- `extension_name` (string, optional)
- `extension_id` (string, optional)
- `content_length` (int, optional)
- `start_time` (number, optional)
- `end_time` (number, optional)
- `metadata` (object, optional), which may include:
- `model_added_lines` (number)
- `model_removed_lines` (number)
- `user_added_lines` (number)
- `user_removed_lines` (number)
- `ask_user` (object)
##### `gemini_cli.tool_output_truncated`
Logs when tool output is truncated.
Attributes
- `tool_name` (string)
- `original_content_length` (int)
- `truncated_content_length` (int)
- `threshold` (int)
- `lines` (int)
- `prompt_id` (string)
##### `gemini_cli.edit_strategy`
Records the chosen edit strategy.
Attributes
- `strategy` (string)
##### `gemini_cli.edit_correction`
Records the result of an edit correction.
Attributes
- `correction` (string: "success" or "failure")
##### `gen_ai.client.inference.operation.details`
Provides detailed GenAI operation data aligned with OpenTelemetry conventions.
Attributes
- `gen_ai.request.model` (string)
- `gen_ai.provider.name` (string)
- `gen_ai.operation.name` (string)
- `gen_ai.input.messages` (json string)
- `gen_ai.output.messages` (json string)
- `gen_ai.response.finish_reasons` (array of strings)
- `gen_ai.usage.input_tokens` (int)
- `gen_ai.usage.output_tokens` (int)
- `gen_ai.request.temperature` (float)
- `gen_ai.request.top_p` (float)
- `gen_ai.request.top_k` (int)
- `gen_ai.request.max_tokens` (int)
- `gen_ai.system_instructions` (json string)
- `server.address` (string)
- `server.port` (int)
#### Files
File logs track operations performed by tools.
##### `gemini_cli.file_operation`
Emitted for each file creation, read, or update.
Attributes
- `tool_name` (string)
- `operation` (string: "create", "read", or "update")
- `lines` (int, optional)
- `mimetype` (string, optional)
- `extension` (string, optional)
- `programming_language` (string, optional)
#### API
API logs capture requests, responses, and errors from Gemini API.
##### `gemini_cli.api_request`
Request sent to Gemini API.
Attributes
- `model` (string)
- `prompt_id` (string)
- `role` (string: "user", "model", or "system")
- `request_text` (string, optional)
##### `gemini_cli.api_response`
Response received from Gemini API.
Attributes
- `model` (string)
- `status_code` (int or string)
- `duration_ms` (int)
- `input_token_count` (int)
- `output_token_count` (int)
- `cached_content_token_count` (int)
- `thoughts_token_count` (int)
- `tool_token_count` (int)
- `total_token_count` (int)
- `prompt_id` (string)
- `auth_type` (string)
- `finish_reasons` (array of strings)
- `response_text` (string, optional)
##### `gemini_cli.api_error`
Logs when an API request fails.
Attributes
- `error.message` (string)
- `model_name` (string)
- `duration` (int)
- `prompt_id` (string)
- `auth_type` (string)
- `error_type` (string, optional)
- `status_code` (int or string, optional)
- `role` (string, optional)
##### `gemini_cli.malformed_json_response`
Logs when a JSON response cannot be parsed.
Attributes
- `model` (string)
#### Model routing
These logs track how Gemini CLI selects and routes requests to models.
##### `gemini_cli.slash_command`
Logs slash command execution.
Attributes
- `command` (string)
- `subcommand` (string, optional)
- `status` (string: "success" or "error")
##### `gemini_cli.slash_command.model`
Logs model selection via slash command.
Attributes
- `model_name` (string)
##### `gemini_cli.model_routing`
Records model router decisions and reasoning.
Attributes
- `decision_model` (string)
- `decision_source` (string)
- `routing_latency_ms` (int)
- `reasoning` (string, optional)
- `failed` (boolean)
- `error_message` (string, optional)
- `approval_mode` (string)
#### Chat and streaming
These logs track chat context compression and streaming chunk errors.
##### `gemini_cli.chat_compression`
Logs chat context compression events.
Attributes
- `tokens_before` (int)
- `tokens_after` (int)
##### `gemini_cli.chat.invalid_chunk`
Logs invalid chunks received in a stream.
Attributes
- `error_message` (string, optional)
##### `gemini_cli.chat.content_retry`
Logs retries due to content errors.
Attributes
- `attempt_number` (int)
- `error_type` (string)
- `retry_delay_ms` (int)
- `model` (string)
##### `gemini_cli.chat.content_retry_failure`
Logs when all content retries fail.
Attributes
- `total_attempts` (int)
- `final_error_type` (string)
- `total_duration_ms` (int, optional)
- `model` (string)
##### `gemini_cli.conversation_finished`
Logs when a conversation session ends.
Attributes
- `approvalMode` (string)
- `turnCount` (int)
#### Resilience
Resilience logs record fallback mechanisms and recovery attempts.
##### `gemini_cli.flash_fallback`
Logs switch to a flash model fallback.
Attributes
- `auth_type` (string)
##### `gemini_cli.ripgrep_fallback`
Logs fallback to standard grep.
Attributes
- `error` (string, optional)
##### `gemini_cli.web_fetch_fallback_attempt`
Logs web-fetch fallback attempts.
Attributes
- `reason` (string: "private_ip" or "primary_failed")
##### `gemini_cli.agent.recovery_attempt`
Logs attempts to recover from agent errors.
Attributes
- `agent_name` (string)
- `attempt_number` (int)
- `success` (boolean)
- `error_type` (string, optional)
#### Extensions
Extension logs track lifecycle events and settings changes.
##### `gemini_cli.extension_install`
Logs when you install an extension.
Attributes
- `extension_name` (string)
- `extension_version` (string)
- `extension_source` (string)
- `status` (string)
##### `gemini_cli.extension_uninstall`
Logs when you uninstall an extension.
Attributes
- `extension_name` (string)
- `status` (string)
##### `gemini_cli.extension_enable`
Logs when you enable an extension.
Attributes
- `extension_name` (string)
- `setting_scope` (string)
##### `gemini_cli.extension_disable`
Logs when you disable an extension.
Attributes
- `extension_name` (string)
- `setting_scope` (string)
#### Agent runs
Agent logs track the lifecycle of agent executions.
##### `gemini_cli.agent.start`
Logs when an agent run begins.
Attributes
- `agent_id` (string)
- `agent_name` (string)
##### `gemini_cli.agent.finish`
Logs when an agent run completes.
Attributes
- `agent_id` (string)
- `agent_name` (string)
- `duration_ms` (int)
- `turn_count` (int)
- `terminate_reason` (string)
#### IDE
IDE logs capture connectivity events for the IDE companion.
##### `gemini_cli.ide_connection`
Logs IDE companion connections.
Attributes
- `connection_type` (string)
#### UI
UI logs track terminal rendering issues.
##### `kitty_sequence_overflow`
Logs terminal control sequence overflows.
Attributes
- `sequence_length` (int)
- `truncated_sequence` (string)
#### Miscellaneous
##### `gemini_cli.rewind`
Logs when the conversation state is rewound.
Attributes
- `outcome` (string)
##### `gemini_cli.conseca.verdict`
Logs security verdicts from ConSeca.
Attributes
- `verdict` (string)
- `decision` (string: "accept", "reject", or "modify")
- `reason` (string, optional)
- `tool_name` (string, optional)
##### `gemini_cli.hook_call`
Logs execution of lifecycle hooks.
Attributes
- `hook_name` (string)
- `hook_type` (string)
- `duration_ms` (int)
- `success` (boolean)
##### `gemini_cli.tool_output_masking`
Logs when tool output is masked for privacy.
Attributes
- `tokens_before` (int)
- `tokens_after` (int)
- `masked_count` (int)
- `total_prunable_tokens` (int)
##### `gemini_cli.keychain.availability`
Logs keychain availability checks.
Attributes
- `available` (boolean)
### Metrics
Metrics provide numerical measurements of behavior over time.
#### Custom metrics
Gemini CLI exports several custom metrics.
##### Sessions
##### `gemini_cli.session.count`
Incremented once per CLI startup.
##### Tools
##### `gemini_cli.tool.call.count`
Counts tool calls.
Attributes
- `function_name` (string)
- `success` (boolean)
- `decision` (string: "accept", "reject", "modify", or "auto_accept")
- `tool_type` (string: "mcp" or "native")
##### `gemini_cli.tool.call.latency`
Measures tool call latency (in ms).
Attributes
- `function_name` (string)
##### API
##### `gemini_cli.api.request.count`
Counts all API requests.
Attributes
- `model` (string)
- `status_code` (int or string)
- `error_type` (string, optional)
##### `gemini_cli.api.request.latency`
Measures API request latency (in ms).
Attributes
- `model` (string)
##### Token usage
##### `gemini_cli.token.usage`
Counts input, output, thought, cache, and tool tokens.
Attributes
- `model` (string)
- `type` (string: "input", "output", "thought", "cache", or "tool")
##### Files
##### `gemini_cli.file.operation.count`
Counts file operations.
Attributes
- `operation` (string: "create", "read", or "update")
- `lines` (int, optional)
- `mimetype` (string, optional)
- `extension` (string, optional)
- `programming_language` (string, optional)
##### `gemini_cli.lines.changed`
Counts added or removed lines.
Attributes
- `function_name` (string, optional)
- `type` (string: "added" or "removed")
##### Chat and streaming
##### `gemini_cli.chat_compression`
Counts compression operations.
Attributes
- `tokens_before` (int)
- `tokens_after` (int)
##### `gemini_cli.chat.invalid_chunk.count`
Counts invalid stream chunks.
##### `gemini_cli.chat.content_retry.count`
Counts content error retries.
##### `gemini_cli.chat.content_retry_failure.count`
Counts requests where all retries failed.
##### Model routing
##### `gemini_cli.slash_command.model.call_count`
Counts model selections.
Attributes
- `slash_command.model.model_name` (string)
##### `gemini_cli.model_routing.latency`
Measures routing decision latency.
Attributes
- `routing.decision_model` (string)
- `routing.decision_source` (string)
- `routing.approval_mode` (string)
##### `gemini_cli.model_routing.failure.count`
Counts routing failures.
Attributes
- `routing.decision_source` (string)
- `routing.error_message` (string)
- `routing.approval_mode` (string)
##### Agent runs
##### `gemini_cli.agent.run.count`
Counts agent runs.
Attributes
- `agent_name` (string)
- `terminate_reason` (string)
##### `gemini_cli.agent.duration`
Measures agent run duration.
Attributes
- `agent_name` (string)
##### `gemini_cli.agent.turns`
Counts turns per agent run.
Attributes
- `agent_name` (string)
##### Approval mode
##### `gemini_cli.plan.execution.count`
Counts plan executions.
Attributes
- `approval_mode` (string)
##### UI
##### `gemini_cli.ui.flicker.count`
Counts terminal flicker events.
##### Performance
Gemini CLI provides detailed performance metrics for advanced monitoring.
##### `gemini_cli.startup.duration`
Measures startup time by phase.
Attributes
- `phase` (string)
- `details` (map, optional)
##### `gemini_cli.memory.usage`
Measures heap and RSS memory.
Attributes
- `memory_type` (string: "heap_used", "heap_total", "external", "rss")
- `component` (string, optional)
##### `gemini_cli.cpu.usage`
Measures CPU usage percentage.
Attributes
- `component` (string, optional)
##### `gemini_cli.tool.queue.depth`
Measures tool execution queue depth.
##### `gemini_cli.tool.execution.breakdown`
Breaks down tool time by phase.
Attributes
- `function_name` (string)
- `phase` (string: "validation", "preparation", "execution",
"result_processing")
#### GenAI semantic convention
These metrics follow standard [OpenTelemetry GenAI semantic conventions].
- `gen_ai.client.token.usage`: Counts tokens used per operation.
- `gen_ai.client.operation.duration`: Measures operation duration in seconds.
[OpenTelemetry GenAI semantic conventions]:
https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-metrics.md
### Traces
Traces provide an "under-the-hood" view of agent and backend operations. Use
traces to debug tool interactions and optimize performance.
Every trace captures rich metadata via standard span attributes.
Standard span attributes
- `gen_ai.operation.name`: High-level operation (for example, `tool_call`,
`llm_call`, `user_prompt`, `system_prompt`, `agent_call`, or
`schedule_tool_calls`).
- `gen_ai.agent.name`: Set to `gemini-cli`.
- `gen_ai.agent.description`: The service agent description.
- `gen_ai.input.messages`: Input data or metadata.
- `gen_ai.output.messages`: Output data or results.
- `gen_ai.request.model`: Request model name.
- `gen_ai.response.model`: Response model name.
- `gen_ai.prompt.name`: The prompt name.
- `gen_ai.tool.name`: Executed tool name.
- `gen_ai.tool.call_id`: Unique ID for the tool call.
- `gen_ai.tool.description`: Tool description.
- `gen_ai.tool.definitions`: Tool definitions in JSON format.
- `gen_ai.usage.input_tokens`: Number of input tokens.
- `gen_ai.usage.output_tokens`: Number of output tokens.
- `gen_ai.system_instructions`: System instructions in JSON format.
- `gen_ai.conversation.id`: The CLI session ID.
For more details on semantic conventions for events, see the
[OpenTelemetry documentation](https://github.com/open-telemetry/semantic-conventions/blob/8b4f210f43136e57c1f6f47292eb6d38e3bf30bb/docs/gen-ai/gen-ai-events.md).
================================================
FILE: docs/cli/themes.md
================================================
# Themes
Gemini CLI supports a variety of themes to customize its color scheme and
appearance. You can change the theme to suit your preferences via the `/theme`
command or `"theme":` configuration setting.
## Available themes
Gemini CLI comes with a selection of pre-defined themes, which you can list
using the `/theme` command within Gemini CLI:
- **Dark themes:**
- `ANSI`
- `Atom One`
- `Ayu`
- `Default`
- `Dracula`
- `GitHub`
- `Holiday`
- `Shades Of Purple`
- `Solarized Dark`
- **Light themes:**
- `ANSI Light`
- `Ayu Light`
- `Default Light`
- `GitHub Light`
- `Google Code`
- `Solarized Light`
- `Xcode`
### Changing themes
1. Enter `/theme` into Gemini CLI.
2. A dialog or selection prompt appears, listing the available themes.
3. Using the arrow keys, select a theme. Some interfaces might offer a live
preview or highlight as you select.
4. Confirm your selection to apply the theme.
> [!NOTE]
> If a theme is defined in your `settings.json` file (either by name or
> by a file path), you must remove the `"theme"` setting from the file before
> you can change the theme using the `/theme` command.
### Theme persistence
Selected themes are saved in Gemini CLI's
[configuration](../reference/configuration.md) so your preference is remembered
across sessions.
---
## Custom color themes
Gemini CLI lets you create your own custom color themes by specifying them in
your `settings.json` file. This gives you full control over the color palette
used in the CLI.
### How to define a custom theme
Add a `customThemes` block to your user, project, or system `settings.json`
file. Each custom theme is defined as an object with a unique name and a set of
nested configuration objects. For example:
```json
{
"ui": {
"customThemes": {
"MyCustomTheme": {
"name": "MyCustomTheme",
"type": "custom",
"background": {
"primary": "#181818"
},
"text": {
"primary": "#f0f0f0",
"secondary": "#a0a0a0"
}
}
}
}
}
```
**Configuration objects:**
- **`text`**: Defines text colors.
- `primary`: The default text color.
- `secondary`: Used for less prominent text.
- `link`: Color for URLs and links.
- `accent`: Used for highlights and emphasis.
  - `response`: Takes precedence over `primary` when rendering model responses.
- **`background`**: Defines background colors.
- `primary`: The main background color of the UI.
- `diff.added`: Background for added lines in diffs.
- `diff.removed`: Background for removed lines in diffs.
- **`border`**: Defines border colors.
- `default`: The standard border color.
- `focused`: Border color when an element is focused.
- **`status`**: Colors for status indicators.
- `success`: Used for successful operations.
- `warning`: Used for warnings.
- `error`: Used for errors.
- **`ui`**: Other UI elements.
- `comment`: Color for code comments.
- `symbol`: Color for code symbols and operators.
- `gradient`: An array of colors used for gradient effects.
**Required properties:**
- `name` (must match the key in the `customThemes` object and be a string)
- `type` (must be the string `"custom"`)
While all sub-properties are technically optional, we recommend providing at
least `background.primary`, `text.primary`, `text.secondary`, and the various
accent colors via `text.link`, `text.accent`, and `status` to ensure a cohesive
UI.
You can use either hex codes (e.g., `#FF0000`) **or** standard CSS color names
(e.g., `coral`, `teal`, `blue`) for any color value. See
[CSS color names](https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#color_keywords)
for a full list of supported names.
You can define multiple custom themes by adding more entries to the
`customThemes` object.
### Loading themes from a file
In addition to defining custom themes in `settings.json`, you can also load a
theme directly from a JSON file by specifying the file path in your
`settings.json`. This is useful for sharing themes or keeping them separate from
your main configuration.
To load a theme from a file, set the `theme` property in your `settings.json` to
the path of your theme file:
```json
{
"ui": {
"theme": "/path/to/your/theme.json"
}
}
```
The theme file must be a valid JSON file that follows the same structure as a
custom theme defined in `settings.json`.
**Example `my-theme.json`:**
```json
{
"name": "Gruvbox Dark",
"type": "custom",
"background": {
"primary": "#282828",
"diff": {
"added": "#2b3312",
"removed": "#341212"
}
},
"text": {
"primary": "#ebdbb2",
"secondary": "#a89984",
"link": "#83a598",
"accent": "#d3869b"
},
"border": {
"default": "#3c3836",
"focused": "#458588"
},
"status": {
"success": "#b8bb26",
"warning": "#fabd2f",
"error": "#fb4934"
},
"ui": {
"comment": "#928374",
"symbol": "#8ec07c",
"gradient": ["#cc241d", "#d65d0e", "#d79921"]
}
}
```
> [!WARNING]
> For your safety, Gemini CLI will only load theme files that
> are located within your home directory. If you attempt to load a theme from
> outside your home directory, a warning will be displayed and the theme will
> not be loaded. This is to prevent loading potentially malicious theme files
> from untrusted sources.
### Example custom theme
### Using your custom theme
- Select your custom theme using the `/theme` command in Gemini CLI. Your custom
theme will appear in the theme selection dialog.
- Or, set it as the default by adding `"theme": "MyCustomTheme"` to the `ui`
object in your `settings.json`.
- Custom themes can be set at the user, project, or system level, and follow the
same [configuration precedence](../reference/configuration.md) as other
settings.
### Themes from extensions
[Extensions](../extensions/reference.md#themes) can also provide custom themes.
Once an extension is installed and enabled, its themes are automatically added
to the selection list in the `/theme` command.
Themes from extensions appear with the extension name in parentheses to help you
identify their source, for example: `shades-of-green (green-extension)`.
---
## Dark themes
### ANSI
### Atom One
### Ayu
### Default
### Dracula
### GitHub
### Holiday
### Shades Of Purple
### Solarized Dark
## Light themes
### ANSI Light
### Ayu Light
### Default Light
### GitHub Light
### Google Code
### Solarized Light
### Xcode
================================================
FILE: docs/cli/token-caching.md
================================================
# Token caching and cost optimization
Gemini CLI automatically optimizes API costs through token caching when using
API key authentication (Gemini API key or Vertex AI). This feature reuses
previous system instructions and context to reduce the number of tokens
processed in subsequent requests.
**Token caching is available for:**
- API key users (Gemini API key)
- Vertex AI users (with project and location setup)
**Token caching is not available for:**
- OAuth users (Google Personal/Enterprise accounts) - the Code Assist API does
not support cached content creation at this time
You can view your token usage and cached token savings using the `/stats`
command. When cached tokens are available, they will be displayed in the stats
output.
================================================
FILE: docs/cli/trusted-folders.md
================================================
# Trusted Folders
The Trusted Folders feature is a security setting that gives you control over
which projects can use the full capabilities of the Gemini CLI. It prevents
potentially malicious code from running by asking you to approve a folder before
the CLI loads any project-specific configurations from it.
## Enabling the feature
The Trusted Folders feature is **disabled by default**. To use it, you must
first enable it in your settings.
Add the following to your user `settings.json` file:
```json
{
"security": {
"folderTrust": {
"enabled": true
}
}
}
```
## How it works: The trust dialog
Once the feature is enabled, the first time you run the Gemini CLI from a
folder, a dialog will automatically appear, prompting you to make a choice:
- **Trust folder**: Grants full trust to the current folder (e.g.,
`my-project`).
- **Trust parent folder**: Grants trust to the parent directory (e.g.,
`safe-projects`), which automatically trusts all of its subdirectories as
well. This is useful if you keep all your safe projects in one place.
- **Don't trust**: Marks the folder as untrusted. The CLI will operate in a
restricted "safe mode."
Your choice is saved in a central file (`~/.gemini/trustedFolders.json`), so you
will only be asked once per folder.
## Understanding folder contents: The discovery phase
Before you make a choice, the Gemini CLI performs a **discovery phase** to scan
the folder for potential configurations. This information is displayed in the
trust dialog to help you make an informed decision.
The discovery UI lists the following categories of items found in the project:
- **Commands**: Custom `.toml` command definitions that add new functionality.
- **MCP Servers**: Configured Model Context Protocol servers that the CLI will
attempt to connect to.
- **Hooks**: System or custom hooks that can intercept and modify CLI behavior.
- **Skills**: Local agent skills that provide specialized capabilities.
- **Setting overrides**: Any project-specific configurations that override your
global user settings.
### Security warnings and errors
The trust dialog also highlights critical information that requires your
attention:
- **Security Warnings**: The CLI will explicitly flag potentially dangerous
settings, such as auto-approving certain tools or disabling the security
sandbox.
- **Discovery Errors**: If the CLI encounters issues while scanning the folder
(e.g., a malformed `settings.json` file), these errors will be displayed
prominently.
By reviewing these details, you can ensure that you only grant trust to projects
that you know are safe.
## Why trust matters: The impact of an untrusted workspace
When a folder is **untrusted**, the Gemini CLI runs in a restricted "safe mode"
to protect you. In this mode, the following features are disabled:
1. **Workspace settings are ignored**: The CLI will **not** load the
`.gemini/settings.json` file from the project. This prevents the loading of
custom tools and other potentially dangerous configurations.
2. **Environment variables are ignored**: The CLI will **not** load any `.env`
files from the project.
3. **Extension management is restricted**: You **cannot install, update, or
uninstall** extensions.
4. **Tool auto-acceptance is disabled**: You will always be prompted before any
tool is run, even if you have auto-acceptance enabled globally.
5. **Automatic memory loading is disabled**: The CLI will not automatically
load files into context from directories specified in local settings.
6. **MCP servers do not connect**: The CLI will not attempt to connect to any
[Model Context Protocol (MCP)](../tools/mcp-server.md) servers.
7. **Custom commands are not loaded**: The CLI will not load any custom
   commands from `.toml` files, including both project-specific and global user
commands.
Granting trust to a folder unlocks the full functionality of the Gemini CLI for
that workspace.
## Managing your trust settings
If you need to change a decision or see all your settings, you have a couple of
options:
- **Change the current folder's trust**: Run the `/permissions` command from
within the CLI. This will bring up the same interactive dialog, allowing you
to change the trust level for the current folder.
- **View all trust rules**: To see a complete list of all your trusted and
untrusted folder rules, you can inspect the contents of the
`~/.gemini/trustedFolders.json` file in your home directory.
## The trust check process (advanced)
For advanced users, it's helpful to know the exact order of operations for how
trust is determined:
1. **IDE trust signal**: If you are using the
[IDE Integration](../ide-integration/index.md), the CLI first asks the IDE
if the workspace is trusted. The IDE's response takes highest priority.
2. **Local trust file**: If the IDE is not connected, the CLI checks the
central `~/.gemini/trustedFolders.json` file.
================================================
FILE: docs/cli/tutorials/automation.md
================================================
# Automate tasks with headless mode
Automate tasks with Gemini CLI. Learn how to use headless mode, pipe data into
Gemini CLI, automate workflows with shell scripts, and generate structured JSON
output for other applications.
## Prerequisites
- Gemini CLI installed and authenticated.
- Familiarity with shell scripting (Bash/Zsh).
## Why headless mode?
Headless mode runs Gemini CLI once and exits. It's perfect for:
- **CI/CD:** Analyzing pull requests automatically.
- **Batch processing:** Summarizing a large number of log files.
- **Tool building:** Creating your own "AI wrapper" scripts.
## How to use headless mode
Run Gemini CLI in headless mode by providing a prompt with the `-p` (or
`--prompt`) flag. This bypasses the interactive chat interface and prints the
response to standard output (stdout). Positional arguments without the flag
default to interactive mode, unless the input or output is piped or redirected.
Run a single command:
```bash
gemini -p "Write a poem about TypeScript"
```
## How to pipe input to Gemini CLI
Feed data into Gemini using the standard Unix pipe `|`. Gemini reads the
standard input (stdin) as context and answers your question using standard
output.
Pipe a file:
**macOS/Linux**
```bash
cat error.log | gemini -p "Explain why this failed"
```
**Windows (PowerShell)**
```powershell
Get-Content error.log | gemini -p "Explain why this failed"
```
Pipe a command:
```bash
git diff | gemini -p "Write a commit message for these changes"
```
## Use Gemini CLI output in scripts
Because Gemini prints to stdout, you can chain it with other tools or save the
results to a file.
### Scenario: Bulk documentation generator
You have a folder of Python scripts and want to generate a `README.md` for each
one.
1. Save the following code as `generate_docs.sh` (or `generate_docs.ps1` for
Windows):
**macOS/Linux (`generate_docs.sh`)**
```bash
#!/bin/bash
# Loop through all Python files
for file in *.py; do
echo "Generating docs for $file..."
# Ask Gemini CLI to generate the documentation and print it to stdout
gemini -p "Generate a Markdown documentation summary for @$file. Print the
result to standard output." > "${file%.py}.md"
done
```
**Windows PowerShell (`generate_docs.ps1`)**
```powershell
# Loop through all Python files
Get-ChildItem -Filter *.py | ForEach-Object {
Write-Host "Generating docs for $($_.Name)..."
$newName = $_.Name -replace '\.py$', '.md'
# Ask Gemini CLI to generate the documentation and print it to stdout
gemini -p "Generate a Markdown documentation summary for @$($_.Name). Print the result to standard output." | Out-File -FilePath $newName -Encoding utf8
}
```
2. Make the script executable and run it in your directory:
**macOS/Linux**
```bash
chmod +x generate_docs.sh
./generate_docs.sh
```
**Windows (PowerShell)**
```powershell
.\generate_docs.ps1
```
This creates a corresponding Markdown file for every Python file in the
folder.
## Extract structured JSON data
When writing a script, you often need structured data (JSON) to pass to tools
like `jq`. To get pure JSON data from the model, combine the
`--output-format json` flag with `jq` to parse the response field.
### Scenario: Extract and return structured data
1. Save the following script as `generate_json.sh` (or `generate_json.ps1` for
Windows):
**macOS/Linux (`generate_json.sh`)**
```bash
#!/bin/bash
# Ensure we are in a project root
if [ ! -f "package.json" ]; then
echo "Error: package.json not found."
exit 1
fi
# Extract data
gemini --output-format json "Return a raw JSON object with keys 'version' and 'deps' from @package.json" | jq -r '.response' > data.json
```
**Windows PowerShell (`generate_json.ps1`)**
```powershell
# Ensure we are in a project root
if (-not (Test-Path "package.json")) {
Write-Error "Error: package.json not found."
exit 1
}
# Extract data (requires jq installed, or you can use ConvertFrom-Json)
$output = gemini --output-format json "Return a raw JSON object with keys 'version' and 'deps' from @package.json" | ConvertFrom-Json
$output.response | Out-File -FilePath data.json -Encoding utf8
```
2. Run the script:
**macOS/Linux**
```bash
chmod +x generate_json.sh
./generate_json.sh
```
**Windows (PowerShell)**
```powershell
.\generate_json.ps1
```
3. Check `data.json`. The file should look like this:
```json
{
"version": "1.0.0",
"deps": {
"react": "^18.2.0"
}
}
```
## Build your own custom AI tools
Use headless mode to perform custom, automated AI tasks.
### Scenario: Create a "Smart Commit" alias
You can add a function to your shell configuration to create a `git commit`
wrapper that writes the message for you.
**macOS/Linux (Bash/Zsh)**
1. Open your `.zshrc` file (or `.bashrc` if you use Bash) in your preferred
text editor.
```bash
nano ~/.zshrc
```
**Note**: If you use VS Code, you can run `code ~/.zshrc`.
2. Scroll to the very bottom of the file and paste this code:
```bash
function gcommit() {
# Get the diff of staged changes
diff=$(git diff --staged)
if [ -z "$diff" ]; then
echo "No staged changes to commit."
return 1
fi
# Ask Gemini to write the message
echo "Generating commit message..."
msg=$(echo "$diff" | gemini -p "Write a concise Conventional Commit message for this diff. Output ONLY the message.")
# Commit with the generated message
git commit -m "$msg"
}
```
Save your file and exit.
3. Run this command to make the function available immediately:
```bash
source ~/.zshrc
```
**Windows (PowerShell)**
1. Open your PowerShell profile in your preferred text editor.
```powershell
notepad $PROFILE
```
2. Scroll to the very bottom of the file and paste this code:
```powershell
function gcommit {
# Get the diff of staged changes
$diff = git diff --staged
if (-not $diff) {
Write-Host "No staged changes to commit."
return
}
# Ask Gemini to write the message
Write-Host "Generating commit message..."
$msg = $diff | gemini -p "Write a concise Conventional Commit message for this diff. Output ONLY the message."
# Commit with the generated message
git commit -m "$msg"
}
```
Save your file and exit.
3. Run this command to make the function available immediately:
```powershell
. $PROFILE
```
4. Use your new command:
```bash
gcommit
```
Gemini CLI will analyze your staged changes and commit them with a generated
message.
## Next steps
- Explore the [Headless mode reference](../../cli/headless.md) for full JSON
schema details.
- Learn about [Shell commands](shell-commands.md) to let the agent run scripts
instead of just writing them.
================================================
FILE: docs/cli/tutorials/file-management.md
================================================
# File management with Gemini CLI
Explore, analyze, and modify your codebase using Gemini CLI. In this guide,
you'll learn how to provide Gemini CLI with files and directories, modify and
create files, and control what Gemini CLI can see.
## Prerequisites
- Gemini CLI installed and authenticated.
- A project directory to work with (for example, a git repository).
## Providing context by reading files
Gemini CLI will generally try to read relevant files, sometimes prompting you
for access (depending on your settings). To ensure that Gemini CLI uses a file,
you can also include it directly.
### Direct file inclusion (`@`)
If you know the path to the file you want to work on, use the `@` symbol. This
forces the CLI to read the file immediately and inject its content into your
prompt.
```bash
`@src/components/UserProfile.tsx Explain how this component handles user data.`
```
### Working with multiple files
Complex features often span multiple files. You can chain `@` references to give
the agent a complete picture of the dependencies.
```bash
`@src/components/UserProfile.tsx @src/types/User.ts Refactor the component to use the updated User interface.`
```
### Including entire directories
For broad questions or refactoring, you can include an entire directory. Be
careful with large folders, as this consumes more tokens.
```bash
`@src/utils/ Check these utility functions for any deprecated API usage.`
```
## How to find files (Exploration)
If you _don't_ know the exact file path, you can ask Gemini CLI to find it for
you. This is useful when navigating a new codebase or looking for specific
logic.
### Scenario: Find a component definition
You know there's a `UserProfile` component, but you don't know where it lives.
```none
`Find the file that defines the UserProfile component.`
```
Gemini uses the `glob` or `list_directory` tools to search your project
structure. It will return the specific path (for example,
`src/components/UserProfile.tsx`), which you can then use with `@` in your next
turn.
> [!TIP]
> You can also ask for lists of files, like "Show me all the TypeScript
> configuration files in the root directory."
## How to modify code
Once Gemini CLI has context, you can direct it to make specific edits. The agent
is capable of complex refactoring, not just simple text replacement.
```none
`Update @src/components/UserProfile.tsx to show a loading spinner if the user data is null.`
```
Gemini CLI uses the `replace` tool to propose a targeted code change.
### Creating new files
You can also ask the agent to create entirely new files or folder structures.
```none
`Create a new file @src/components/LoadingSpinner.tsx with a simple Tailwind CSS spinner.`
```
Gemini CLI uses the `write_file` tool to generate the new file from scratch.
## Review and confirm changes
Gemini CLI prioritizes safety. Before any file is modified, it presents a
unified diff of the proposed changes.
```diff
- if (!user) return null;
+ if (!user) return <LoadingSpinner />;
```
- **Red lines (-):** Code that will be removed.
- **Green lines (+):** Code that will be added.
Press **y** to confirm and apply the change to your local file system. If the
diff doesn't look right, press **n** to cancel and refine your prompt.
## Verify the result
After the edit is complete, verify the fix. You can simply read the file again
or, better yet, run your project's tests.
```none
`Run the tests for the UserProfile component.`
```
Gemini CLI uses the `run_shell_command` tool to execute your test runner (for
example, `npm test` or `jest`). This ensures the changes didn't break existing
functionality.
## Advanced: Controlling what Gemini sees
By default, Gemini CLI respects your `.gitignore` file. It won't read or search
through `node_modules`, build artifacts, or other ignored paths.
If you have sensitive files (like `.env`) or large assets that you want to keep
hidden from the AI _without_ ignoring them in Git, you can create a
`.geminiignore` file in your project root.
**Example `.geminiignore`:**
```text
.env
local-db-dump.sql
private-notes.md
```
## Next steps
- Learn how to [Manage context and memory](memory-management.md) to keep your
agent smarter over long sessions.
- See [Execute shell commands](shell-commands.md) for more on running tests and
builds.
- Explore the technical [File system reference](../../tools/file-system.md) for
advanced tool parameters.
================================================
FILE: docs/cli/tutorials/mcp-setup.md
================================================
# Set up an MCP server
Connect Gemini CLI to your external databases and services. In this guide,
you'll learn how to extend Gemini CLI's capabilities by installing the GitHub
MCP server and using it to manage your repositories.
## Prerequisites
- Gemini CLI installed.
- **Docker:** Required for this specific example (many MCP servers run as Docker
containers).
- **GitHub token:** A Personal Access Token (PAT) with repo permissions.
## How to prepare your credentials
Most MCP servers require authentication. For GitHub, you need a PAT.
1. Create a [fine-grained PAT](https://github.com/settings/tokens?type=beta).
2. Grant it **Read** access to **Metadata** and **Contents**, and
**Read/Write** access to **Issues** and **Pull Requests**.
3. Store it in your environment:
**macOS/Linux**
```bash
export GITHUB_PERSONAL_ACCESS_TOKEN="github_pat_..."
```
**Windows (PowerShell)**
```powershell
$env:GITHUB_PERSONAL_ACCESS_TOKEN="github_pat_..."
```
## How to configure Gemini CLI
You tell Gemini about new servers by editing your `settings.json`.
1. Open `~/.gemini/settings.json` (or the project-specific
`.gemini/settings.json`).
2. Add the `mcpServers` block. This tells Gemini: "Run this docker container
and talk to it."
```json
{
"mcpServers": {
"github": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"-e",
"GITHUB_PERSONAL_ACCESS_TOKEN",
"ghcr.io/github/github-mcp-server:latest"
],
"env": {
"GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_PERSONAL_ACCESS_TOKEN}"
}
}
}
}
```
> [!NOTE]
> The `command` is `docker`, and the rest are arguments passed to it. We
> map the local environment variable into the container so your secret isn't
> hardcoded in the config file.
## How to verify the connection
Restart Gemini CLI. It will automatically try to start the defined servers.
**Command:** `/mcp list`
You should see: `✓ github: docker ... - Connected`
If you see `Disconnected` or an error, check that Docker is running and your API
token is valid.
## How to use the new tools
Now that the server is running, the agent has new capabilities ("tools"). You
don't need to learn special commands; just ask in natural language.
### Scenario: Listing pull requests
**Prompt:** `List the open PRs in the google-gemini/gemini-cli repository.`
The agent will:
1. Recognize the request matches a GitHub tool.
2. Call `mcp_github_list_pull_requests`.
3. Present the data to you.
### Scenario: Creating an issue
**Prompt:**
`Create an issue in my repo titled "Bug: Login fails" with the description "See logs".`
## Troubleshooting
- **Server won't start?** Try running the docker command manually in your
terminal to see if it prints an error (e.g., "image not found").
- **Tools not found?** Run `/mcp reload` to force the CLI to re-query the server
for its capabilities.
## Next steps
- Explore the [MCP servers reference](../../tools/mcp-server.md) to learn about
SSE and HTTP transports for remote servers.
- Browse the
[official MCP server list](https://github.com/modelcontextprotocol/servers) to
find connectors for Slack, Postgres, Google Drive, and more.
================================================
FILE: docs/cli/tutorials/memory-management.md
================================================
# Manage context and memory
Control what Gemini CLI knows about you and your projects. In this guide, you'll
learn how to define project-wide rules with `GEMINI.md`, teach the agent
persistent facts, and inspect the active context.
## Prerequisites
- Gemini CLI installed and authenticated.
- A project directory where you want to enforce specific rules.
## Why manage context?
Gemini CLI is powerful but general. It doesn't know your preferred testing
framework, your indentation style, or your preference against `any` in
TypeScript. Context management solves this by giving the agent persistent
memory.
You'll use these features when you want to:
- **Enforce standards:** Ensure every generated file matches your team's style
guide.
- **Set a persona:** Tell the agent to act as a "Senior Rust Engineer" or "QA
Specialist."
- **Remember facts:** Save details like "My database port is 5432" so you don't
have to repeat them.
## How to define project-wide rules (GEMINI.md)
The most powerful way to control the agent's behavior is through `GEMINI.md`
files. These are Markdown files containing instructions that are automatically
loaded into every conversation.
### Scenario: Create a project context file
1. In the root of your project, create a file named `GEMINI.md`.
2. Add your instructions:
```markdown
# Project Instructions
- **Framework:** We use React with Vite.
- **Styling:** Use Tailwind CSS for all styling. Do not write custom CSS.
- **Testing:** All new components must include a Vitest unit test.
- **Tone:** Be concise. Don't explain basic React concepts.
```
3. Start a new session. Gemini CLI will now know these rules automatically.
### Scenario: Using the hierarchy
Context is loaded hierarchically. This allows you to have general rules for
everything and specific rules for sub-projects.
1. **Global:** `~/.gemini/GEMINI.md` (Rules for _every_ project you work on).
2. **Project Root:** `./GEMINI.md` (Rules for the current repository).
3. **Subdirectory:** `./src/GEMINI.md` (Rules specific to the `src` folder).
**Example:** You might set "Always use strict typing" in your global config, but
"Use Python 3.11" only in your backend repository.
## How to teach the agent facts (Memory)
Sometimes you don't want to write a config file. You just want to tell the agent
something once and have it remember forever. You can do this naturally in chat.
### Scenario: Saving a memory
Just tell the agent to remember something.
**Prompt:** `Remember that I prefer using 'const' over 'let' wherever possible.`
The agent will use the `save_memory` tool to store this fact in your global
memory file.
**Prompt:** `Save the fact that the staging server IP is 10.0.0.5.`
### Scenario: Using memory in conversation
Once a fact is saved, you don't need to invoke it explicitly. The agent "knows"
it.
**Next Prompt:** `Write a script to deploy to staging.`
**Agent Response:** "I'll write a script to deploy to **10.0.0.5**..."
## How to manage and inspect context
As your project grows, you might want to see exactly what instructions the agent
is following.
### Scenario: View active context
To see the full, concatenated set of instructions currently loaded (from all
`GEMINI.md` files and saved memories), use the `/memory show` command.
**Command:** `/memory show`
This prints the raw text the model receives at the start of the session. It's
excellent for debugging why the agent might be ignoring a rule.
### Scenario: Refresh context
If you edit a `GEMINI.md` file while a session is running, the agent won't know
immediately. Force a reload with:
**Command:** `/memory reload`
## Best practices
- **Keep it focused:** Avoid adding excessive content to `GEMINI.md`. Keep
instructions actionable and relevant to code generation.
- **Use negative constraints:** Explicitly telling the agent what _not_ to do
(for example, "Do not use class components") is often more effective than
vague positive instructions.
- **Review often:** Periodically check your `GEMINI.md` files to remove outdated
rules.
## Next steps
- Learn about [Session management](session-management.md) to see how short-term
history works.
- Explore the [Command reference](../../reference/commands.md) for more
`/memory` options.
- Read the technical spec for [Project context](../../cli/gemini-md.md).
================================================
FILE: docs/cli/tutorials/plan-mode-steering.md
================================================
# Use Plan Mode with model steering for complex tasks
Architecting a complex solution requires precision. By combining Plan Mode's
structured environment with model steering's real-time feedback, you can guide
Gemini CLI through the research and design phases to ensure the final
implementation plan is exactly what you need.
> [!NOTE]
> This is an experimental feature currently under active development and
> may need to be enabled under `/settings`.
## Prerequisites
- Gemini CLI installed and authenticated.
- [Plan Mode](../plan-mode.md) enabled in your settings.
- [Model steering](../model-steering.md) enabled in your settings.
## Why combine Plan Mode and model steering?
[Plan Mode](../plan-mode.md) typically follows a linear path: research, propose,
and draft. Adding model steering lets you:
1. **Direct the research:** Correct the agent if it's looking in the wrong
directory or missing a key dependency.
2. **Iterate mid-draft:** Suggest a different architectural pattern while the
agent is still writing the plan.
3. **Speed up the loop:** Avoid waiting for a full research turn to finish
before providing critical context.
## Step 1: Start a complex task
Enter Plan Mode and start a task that requires research.
**Prompt:** `/plan I want to implement a new notification service using Redis.`
Gemini CLI enters Plan Mode and starts researching your existing codebase to
identify where the new service should live.
## Step 2: Steer the research phase
As you see the agent calling tools like `list_directory` or `grep_search`, you
might realize it's missing the relevant context.
**Action:** While the spinner is active, type your hint:
`"Don't forget to check packages/common/queues for the existing Redis config."`
**Result:** Gemini CLI acknowledges your hint and immediately incorporates it
into its research. You'll see it start exploring the directory you suggested in
its very next turn.
## Step 3: Refine the design mid-turn
After research, the agent starts drafting the implementation plan. If you notice
it's proposing a design that doesn't align with your goals, steer it.
**Action:** Type:
`"Actually, let's use a Publisher/Subscriber pattern instead of a simple queue for this service."`
**Result:** The agent stops drafting the current version of the plan,
re-evaluates the design based on your feedback, and starts a new draft that uses
the Pub/Sub pattern.
## Step 4: Approve and implement
Once the agent has used your hints to craft the perfect plan, review the final
`.md` file.
**Action:** Type: `"Looks perfect. Let's start the implementation."`
Gemini CLI exits Plan Mode and transitions to the implementation phase. Because
the plan was refined in real-time with your feedback, the agent can now execute
each step with higher confidence and fewer errors.
## Tips for effective steering
- **Be specific:** Instead of "do it differently," try "use the existing
`Logger` class in `src/utils`."
- **Steer early:** Providing feedback during the research phase is more
efficient than waiting for the final plan to be drafted.
- **Use for context:** Steering is a great way to provide knowledge that might
not be obvious from reading the code (e.g., "We are planning to deprecate this
module next month").
## Next steps
- Explore [Agent Skills](../skills.md) to add specialized expertise to your
planning turns.
- See the [Model steering reference](../model-steering.md) for technical
details.
================================================
FILE: docs/cli/tutorials/session-management.md
================================================
# Manage sessions and history
Resume, browse, and rewind your conversations with Gemini CLI. In this guide,
you'll learn how to switch between tasks, manage your session history, and undo
mistakes using the rewind feature.
## Prerequisites
- Gemini CLI installed and authenticated.
- At least one active or past session.
## How to resume where you left off
It's common to switch context—maybe you're waiting for a build and want to work
on a different feature. Gemini makes it easy to jump back in.
### Scenario: Resume the last session
The fastest way to pick up your most recent work is with the `--resume` flag (or
`-r`).
```bash
gemini -r
```
This restores your chat history and memory, so you can say "Continue with the
next step" immediately.
### Scenario: Browse past sessions
If you want to find a specific conversation from yesterday, use the interactive
browser.
**Command:** `/resume`
This opens a searchable list of all your past sessions. You'll see:
- A timestamp (e.g., "2 hours ago").
- The first user message (helping you identify the topic).
- The number of turns in the conversation.
Select a session and press **Enter** to load it.
## How to manage your workspace
Over time, you'll accumulate a lot of history. Keeping your session list clean
helps you find what you need.
### Scenario: Deleting sessions
In the `/resume` browser, navigate to a session you no longer need and press
**x**. This permanently deletes the history for that specific conversation.
You can also manage sessions from the command line:
```bash
# List all sessions with their IDs
gemini --list-sessions
# Delete a specific session by ID or index
gemini --delete-session 1
```
## How to rewind time (Undo mistakes)
Gemini CLI's **Rewind** feature is like `Ctrl+Z` for your workflow.
### Scenario: Triggering rewind
At any point in a chat, type `/rewind` or press **Esc** twice.
### Scenario: Choosing a restore point
You'll see a list of your recent interactions. Select the point _before_ the
undesired changes occurred.
### Scenario: Choosing what to revert
Gemini gives you granular control over the undo process. You can choose to:
1. **Rewind conversation:** Only remove the chat history. The files stay
changed. (Useful if the code is good but the chat got off track).
2. **Revert code changes:** Keep the chat history but undo the file edits.
(Useful if you want to keep the context but retry the implementation).
3. **Rewind both:** Restore everything to exactly how it was.
## How to fork conversations
Sometimes you want to try two different approaches to the same problem.
1. Start a session and get to a decision point.
2. Save the current state with `/resume save decision-point`.
3. Try your first approach.
4. Later, use `/resume resume decision-point` to fork the conversation back to
that moment and try a different approach.
This creates a new branch of history without losing your original work.
## Next steps
- Learn about [Checkpointing](../../cli/checkpointing.md) to understand the
underlying safety mechanism.
- Explore [Task planning](task-planning.md) to keep complex sessions organized.
- See the [Command reference](../../reference/commands.md) for `/resume`
options, grouped checkpoint menus, and `/chat` compatibility aliases.
================================================
FILE: docs/cli/tutorials/shell-commands.md
================================================
# Execute shell commands
Use the CLI to run builds, manage git, and automate system tasks without leaving
the conversation. In this guide, you'll learn how to run commands directly,
automate complex workflows, and manage background processes safely.
## Prerequisites
- Gemini CLI installed and authenticated.
- Basic familiarity with your system's shell (Bash, Zsh, PowerShell, and so on).
## How to run commands directly (`!`)
Sometimes you just need to check a file size or git status without asking the AI
to do it for you. You can pass commands directly to your shell using the `!`
prefix.
**Example:** `!ls -la`
This executes `ls -la` immediately and prints the output to your terminal.
Gemini CLI also records the command and its output in the current session
context, so the model can reference it in follow-up prompts. Very large outputs
may be truncated.
### Scenario: Entering Shell mode
If you're doing a lot of manual work, toggle "Shell Mode" by typing `!` and
pressing **Enter**. Now, everything you type is sent to the shell until you exit
(usually by pressing **Esc** or typing `exit`).
## How to automate complex tasks
You can automate tasks using a combination of Gemini CLI and shell commands.
### Scenario: Run tests and fix failures
You want to run tests and fix any failures.
**Prompt:**
`Run the unit tests. If any fail, analyze the error and try to fix the code.`
**Workflow:**
1. Gemini calls `run_shell_command('npm test')`.
2. You see a confirmation prompt: `Allow command 'npm test'? [y/N]`.
3. You press `y`.
4. The tests run. If they fail, Gemini reads the error output.
5. Gemini uses `read_file` to inspect the failing test.
6. Gemini uses `replace` to fix the bug.
7. Gemini runs `npm test` again to verify the fix.
This loop lets Gemini work autonomously.
## How to manage background processes
You can ask Gemini to start long-running tasks, like development servers or file
watchers.
**Prompt:** `Start the React dev server in the background.`
Gemini will run the command (e.g., `npm run dev`) and detach it.
### Scenario: Viewing active shells
To see what's running in the background, use the `/shells` command.
**Command:** `/shells`
This opens a dashboard where you can view logs or kill runaway processes.
## How to handle interactive commands
Gemini CLI attempts to handle interactive commands (like `git add -p` or
confirmation prompts) by streaming the output to you. However, for highly
interactive tools (like `vim` or `top`), it's often better to run them yourself
in a separate terminal window or use the `!` prefix.
## Safety features
Giving an AI access to your shell is powerful but risky. Gemini CLI includes
several safety layers.
### Confirmation prompts
By default, **every** shell command requested by the agent requires your
explicit approval.
- **Allow once:** Runs the command one time.
- **Allow always:** Trusts this specific command for the rest of the session.
- **Deny:** Stops the agent.
### Sandboxing
For maximum security, especially when running untrusted code or exploring new
projects, we strongly recommend enabling Sandboxing. This runs all shell
commands inside a secure Docker container.
**Enable sandboxing:** Use the `--sandbox` flag when starting the CLI:
`gemini --sandbox`.
## Next steps
- Learn about [Sandboxing](../../cli/sandbox.md) to safely run destructive
commands.
- See the [Shell tool reference](../../tools/shell.md) for configuration options
like timeouts and working directories.
- Explore [Task planning](task-planning.md) to see how shell commands fit into
larger workflows.
================================================
FILE: docs/cli/tutorials/skills-getting-started.md
================================================
# Get started with Agent Skills
Agent Skills extend Gemini CLI with specialized expertise. In this guide, you'll
learn how to create your first skill, bundle custom scripts, and activate them
during a session.
## How to create a skill
A skill is defined by a directory containing a `SKILL.md` file. Let's create an
**API Auditor** skill that helps you verify if local or remote endpoints are
responding correctly.
### Create the directory structure
1. Run the following command to create the folders:
**macOS/Linux**
```bash
mkdir -p .gemini/skills/api-auditor/scripts
```
**Windows (PowerShell)**
```powershell
New-Item -ItemType Directory -Force -Path ".gemini\skills\api-auditor\scripts"
```
### Create the definition
1. Create a file at `.gemini/skills/api-auditor/SKILL.md`. This tells the agent
_when_ to use the skill and _how_ to behave.
```markdown
---
name: api-auditor
description:
Expertise in auditing and testing API endpoints. Use when the user asks to
"check", "test", or "audit" a URL or API.
---
# API Auditor Instructions
You act as a QA engineer specialized in API reliability. When this skill is
active, you MUST:
1. **Audit**: Use the bundled `scripts/audit.js` utility to check the
status of the provided URL.
2. **Report**: Analyze the output (status codes, latency) and explain any
failures in plain English.
3. **Secure**: Remind the user if they are testing a sensitive endpoint
without an `https://` protocol.
```
### Add the tool logic
Skills can bundle resources like scripts.
1. Create a file at `.gemini/skills/api-auditor/scripts/audit.js`. This is the
code the agent will run.
```javascript
// .gemini/skills/api-auditor/scripts/audit.js
const url = process.argv[2];
if (!url) {
console.error('Usage: node audit.js <url>');
process.exit(1);
}
console.log(`Auditing ${url}...`);
fetch(url, { method: 'HEAD' })
.then((r) => console.log(`Result: Success (Status ${r.status})`))
.catch((e) => console.error(`Result: Failed (${e.message})`));
```
## How to verify discovery
Gemini CLI automatically discovers skills in the `.gemini/skills` directory. You
can also use `.agents/skills` as a more generic alternative. Check that it found
your new skill.
**Command:** `/skills list`
You should see `api-auditor` in the list of available skills.
## How to use the skill
Now, try it out. Start a new session and ask a question that triggers the
skill's description.
**User:** "Can you audit http://geminicli.com"
Gemini recognizes the request matches the `api-auditor` description and asks for
permission to activate it.
**Model:** (After calling `activate_skill`) "I've activated the **api-auditor**
skill. I'll run the audit script now..."
Gemini then uses the `run_shell_command` tool to execute your bundled Node
script:
`node .gemini/skills/api-auditor/scripts/audit.js http://geminicli.com`
## Next steps
- Explore the
[Agent Skills Authoring Guide](../../cli/skills.md#creating-a-skill) to learn
about more advanced features.
- Learn how to share skills via [Extensions](../../extensions/index.md).
================================================
FILE: docs/cli/tutorials/task-planning.md
================================================
# Plan tasks with todos
Keep complex jobs on the rails with Gemini CLI's built-in task planning. In this
guide, you'll learn how to ask for a plan, execute it step-by-step, and monitor
progress with the todo list.
## Prerequisites
- Gemini CLI installed and authenticated.
- A complex task in mind (e.g., a multi-file refactor or new feature).
## Why use task planning?
Standard LLMs have a limited context window and can "forget" the original goal
after 10 turns of code generation. Task planning provides:
1. **Visibility:** You see exactly what the agent plans to do _before_ it
starts.
2. **Focus:** The agent knows exactly which step it's working on right now.
3. **Resilience:** If the agent gets stuck, the plan helps it get back on
track.
## How to ask for a plan
The best way to trigger task planning is to explicitly ask for it.
**Prompt:**
`I want to migrate this project from JavaScript to TypeScript. Please make a plan first.`
Gemini will analyze your codebase and use the `write_todos` tool to generate a
structured list.
**Example Plan:**
1. [ ] Create `tsconfig.json`.
2. [ ] Rename `.js` files to `.ts`.
3. [ ] Fix type errors in `utils.js`.
4. [ ] Fix type errors in `server.js`.
5. [ ] Verify build passes.
## How to review and iterate
Once the plan is generated, it appears in your CLI. Review it.
- **Missing steps?** Tell the agent: "You forgot to add a step for installing
`@types/node`."
- **Wrong order?** Tell the agent: "Let's verify the build _after_ each file,
not just at the end."
The agent will update the todo list dynamically.
## How to execute the plan
Tell the agent to proceed.
**Prompt:** `Looks good. Start with the first step.`
As the agent works, you'll see the todo list update in real-time above the input
box.
- **Current focus:** The active task is highlighted (e.g.,
`[IN_PROGRESS] Create tsconfig.json`).
- **Progress:** Completed tasks are marked as done.
## How to monitor progress (`Ctrl+T`)
For a long-running task, the full todo list might be hidden to save space. You
can toggle the full view at any time.
**Action:** Press **Ctrl+T**.
This shows the complete list, including pending, in-progress, and completed
items. It's a great way to check "how much is left?" without scrolling back up.
## How to handle unexpected changes
Plans change. Maybe you discover a library is incompatible halfway through.
**Prompt:**
`Actually, let's skip the 'server.js' refactor for now. It's too risky.`
The agent will mark that task as `cancelled` or remove it, and move to the next
item. This dynamic adjustment is what makes the todo system powerful—it's a
living document, not a static text block.
## Next steps
- Explore [Session management](session-management.md) to save your plan and
finish it tomorrow.
- See the [Todo tool reference](../../tools/todos.md) for technical schema
details.
- Learn about [Memory management](memory-management.md) to persist planning
preferences (e.g., "Always create a test plan first").
================================================
FILE: docs/cli/tutorials/web-tools.md
================================================
# Web search and fetch
Access the live internet directly from your prompt. In this guide, you'll learn
how to search for up-to-date documentation, fetch deep context from specific
URLs, and apply that knowledge to your code.
## Prerequisites
- Gemini CLI installed and authenticated.
- An internet connection.
## How to research new technologies
Imagine you want to use a library released yesterday. The model doesn't know
about it yet. You need to teach it.
### Scenario: Find documentation
**Prompt:**
`Search for the 'Bun 1.0' release notes and summarize the key changes.`
Gemini uses the `google_web_search` tool to find relevant pages and synthesizes
an answer. This "grounding" process ensures the agent isn't hallucinating
features that don't exist.
**Prompt:** `Find the documentation for the 'React Router v7' loader API.`
## How to fetch deep context
Search gives you a summary, but sometimes you need the raw details. The
`web_fetch` tool lets you feed a specific URL directly into the agent's context.
### Scenario: Reading a blog post
You found a blog post with the exact solution to your bug.
**Prompt:**
`Read https://example.com/fixing-memory-leaks and explain how to apply it to my code.`
Gemini will retrieve the page content (stripping away ads and navigation) and
use it to answer your question.
### Scenario: Comparing sources
You can even fetch multiple pages to compare approaches.
**Prompt:**
`Compare the pagination patterns in https://api.example.com/v1/docs and https://api.example.com/v2/docs.`
## How to apply knowledge to code
The real power comes when you combine web tools with file editing.
**Workflow:**
1. **Search:** "How do I implement auth with Supabase?"
2. **Fetch:** "Read this guide: https://supabase.com/docs/guides/auth."
3. **Implement:** "Great. Now use that pattern to create an `auth.ts` file in
my project."
## How to troubleshoot errors
When you hit an obscure error message, paste it into the chat.
**Prompt:**
`I'm getting 'Error: hydration mismatch' in Next.js. Search for recent solutions.`
The agent will search sources such as GitHub issues, StackOverflow, and forums
to find relevant fixes that might be too new to be in its base training set.
## Next steps
- Explore [File management](file-management.md) to see how to apply the code you
generate.
- See the [Web search tool reference](../../tools/web-search.md) for citation
details.
- See the [Web fetch tool reference](../../tools/web-fetch.md) for technical
limitations.
================================================
FILE: docs/core/index.md
================================================
# Gemini CLI core
Gemini CLI's core package (`packages/core`) is the backend portion of Gemini
CLI, handling communication with the Gemini API, managing tools, and processing
requests sent from `packages/cli`. For a general overview of Gemini CLI, see the
[main documentation page](../index.md).
## Navigating this section
- **[Sub-agents (experimental)](./subagents.md):** Learn how to create and use
specialized sub-agents for complex tasks.
- **[Core tools reference](../reference/tools.md):** Information on how tools
are defined, registered, and used by the core.
- **[Memory Import Processor](../reference/memport.md):** Documentation for the
modular GEMINI.md import feature using @file.md syntax.
- **[Policy Engine](../reference/policy-engine.md):** Use the Policy Engine for
fine-grained control over tool execution.
- **[Local Model Routing (experimental)](./local-model-routing.md):** Learn how
to enable use of a local Gemma model for model routing decisions.
## Role of the core
While the `packages/cli` portion of Gemini CLI provides the user interface,
`packages/core` is responsible for:
- **Gemini API interaction:** Securely communicating with the Google Gemini API,
sending user prompts, and receiving model responses.
- **Prompt engineering:** Constructing effective prompts for the Gemini model,
potentially incorporating conversation history, tool definitions, and
instructional context from `GEMINI.md` files.
- **Tool management & orchestration:**
- Registering available tools (e.g., file system tools, shell command
execution).
- Interpreting tool use requests from the Gemini model.
- Executing the requested tools with the provided arguments.
- Returning tool execution results to the Gemini model for further processing.
- **Session and state management:** Keeping track of the conversation state,
including history and any relevant context required for coherent interactions.
- **Configuration:** Managing core-specific configurations, such as API key
access, model selection, and tool settings.
## Security considerations
The core plays a vital role in security:
- **API key management:** It handles the `GEMINI_API_KEY` and ensures it's used
securely when communicating with the Gemini API.
- **Tool execution:** When tools interact with the local system (e.g.,
`run_shell_command`), the core (and its underlying tool implementations) must
do so with appropriate caution, often involving sandboxing mechanisms to
prevent unintended modifications.
## Chat history compression
To ensure that long conversations don't exceed the token limits of the Gemini
model, the core includes a chat history compression feature.
When a conversation approaches the token limit for the configured model, the
core automatically compresses the conversation history before sending it to the
model. This compression is designed to be lossless in terms of the information
conveyed, but it reduces the overall number of tokens used.
You can find the token limits for each model in the
[Google AI documentation](https://ai.google.dev/gemini-api/docs/models).
## Model fallback
Gemini CLI includes a model fallback mechanism to ensure that you can continue
to use the CLI even if the default "pro" model is rate-limited.
If you are using the default "pro" model and the CLI detects that you are being
rate-limited, it automatically switches to the "flash" model for the current
session. This allows you to continue working without interruption.
Internal utility calls that use `gemini-2.5-flash-lite` (for example, prompt
completion and classification) silently fall back to `gemini-2.5-flash` and
`gemini-2.5-pro` when quota is exhausted, without changing the configured model.
## File discovery service
The file discovery service is responsible for finding files in the project that
are relevant to the current context. It is used by the `@` command and other
tools that need to access files.
## Memory discovery service
The memory discovery service is responsible for finding and loading the
`GEMINI.md` files that provide context to the model. It searches for these files
in a hierarchical manner, starting from the current working directory and moving
up to the project root and the user's home directory. It also searches in
subdirectories.
This allows you to have global, project-level, and component-level context
files, which are all combined to provide the model with the most relevant
information.
You can use the [`/memory` command](../reference/commands.md) to `show`, `add`,
and `refresh` the content of loaded `GEMINI.md` files.
## Citations
When Gemini finds it is reciting text from a source it appends the citation to
the output. It is enabled by default but can be disabled with the
`ui.showCitations` setting.
- When proposing an edit the citations display before giving the user the option
to accept.
- Citations are always shown at the end of the model’s turn.
- We deduplicate citations and display them in alphabetical order.
================================================
FILE: docs/core/local-model-routing.md
================================================
# Local Model Routing (experimental)
Gemini CLI supports using a local model for
[routing decisions](../cli/model-routing.md). When configured, Gemini CLI will
use a locally-running **Gemma** model to make routing decisions (instead of
sending routing decisions to a hosted model).
This feature can help reduce costs associated with hosted model usage while
offering similar routing decision latency and quality.
> **Note: Local model routing is currently an experimental feature.**
## Setup
Using a Gemma model for routing decisions requires that an implementation of a
Gemma model be running locally on your machine, served behind an HTTP endpoint
and accessed via the Gemini API.
To serve the Gemma model, follow these steps:
### Download the LiteRT-LM runtime
The [LiteRT-LM](https://github.com/google-ai-edge/LiteRT-LM) runtime offers
pre-built binaries for locally-serving models. Download the binary appropriate
for your system.
#### Windows
1. Download
[lit.windows_x86_64.exe](https://github.com/google-ai-edge/LiteRT-LM/releases/download/v0.9.0-alpha03/lit.windows_x86_64.exe).
2. Using GPU on Windows requires the DirectXShaderCompiler. Download the
[dxc zip from the latest release](https://github.com/microsoft/DirectXShaderCompiler/releases/download/v1.8.2505.1/dxc_2025_07_14.zip).
Unzip the archive and from the architecture-appropriate `bin\` directory, and
copy the `dxil.dll` and `dxcompiler.dll` into the same location as you saved
`lit.windows_x86_64.exe`.
3. (Optional) Test starting the runtime:
`.\lit.windows_x86_64.exe serve --verbose`
#### Linux
1. Download
[lit.linux_x86_64](https://github.com/google-ai-edge/LiteRT-LM/releases/download/v0.9.0-alpha03/lit.linux_x86_64).
2. Ensure the binary is executable: `chmod a+x lit.linux_x86_64`
3. (Optional) Test starting the runtime: `./lit.linux_x86_64 serve --verbose`
#### MacOS
1. Download
[lit-macos-arm64](https://github.com/google-ai-edge/LiteRT-LM/releases/download/v0.9.0-alpha03/lit.macos_arm64).
2. Ensure the binary is executable: `chmod a+x lit.macos_arm64`
3. (Optional) Test starting the runtime: `./lit.macos_arm64 serve --verbose`
> **Note**: MacOS can be configured to only allow binaries
> Known Developers". If you encounter an error message when attempting to run
> the binary, you will need to allow the application. One option is to visit
> `System Settings -> Privacy & Security`, scroll to `Security`, and click
> `"Allow Anyway"` for `"lit.macos_arm64"`. Another option is to run
> `xattr -d com.apple.quarantine lit.macos_arm64` from the commandline.
### Download the Gemma Model
Before using Gemma, you will need to download the model (and agree to the Terms
of Service).
This can be done via the LiteRT-LM runtime.
#### Windows
```bash
$ .\lit.windows_x86_64.exe pull gemma3-1b-gpu-custom
[Legal] The model you are about to download is governed by
the Gemma Terms of Use and Prohibited Use Policy. Please review these terms and ensure you agree before continuing.
Full Terms: https://ai.google.dev/gemma/terms
Prohibited Use Policy: https://ai.google.dev/gemma/prohibited_use_policy
Do you accept these terms? (Y/N): Y
Terms accepted.
Downloading model 'gemma3-1b-gpu-custom' ...
Downloading... 968.6 MB
Download complete.
```
#### Linux
```bash
$ ./lit.linux_x86_64 pull gemma3-1b-gpu-custom
[Legal] The model you are about to download is governed by
the Gemma Terms of Use and Prohibited Use Policy. Please review these terms and ensure you agree before continuing.
Full Terms: https://ai.google.dev/gemma/terms
Prohibited Use Policy: https://ai.google.dev/gemma/prohibited_use_policy
Do you accept these terms? (Y/N): Y
Terms accepted.
Downloading model 'gemma3-1b-gpu-custom' ...
Downloading... 968.6 MB
Download complete.
```
#### MacOS
```bash
$ ./lit.macos_arm64 pull gemma3-1b-gpu-custom
[Legal] The model you are about to download is governed by
the Gemma Terms of Use and Prohibited Use Policy. Please review these terms and ensure you agree before continuing.
Full Terms: https://ai.google.dev/gemma/terms
Prohibited Use Policy: https://ai.google.dev/gemma/prohibited_use_policy
Do you accept these terms? (Y/N): Y
Terms accepted.
Downloading model 'gemma3-1b-gpu-custom' ...
Downloading... 968.6 MB
Download complete.
```
### Start LiteRT-LM Runtime
Using the command appropriate to your system, start the LiteRT-LM runtime.
Configure the port that you want to use for your Gemma model. For the purposes
of this document, we will use port `9379`.
Example command for MacOS: `./lit.macos_arm64 serve --port=9379 --verbose`
### (Optional) Verify Model Serving
Send a quick prompt to the model via HTTP to validate successful model serving.
This will cause the runtime to download the model and run it once.
You should see a short joke in the server output as an indicator of success.
#### Windows
```
# Run this in PowerShell to send a request to the server
$uri = "http://localhost:9379/v1beta/models/gemma3-1b-gpu-custom:generateContent"
$body = @{contents = @( @{
role = "user"
parts = @( @{ text = "Tell me a joke." } )
})} | ConvertTo-Json -Depth 10
Invoke-RestMethod -Uri $uri -Method Post -Body $body -ContentType "application/json"
```
#### Linux/MacOS
```bash
$ curl "http://localhost:9379/v1beta/models/gemma3-1b-gpu-custom:generateContent" \
-H 'Content-Type: application/json' \
-X POST \
-d '{"contents":[{"role":"user","parts":[{"text":"Tell me a joke."}]}]}'
```
## Configuration
To use a local Gemma model for routing, you must explicitly enable it in your
`settings.json`:
```json
{
"experimental": {
"gemmaModelRouter": {
"enabled": true,
"classifier": {
"host": "http://localhost:9379",
"model": "gemma3-1b-gpu-custom"
}
}
}
}
```
> Use the port you started your LiteRT-LM runtime on in the setup steps.
### Configuration schema
| Field | Type | Required | Description |
| :----------------- | :------ | :------- | :----------------------------------------------------------------------------------------- |
| `enabled` | boolean | Yes | Must be `true` to enable the feature. |
| `classifier` | object | Yes | The configuration for the local model endpoint. It includes the host and model specifiers. |
| `classifier.host`  | string  | Yes      | The URL to the local model server. Should be `http://localhost:<port>`.                    |
| `classifier.model` | string | Yes | The model name to use for decisions. Must be `"gemma3-1b-gpu-custom"`. |
> **Note: You will need to restart after configuration changes for local model
> routing to take effect.**
================================================
FILE: docs/core/remote-agents.md
================================================
# Remote Subagents (experimental)
Gemini CLI supports connecting to remote subagents using the Agent-to-Agent
(A2A) protocol. This allows Gemini CLI to interact with other agents, expanding
its capabilities by delegating tasks to remote services.
Gemini CLI can connect to any compliant A2A agent. You can find samples of A2A
agents in the following repositories:
- [ADK Samples (Python)](https://github.com/google/adk-samples/tree/main/python)
- [ADK Python Contributing Samples](https://github.com/google/adk-python/tree/main/contributing/samples)
> [!NOTE]
> Remote subagents are currently an experimental feature.
## Configuration
To use remote subagents, you must explicitly enable them in your
`settings.json`:
```json
{
"experimental": {
"enableAgents": true
}
}
```
## Proxy support
Gemini CLI routes traffic to remote agents through an HTTP/HTTPS proxy if one is
configured. It uses the `general.proxy` setting in your `settings.json` file or
standard environment variables (`HTTP_PROXY`, `HTTPS_PROXY`).
```json
{
"general": {
"proxy": "http://my-proxy:8080"
}
}
```
## Defining remote subagents
Remote subagents are defined as Markdown files (`.md`) with YAML frontmatter.
You can place them in:
1. **Project-level:** `.gemini/agents/*.md` (Shared with your team)
2. **User-level:** `~/.gemini/agents/*.md` (Personal agents)
### Configuration schema
| Field | Type | Required | Description |
| :--------------- | :----- | :------- | :------------------------------------------------------------------------------------------------------------- |
| `kind` | string | Yes | Must be `remote`. |
| `name` | string | Yes | A unique name for the agent. Must be a valid slug (lowercase letters, numbers, hyphens, and underscores only). |
| `agent_card_url` | string | Yes | The URL to the agent's A2A card endpoint. |
| `auth` | object | No | Authentication configuration. See [Authentication](#authentication). |
### Single-subagent example
```markdown
---
kind: remote
name: my-remote-agent
agent_card_url: https://example.com/agent-card
---
```
### Multi-subagent example
The loader explicitly supports multiple remote subagents defined in a single
Markdown file.
```markdown
---
- kind: remote
name: remote-1
agent_card_url: https://example.com/1
- kind: remote
name: remote-2
agent_card_url: https://example.com/2
---
```
> [!NOTE] Mixed local and remote agents, or multiple local agents, are not
> supported in a single file; the list format is currently remote-only.
## Authentication
Many remote agents require authentication. Gemini CLI supports several
authentication methods aligned with the
[A2A security specification](https://a2a-protocol.org/latest/specification/#451-securityscheme).
Add an `auth` block to your agent's frontmatter to configure credentials.
### Supported auth types
Gemini CLI supports the following authentication types:
| Type | Description |
| :------------------- | :--------------------------------------------------------------------------------------------- |
| `apiKey` | Send a static API key as an HTTP header. |
| `http` | HTTP authentication (Bearer token, Basic credentials, or any IANA-registered scheme). |
| `google-credentials` | Google Application Default Credentials (ADC). Automatically selects access or identity tokens. |
| `oauth2` | OAuth 2.0 Authorization Code flow with PKCE. Opens a browser for interactive sign-in. |
### Dynamic values
For `apiKey` and `http` auth types, secret values (`key`, `token`, `username`,
`password`, `value`) support dynamic resolution:
| Format | Description | Example |
| :---------- | :-------------------------------------------------- | :------------------------- |
| `$ENV_VAR` | Read from an environment variable. | `$MY_API_KEY` |
| `!command` | Execute a shell command and use the trimmed output. | `!gcloud auth print-token` |
| literal | Use the string as-is. | `sk-abc123` |
| `$$` / `!!` | Escape prefix. `$$FOO` becomes the literal `$FOO`. | `$$NOT_AN_ENV_VAR` |
> **Security tip:** Prefer `$ENV_VAR` or `!command` over embedding secrets
> directly in agent files, especially for project-level agents checked into
> version control.
### API key (`apiKey`)
Sends an API key as an HTTP header on every request.
| Field | Type | Required | Description |
| :----- | :----- | :------- | :---------------------------------------------------- |
| `type` | string | Yes | Must be `apiKey`. |
| `key` | string | Yes | The API key value. Supports dynamic values. |
| `name` | string | No | Header name to send the key in. Default: `X-API-Key`. |
```yaml
---
kind: remote
name: my-agent
agent_card_url: https://example.com/agent-card
auth:
type: apiKey
key: $MY_API_KEY
---
```
### HTTP authentication (`http`)
Supports Bearer tokens, Basic auth, and arbitrary IANA-registered HTTP
authentication schemes.
#### Bearer token
Use the following fields to configure a Bearer token:
| Field | Type | Required | Description |
| :------- | :----- | :------- | :----------------------------------------- |
| `type` | string | Yes | Must be `http`. |
| `scheme` | string | Yes | Must be `Bearer`. |
| `token` | string | Yes | The bearer token. Supports dynamic values. |
```yaml
auth:
type: http
scheme: Bearer
token: $MY_BEARER_TOKEN
```
#### Basic authentication
Use the following fields to configure Basic authentication:
| Field | Type | Required | Description |
| :--------- | :----- | :------- | :------------------------------------- |
| `type` | string | Yes | Must be `http`. |
| `scheme` | string | Yes | Must be `Basic`. |
| `username` | string | Yes | The username. Supports dynamic values. |
| `password` | string | Yes | The password. Supports dynamic values. |
```yaml
auth:
type: http
scheme: Basic
username: $MY_USERNAME
password: $MY_PASSWORD
```
#### Raw scheme
For any other IANA-registered scheme (for example, Digest, HOBA), provide the
raw authorization value.
| Field | Type | Required | Description |
| :------- | :----- | :------- | :---------------------------------------------------------------------------- |
| `type` | string | Yes | Must be `http`. |
| `scheme` | string | Yes | The scheme name (for example, `Digest`). |
| `value`  | string | Yes      | Raw value sent as `Authorization: <value>`. Supports dynamic values.           |
```yaml
auth:
type: http
scheme: Digest
value: $MY_DIGEST_VALUE
```
### Google Application Default Credentials (`google-credentials`)
Uses
[Google Application Default Credentials (ADC)](https://cloud.google.com/docs/authentication/application-default-credentials)
to authenticate with Google Cloud services and Cloud Run endpoints. This is the
recommended auth method for agents hosted on Google Cloud infrastructure.
| Field | Type | Required | Description |
| :------- | :------- | :------- | :-------------------------------------------------------------------------- |
| `type` | string | Yes | Must be `google-credentials`. |
| `scopes` | string[] | No | OAuth scopes. Defaults to `https://www.googleapis.com/auth/cloud-platform`. |
```yaml
---
kind: remote
name: my-gcp-agent
agent_card_url: https://my-agent-xyz.run.app/.well-known/agent.json
auth:
type: google-credentials
---
```
#### How token selection works
The provider automatically selects the correct token type based on the agent's
host:
| Host pattern | Token type | Use case |
| :----------------- | :----------------- | :------------------------------------------ |
| `*.googleapis.com` | **Access token** | Google APIs (Agent Engine, Vertex AI, etc.) |
| `*.run.app` | **Identity token** | Cloud Run services |
- **Access tokens** authorize API calls to Google services. They are scoped
(default: `cloud-platform`) and fetched via `GoogleAuth.getClient()`.
- **Identity tokens** prove the caller's identity to a service that validates
the token's audience. The audience is set to the target host. These are
fetched via `GoogleAuth.getIdTokenClient()`.
Both token types are cached and automatically refreshed before expiry.
#### Setup
`google-credentials` relies on ADC, which means your environment must have
credentials configured. Common setups:
- **Local development:** Run `gcloud auth application-default login` to
authenticate with your Google account.
- **CI / Cloud environments:** Use a service account. Set the
`GOOGLE_APPLICATION_CREDENTIALS` environment variable to the path of your
service account key file, or use workload identity on GKE / Cloud Run.
#### Allowed hosts
For security, `google-credentials` only sends tokens to known Google-owned
hosts:
- `*.googleapis.com`
- `*.run.app`
Requests to any other host will be rejected with an error. If your agent is
hosted on a different domain, use one of the other auth types (`apiKey`, `http`,
or `oauth2`).
#### Examples
The following examples demonstrate how to configure Google Application Default
Credentials.
**Cloud Run agent:**
```yaml
---
kind: remote
name: cloud-run-agent
agent_card_url: https://my-agent-xyz.run.app/.well-known/agent.json
auth:
type: google-credentials
---
```
**Google API with custom scopes:**
```yaml
---
kind: remote
name: vertex-agent
agent_card_url: https://us-central1-aiplatform.googleapis.com/.well-known/agent.json
auth:
type: google-credentials
scopes:
- https://www.googleapis.com/auth/cloud-platform
- https://www.googleapis.com/auth/compute
---
```
### OAuth 2.0 (`oauth2`)
Performs an interactive OAuth 2.0 Authorization Code flow with PKCE. On first
use, Gemini CLI opens your browser for sign-in and persists the resulting tokens
for subsequent requests.
| Field | Type | Required | Description |
| :------------------ | :------- | :------- | :------------------------------------------------------------------------------------------------------------------------------------------------- |
| `type` | string | Yes | Must be `oauth2`. |
| `client_id` | string | Yes\* | OAuth client ID. Required for interactive auth. |
| `client_secret` | string | No\* | OAuth client secret. Required by most authorization servers (confidential clients). Can be omitted for public clients that don't require a secret. |
| `scopes` | string[] | No | Requested scopes. Can also be discovered from the agent card. |
| `authorization_url` | string | No | Authorization endpoint. Discovered from the agent card if omitted. |
| `token_url` | string | No | Token endpoint. Discovered from the agent card if omitted. |
```yaml
---
kind: remote
name: oauth-agent
agent_card_url: https://example.com/.well-known/agent.json
auth:
type: oauth2
client_id: my-client-id.apps.example.com
---
```
If the agent card advertises an `oauth2` security scheme with
`authorizationCode` flow, the `authorization_url`, `token_url`, and `scopes` are
automatically discovered. You only need to provide `client_id` (and
`client_secret` if required).
Tokens are persisted to disk and refreshed automatically when they expire.
### Auth validation
When Gemini CLI loads a remote agent, it validates your auth configuration
against the agent card's declared `securitySchemes`. If the agent requires
authentication that you haven't configured, you'll see an error describing
what's needed.
`google-credentials` is treated as compatible with `http` Bearer security
schemes, since it produces Bearer tokens.
### Auth retry behavior
All auth providers automatically retry on `401` and `403` responses by
re-fetching credentials (up to 2 retries). This handles cases like expired
tokens or rotated credentials. For `apiKey` with `!command` values, the command
is re-executed on retry to fetch a fresh key.
### Agent card fetching and auth
When connecting to a remote agent, Gemini CLI first fetches the agent card
**without** authentication. If the card endpoint returns a `401` or `403`, it
retries the fetch **with** the configured auth headers. This lets agents have
publicly accessible cards while protecting their task endpoints, or to protect
both behind auth.
## Managing Subagents
Users can manage subagents using the following commands within the Gemini CLI:
- `/agents list`: Displays all available local and remote subagents.
- `/agents reload`: Reloads the agent registry. Use this after adding or
modifying agent definition files.
- `/agents enable <name>`: Enables a specific subagent.
- `/agents disable <name>`: Disables a specific subagent.
> [!TIP]
> You can use the `@cli_help` agent within Gemini CLI for assistance
> with configuring subagents.
================================================
FILE: docs/core/subagents.md
================================================
# Subagents (experimental)
Subagents are specialized agents that operate within your main Gemini CLI
session. They are designed to handle specific, complex tasks—like deep codebase
analysis, documentation lookup, or domain-specific reasoning—without cluttering
the main agent's context or toolset.
> [!NOTE]
> Subagents are currently an experimental feature.
>
To use custom subagents, you must ensure they are enabled in your
`settings.json` (enabled by default):
```json
{
"experimental": { "enableAgents": true }
}
```
## What are subagents?
Subagents are "specialists" that the main Gemini agent can hire for a specific
job.
- **Focused context:** Each subagent has its own system prompt and persona.
- **Specialized tools:** Subagents can have a restricted or specialized set of
tools.
- **Independent context window:** Interactions with a subagent happen in a
separate context loop, which saves tokens in your main conversation history.
Subagents are exposed to the main agent as a tool of the same name. When the
main agent calls the tool, it delegates the task to the subagent. Once the
subagent completes its task, it reports back to the main agent with its
findings.
## How to use subagents
You can use subagents through automatic delegation or by explicitly forcing them
in your prompt.
### Automatic delegation
Gemini CLI's main agent is instructed to use specialized subagents when a task
matches their expertise. For example, if you ask "How does the auth system
work?", the main agent may decide to call the `codebase_investigator` subagent
to perform the research.
### Forcing a subagent (@ syntax)
You can explicitly direct a task to a specific subagent by using the `@` symbol
followed by the subagent's name at the beginning of your prompt. This is useful
when you want to bypass the main agent's decision-making and go straight to a
specialist.
**Example:**
```bash
@codebase_investigator Map out the relationship between the AgentRegistry and the LocalAgentExecutor.
```
When you use the `@` syntax, the CLI injects a system note that nudges the
primary model to use that specific subagent tool immediately.
## Built-in subagents
Gemini CLI comes with the following built-in subagents:
### Codebase Investigator
- **Name:** `codebase_investigator`
- **Purpose:** Analyze the codebase, reverse engineer, and understand complex
dependencies.
- **When to use:** "How does the authentication system work?", "Map out the
dependencies of the `AgentRegistry` class."
- **Configuration:** Enabled by default. You can override its settings in
`settings.json` under `agents.overrides`. Example (forcing a specific model
and increasing turns):
```json
{
"agents": {
"overrides": {
"codebase_investigator": {
"modelConfig": { "model": "gemini-3-flash-preview" },
"runConfig": { "maxTurns": 50 }
}
}
}
}
```
### CLI Help Agent
- **Name:** `cli_help`
- **Purpose:** Get expert knowledge about Gemini CLI itself, its commands,
configuration, and documentation.
- **When to use:** "How do I configure a proxy?", "What does the `/rewind`
command do?"
- **Configuration:** Enabled by default.
### Generalist Agent
- **Name:** `generalist_agent`
- **Purpose:** Route tasks to the appropriate specialized subagent.
- **When to use:** Implicitly used by the main agent for routing. Not directly
invoked by the user.
- **Configuration:** Enabled by default. No specific configuration options.
### Browser Agent (experimental)
- **Name:** `browser_agent`
- **Purpose:** Automate web browser tasks — navigating websites, filling forms,
clicking buttons, and extracting information from web pages — using the
accessibility tree.
- **When to use:** "Go to example.com and fill out the contact form," "Extract
the pricing table from this page," "Click the login button and enter my
credentials."
> [!NOTE]
> This is a preview feature currently under active development.
#### Prerequisites
The browser agent requires:
- **Chrome** version 144 or later (any recent stable release will work).
- **Node.js** with `npx` available (used to launch the
[`chrome-devtools-mcp`](https://www.npmjs.com/package/chrome-devtools-mcp)
server).
#### Enabling the browser agent
The browser agent is disabled by default. Enable it in your `settings.json`:
```json
{
"agents": {
"overrides": {
"browser_agent": {
"enabled": true
}
}
}
}
```
#### Session modes
The `sessionMode` setting controls how Chrome is launched and managed. Set it
under `agents.browser`:
```json
{
"agents": {
"overrides": {
"browser_agent": {
"enabled": true
}
},
"browser": {
"sessionMode": "persistent"
}
}
}
```
The available modes are:
| Mode | Description |
| :----------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `persistent` | **(Default)** Launches Chrome with a persistent profile stored at `~/.gemini/cli-browser-profile/`. Cookies, history, and settings are preserved between sessions. |
| `isolated` | Launches Chrome with a temporary profile that is deleted after each session. Use this for clean-state automation. |
| `existing` | Attaches to an already-running Chrome instance. You must enable remote debugging first by navigating to `chrome://inspect/#remote-debugging` in Chrome. No new browser process is launched. |
#### Configuration reference
All browser-specific settings go under `agents.browser` in your `settings.json`.
| Setting | Type | Default | Description |
| :------------ | :-------- | :------------- | :---------------------------------------------------------------------------------------------- |
| `sessionMode` | `string` | `"persistent"` | How Chrome is managed: `"persistent"`, `"isolated"`, or `"existing"`. |
| `headless` | `boolean` | `false` | Run Chrome in headless mode (no visible window). |
| `profilePath` | `string` | — | Custom path to a browser profile directory. |
| `visualModel` | `string` | — | Model override for the visual agent (for example, `"gemini-2.5-computer-use-preview-10-2025"`). |
#### Security
The browser agent enforces the following security restrictions:
- **Blocked URL patterns:** `file://`, `javascript:`, `data:text/html`,
`chrome://extensions`, and `chrome://settings/passwords` are always blocked.
- **Sensitive action confirmation:** Actions like form filling, file uploads,
and form submissions require user confirmation through the standard policy
engine.
#### Visual agent
By default, the browser agent interacts with pages through the accessibility
tree using element `uid` values. For tasks that require visual identification
(for example, "click the yellow button" or "find the red error message"), you
can enable the visual agent by setting a `visualModel`:
```json
{
"agents": {
"overrides": {
"browser_agent": {
"enabled": true
}
},
"browser": {
"visualModel": "gemini-2.5-computer-use-preview-10-2025"
}
}
}
```
When enabled, the agent gains access to the `analyze_screenshot` tool, which
captures a screenshot and sends it to the vision model for analysis. The model
returns coordinates and element descriptions that the browser agent uses with
the `click_at` tool for precise, coordinate-based interactions.
> [!NOTE]
> The visual agent requires API key or Vertex AI authentication. It is
> not available when using "Sign in with Google".
## Creating custom subagents
You can create your own subagents to automate specific workflows or enforce
specific personas. To use custom subagents, you must enable them in your
`settings.json`:
```json
{
"experimental": {
"enableAgents": true
}
}
```
### Agent definition files
Custom agents are defined as Markdown files (`.md`) with YAML frontmatter. You
can place them in:
1. **Project-level:** `.gemini/agents/*.md` (Shared with your team)
2. **User-level:** `~/.gemini/agents/*.md` (Personal agents)
### File format
The file **MUST** start with YAML frontmatter enclosed in triple-dashes `---`.
The body of the markdown file becomes the agent's **System Prompt**.
**Example: `.gemini/agents/security-auditor.md`**
```markdown
---
name: security-auditor
description: Specialized in finding security vulnerabilities in code.
kind: local
tools:
- read_file
- grep_search
model: gemini-3-flash-preview
temperature: 0.2
max_turns: 10
---
You are a ruthless Security Auditor. Your job is to analyze code for potential
vulnerabilities.
Focus on:
1. SQL Injection
2. XSS (Cross-Site Scripting)
3. Hardcoded credentials
4. Unsafe file operations
When you find a vulnerability, explain it clearly and suggest a fix. Do not fix
it yourself; just report it.
```
### Configuration schema
| Field | Type | Required | Description |
| :------------- | :----- | :------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `name` | string | Yes | Unique identifier (slug) used as the tool name for the agent. Only lowercase letters, numbers, hyphens, and underscores. |
| `description` | string | Yes | Short description of what the agent does. This is visible to the main agent to help it decide when to call this subagent. |
| `kind` | string | No | `local` (default) or `remote`. |
| `tools` | array | No | List of tool names this agent can use. Supports wildcards: `*` (all tools), `mcp_*` (all MCP tools), `mcp_server_*` (all tools from a server). **If omitted, it inherits all tools from the parent session.** |
| `model` | string | No | Specific model to use (e.g., `gemini-3-preview`). Defaults to `inherit` (uses the main session model). |
| `temperature` | number | No | Model temperature (0.0 - 2.0). Defaults to `1`. |
| `max_turns` | number | No | Maximum number of conversation turns allowed for this agent before it must return. Defaults to `30`. |
| `timeout_mins` | number | No | Maximum execution time in minutes. Defaults to `10`. |
### Tool wildcards
When defining `tools` for a subagent, you can use wildcards to quickly grant
access to groups of tools:
- `*`: Grant access to all available built-in and discovered tools.
- `mcp_*`: Grant access to all tools from all connected MCP servers.
- `mcp_my-server_*`: Grant access to all tools from a specific MCP server named
`my-server`.
### Isolation and recursion protection
Each subagent runs in its own isolated context loop. This means:
- **Independent history:** The subagent's conversation history does not bloat
the main agent's context.
- **Isolated tools:** The subagent only has access to the tools you explicitly
grant it.
- **Recursion protection:** To prevent infinite loops and excessive token usage,
subagents **cannot** call other subagents. If a subagent is granted the `*`
tool wildcard, it will still be unable to see or invoke other agents.
## Managing subagents
You can manage subagents interactively using the `/agents` command or
persistently via `settings.json`.
### Interactive management (/agents)
If you are in an interactive CLI session, you can use the `/agents` command to
manage subagents without editing configuration files manually. This is the
recommended way to quickly enable, disable, or re-configure agents on the fly.
For a full list of sub-commands and usage, see the
[`/agents` command reference](../reference/commands.md#agents).
### Persistent configuration (settings.json)
While the `/agents` command and agent definition files provide a starting point,
you can use `settings.json` for global, persistent overrides. This is useful for
enforcing specific models or execution limits across all sessions.
#### `agents.overrides`
Use this to enable or disable specific agents or override their run
configurations.
```json
{
"agents": {
"overrides": {
"security-auditor": {
"enabled": false,
"runConfig": {
"maxTurns": 20,
"maxTimeMinutes": 10
}
}
}
}
}
```
#### `modelConfigs.overrides`
You can target specific subagents with custom model settings (like system
instruction prefixes or specific safety settings) using the `overrideScope`
field.
```json
{
"modelConfigs": {
"overrides": [
{
"match": { "overrideScope": "security-auditor" },
"modelConfig": {
"generateContentConfig": {
"temperature": 0.1
}
}
}
]
}
}
```
### Optimizing your subagent
The main agent's system prompt encourages it to use an expert subagent when one
is available. It decides whether an agent is a relevant expert based on the
agent's description. You can improve the reliability with which an agent is used
by updating the description to more clearly indicate:
- Its area of expertise.
- When it should be used.
- Some example scenarios.
For example, the following subagent description should be called fairly
consistently for Git operations.
> Git expert agent which should be used for all local and remote git operations.
> For example:
>
> - Making commits
> - Searching for regressions with bisect
> - Interacting with source control and issues providers such as GitHub.
If you need to further tune your subagent, you can do so by selecting the model
to optimize for with `/model` and then asking the model why it does not think
that your subagent was called with a specific prompt and the given description.
## Remote subagents (Agent2Agent) (experimental)
Gemini CLI can also delegate tasks to remote subagents using the Agent-to-Agent
(A2A) protocol.
> [!NOTE]
> Remote subagents are currently an experimental feature.
See the [Remote Subagents documentation](remote-agents.md) for detailed
configuration, authentication, and usage instructions.
## Extension subagents
Extensions can bundle and distribute subagents. See the
[Extensions documentation](../extensions/index.md#subagents) for details on how
to package agents within an extension.
================================================
FILE: docs/examples/proxy-script.md
================================================
# Example proxy script
The following is an example of a proxy script that can be used with the
`GEMINI_SANDBOX_PROXY_COMMAND` environment variable. This script only allows
`HTTPS` connections on port `443` to the domains in `ALLOWED_DOMAINS`
(`example.com` and `googleapis.com`, including their subdomains) and declines
all other requests.
```javascript
#!/usr/bin/env node
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
// Example proxy server that listens on :::8877 and only allows HTTPS connections to example.com.
// Set `GEMINI_SANDBOX_PROXY_COMMAND=scripts/example-proxy.js` to run proxy alongside sandbox
// Test via `curl https://example.com` inside sandbox (in shell mode or via shell tool)
import http from 'node:http';
import net from 'node:net';
import { URL } from 'node:url';
import console from 'node:console';
const PROXY_PORT = 8877;
const ALLOWED_DOMAINS = ['example.com', 'googleapis.com'];
const ALLOWED_PORT = '443';
const server = http.createServer((req, res) => {
// Deny all requests other than CONNECT for HTTPS
console.log(
`[PROXY] Denying non-CONNECT request for: ${req.method} ${req.url}`,
);
res.writeHead(405, { 'Content-Type': 'text/plain' });
res.end('Method Not Allowed');
});
server.on('connect', (req, clientSocket, head) => {
// req.url will be in the format "hostname:port" for a CONNECT request.
const { port, hostname } = new URL(`http://${req.url}`);
console.log(`[PROXY] Intercepted CONNECT request for: ${hostname}:${port}`);
if (
ALLOWED_DOMAINS.some(
(domain) => hostname == domain || hostname.endsWith(`.${domain}`),
) &&
port === ALLOWED_PORT
) {
console.log(`[PROXY] Allowing connection to ${hostname}:${port}`);
// Establish a TCP connection to the original destination.
const serverSocket = net.connect(port, hostname, () => {
clientSocket.write('HTTP/1.1 200 Connection Established\r\n\r\n');
// Create a tunnel by piping data between the client and the destination server.
serverSocket.write(head);
serverSocket.pipe(clientSocket);
clientSocket.pipe(serverSocket);
});
serverSocket.on('error', (err) => {
console.error(`[PROXY] Error connecting to destination: ${err.message}`);
clientSocket.end(`HTTP/1.1 502 Bad Gateway\r\n\r\n`);
});
} else {
console.log(`[PROXY] Denying connection to ${hostname}:${port}`);
clientSocket.end('HTTP/1.1 403 Forbidden\r\n\r\n');
}
clientSocket.on('error', (err) => {
// This can happen if the client hangs up.
console.error(`[PROXY] Client socket error: ${err.message}`);
});
});
server.listen(PROXY_PORT, () => {
const address = server.address();
console.log(`[PROXY] Proxy listening on ${address.address}:${address.port}`);
console.log(
`[PROXY] Allowing HTTPS connections to domains: ${ALLOWED_DOMAINS.join(', ')}`,
);
});
```
================================================
FILE: docs/extensions/best-practices.md
================================================
# Gemini CLI extension best practices
This guide covers best practices for developing, securing, and maintaining
Gemini CLI extensions.
## Development
Developing extensions for Gemini CLI is a lightweight, iterative process. Use
these strategies to build robust and efficient extensions.
### Structure your extension
While simple extensions may contain only a few files, we recommend an organized
structure for complex projects.
```text
my-extension/
├── package.json
├── tsconfig.json
├── gemini-extension.json
├── src/
│ ├── index.ts
│ └── tools/
└── dist/
```
- **Use TypeScript:** We strongly recommend using TypeScript for type safety and
improved developer experience.
- **Separate source and build:** Keep your source code in `src/` and output
build artifacts to `dist/`.
- **Bundle dependencies:** If your extension has many dependencies, bundle them
using a tool like `esbuild` to reduce installation time and avoid conflicts.
### Iterate with `link`
Use the `gemini extensions link` command to develop locally without reinstalling
your extension after every change.
```bash
cd my-extension
gemini extensions link .
```
Changes to your code are immediately available in the CLI after you rebuild the
project and restart the session.
### Use `GEMINI.md` effectively
Your `GEMINI.md` file provides essential context to the model.
- **Focus on goals:** Explain the high-level purpose of the extension and how to
interact with its tools.
- **Be concise:** Avoid dumping exhaustive documentation into the file. Use
clear, direct language.
- **Provide examples:** Include brief examples of how the model should use
specific tools or commands.
## Security
Follow the principle of least privilege and rigorous input validation when
building extensions.
### Minimal permissions
Only request the permissions your MCP server needs to function. Avoid giving the
model broad access (such as full shell access) if restricted tools are
sufficient.
If your extension uses powerful tools like `run_shell_command`, restrict them in
your `gemini-extension.json` file:
```json
{
"name": "my-safe-extension",
"excludeTools": ["run_shell_command(rm -rf *)"]
}
```
This ensures the CLI blocks dangerous commands even if the model attempts to
execute them.
### Validate inputs
Your MCP server runs on the user's machine. Always validate tool inputs to
prevent arbitrary code execution or unauthorized filesystem access.
```typescript
// Example: Validating paths
if (!path.resolve(inputPath).startsWith(path.resolve(allowedDir) + path.sep)) {
throw new Error('Access denied');
}
```
### Secure sensitive settings
If your extension requires API keys or other secrets, use the `sensitive: true`
option in your manifest. This ensures keys are stored in the system keychain and
obfuscated in the CLI output.
```json
"settings": [
{
"name": "API Key",
"envVar": "MY_API_KEY",
"sensitive": true
}
]
```
## Release
Follow standard versioning and release practices to ensure a smooth experience
for your users.
### Semantic versioning
Follow [Semantic Versioning (SemVer)](https://semver.org/) to communicate
changes clearly.
- **Major:** Breaking changes (e.g., renaming tools or changing arguments).
- **Minor:** New features (e.g., adding new tools or commands).
- **Patch:** Bug fixes and performance improvements.
### Release channels
Use Git branches to manage release channels. This lets users choose between
stability and the latest features.
```bash
# Install the stable version (default branch)
gemini extensions install github.com/user/repo
# Install the development version
gemini extensions install github.com/user/repo --ref dev
```
### Clean artifacts
When using GitHub Releases, ensure your archives only contain necessary files
(such as `dist/`, `gemini-extension.json`, and `package.json`). Exclude
`node_modules/` and `src/` to minimize download size.
## Test and verify
Test your extension thoroughly before releasing it to users.
- **Manual verification:** Use `gemini extensions link` to test your extension
in a live CLI session. Verify that tools appear in the debug console (F12) and
that custom commands resolve correctly.
- **Automated testing:** If your extension includes an MCP server, write unit
tests for your tool logic using a framework like Vitest or Jest. You can test
MCP tools in isolation by mocking the transport layer.
## Troubleshooting
Use these tips to diagnose and fix common extension issues.
### Extension not loading
If your extension doesn't appear in `/extensions list`:
- **Check the manifest:** Ensure `gemini-extension.json` is in the root
directory and contains valid JSON.
- **Verify the name:** The `name` field in the manifest must match the extension
directory name exactly.
- **Restart the CLI:** Extensions are loaded at the start of a session. Restart
Gemini CLI after making changes to the manifest or linking a new extension.
### MCP server failures
If your tools aren't working as expected:
- **Check the logs:** View the CLI logs to see if the MCP server failed to
start.
- **Test the command:** Run the server's `command` and `args` directly in your
terminal to ensure it starts correctly outside of Gemini CLI.
- **Debug console:** In interactive mode, press **F12** to open the debug
console and inspect tool calls and responses.
### Command conflicts
If a custom command isn't responding:
- **Check precedence:** Remember that user and project commands take precedence
over extension commands. Use the prefixed name (e.g., `/extension.command`) to
verify the extension's version.
- **Help command:** Run `/help` to see a list of all available commands and
their sources.
================================================
FILE: docs/extensions/index.md
================================================
# Gemini CLI extensions
Gemini CLI extensions package prompts, MCP servers, custom commands, themes,
hooks, sub-agents, and agent skills into a familiar and user-friendly format.
With extensions, you can expand the capabilities of Gemini CLI and share those
capabilities with others. They are designed to be easily installable and
shareable.
To see what's possible, browse the
[Gemini CLI extension gallery](https://geminicli.com/extensions/browse/).
## Choose your path
Choose the guide that best fits your needs.
### I want to use extensions
Learn how to discover, install, and manage extensions to enhance your Gemini CLI
experience.
- **[Manage extensions](#manage-extensions):** List and verify your installed
extensions.
- **[Install extensions](#installation):** Add new capabilities from GitHub or
local paths.
### I want to build extensions
Learn how to create, test, and share your own extensions with the community.
- **[Build extensions](writing-extensions.md):** Create your first extension
from a template.
- **[Best practices](best-practices.md):** Learn how to build secure and
reliable extensions.
- **[Publish to the gallery](releasing.md):** Share your work with the world.
## Manage extensions
Use the interactive `/extensions` command to verify your installed extensions
and their status:
```bash
/extensions list
```
You can also manage extensions from your terminal using the `gemini extensions`
command group:
```bash
gemini extensions list
```
## Installation
Install an extension by providing its GitHub repository URL. For example:
```bash
gemini extensions install https://github.com/gemini-cli-extensions/workspace
```
For more advanced installation options, see the
[Extension reference](reference.md#install-an-extension).
================================================
FILE: docs/extensions/reference.md
================================================
# Extension reference
This guide covers the `gemini extensions` commands and the structure of the
`gemini-extension.json` configuration file.
## Manage extensions
Use the `gemini extensions` command group to manage your extensions from the
terminal.
Note that commands like `gemini extensions install` are not supported within the
CLI's interactive mode. However, you can use the `/extensions list` command to
view installed extensions. All management operations, including updates to slash
commands, take effect only after you restart the CLI session.
### Install an extension
Install an extension by providing its GitHub repository URL or a local file
path.
Gemini CLI creates a copy of the extension during installation. You must run
`gemini extensions update` to pull changes from the source. To install from
GitHub, you must have `git` installed on your machine.
```bash
gemini extensions install <source> [--ref <ref>] [--auto-update] [--pre-release] [--consent]
```
- `<source>`: The GitHub URL or local path of the extension.
- `--ref`: The git ref (branch, tag, or commit) to install.
- `--auto-update`: Enable automatic updates for this extension.
- `--pre-release`: Enable installation of pre-release versions.
- `--consent`: Acknowledge security risks and skip the confirmation prompt.
### Uninstall an extension
To uninstall one or more extensions, use the `uninstall` command:
```bash
gemini extensions uninstall <extension-names...>
```
### Disable an extension
Extensions are enabled globally by default. You can disable an extension
entirely or for a specific workspace.
```bash
gemini extensions disable <extension-name> [--scope <scope>]
```
- `<extension-name>`: The name of the extension to disable.
- `--scope`: The scope to disable the extension in (`user` or `workspace`).
### Enable an extension
Re-enable a disabled extension using the `enable` command:
```bash
gemini extensions enable <extension-name> [--scope <scope>]
```
- `<extension-name>`: The name of the extension to enable.
- `--scope`: The scope to enable the extension in (`user` or `workspace`).
### Update an extension
Update an extension to the version specified in its `gemini-extension.json`
file.
```bash
gemini extensions update <extension-name>
```
To update all installed extensions at once:
```bash
gemini extensions update --all
```
### Create an extension from a template
Create a new extension directory using a built-in template.
```bash
gemini extensions new <path> [template]
```
- `<path>`: The directory to create.
- `[template]`: The template to use (e.g., `mcp-server`, `context`,
`custom-commands`).
### Link a local extension
Create a symbolic link between your development directory and the Gemini CLI
extensions directory. This lets you test changes immediately without
reinstalling.
```bash
gemini extensions link <path>
```
## Extension format
Gemini CLI loads extensions from `<home>/.gemini/extensions`. Each extension
must have a `gemini-extension.json` file in its root directory.
### `gemini-extension.json`
The manifest file defines the extension's behavior and configuration.
```json
{
"name": "my-extension",
"version": "1.0.0",
"description": "My awesome extension",
"mcpServers": {
"my-server": {
"command": "node",
"args": ["${extensionPath}/my-server.js"],
"cwd": "${extensionPath}"
}
},
"contextFileName": "GEMINI.md",
"excludeTools": ["run_shell_command"],
"migratedTo": "https://github.com/new-owner/new-extension-repo",
"plan": {
"directory": ".gemini/plans"
}
}
```
- `name`: The name of the extension. This is used to uniquely identify the
extension and for conflict resolution when extension commands have the same
name as user or project commands. The name should be lowercase or numbers and
use dashes instead of underscores or spaces. This is how users will refer to
your extension in the CLI. Note that we expect this name to match the
extension directory name.
- `version`: The version of the extension.
- `description`: A short description of the extension. This will be displayed on
[geminicli.com/extensions](https://geminicli.com/extensions).
- `migratedTo`: The URL of the new repository source for the extension. If this
is set, the CLI will automatically check this new source for updates and
migrate the extension's installation to the new source if an update is found.
- `mcpServers`: A map of MCP servers to settings. The key is the name of the
server, and the value is the server configuration. These servers will be
loaded on startup just like MCP servers defined in a
[`settings.json` file](../reference/configuration.md). If both an extension
and a `settings.json` file define an MCP server with the same name, the server
defined in the `settings.json` file takes precedence.
- Note that all MCP server configuration options are supported except for
`trust`.
- For portability, you should use `${extensionPath}` to refer to files within
your extension directory.
- Separate your executable and its arguments using `command` and `args`
instead of putting them both in `command`.
- `contextFileName`: The name of the file that contains the context for the
extension. This will be used to load the context from the extension directory.
If this property is not used but a `GEMINI.md` file is present in your
extension directory, then that file will be loaded.
- `excludeTools`: An array of tool names to exclude from the model. You can also
specify command-specific restrictions for tools that support it, like the
`run_shell_command` tool. For example,
`"excludeTools": ["run_shell_command(rm -rf)"]` will block the `rm -rf`
command. Note that this differs from the MCP server `excludeTools`
functionality, which can be listed in the MCP server config.
- `plan`: Planning features configuration.
- `directory`: The directory where planning artifacts are stored. This serves
as a fallback if the user hasn't specified a plan directory in their
settings. If not specified by either the extension or the user, the default
is `~/.gemini/tmp/<project_hash>/<session_id>/plans/`.
When Gemini CLI starts, it loads all the extensions and merges their
configurations. If there are any conflicts, the workspace configuration takes
precedence.
### Extension settings
Extensions can define settings that users provide during installation, such as
API keys or URLs. These values are stored in a `.env` file within the extension
directory.
To define settings, add a `settings` array to your manifest:
```json
{
"name": "my-api-extension",
"version": "1.0.0",
"settings": [
{
"name": "API Key",
"description": "Your API key for the service.",
"envVar": "MY_API_KEY",
"sensitive": true
}
]
}
```
- `name`: The setting's display name.
- `description`: A clear explanation of the setting.
- `envVar`: The environment variable name where the value is stored.
- `sensitive`: If `true`, the value is stored in the system keychain and
obfuscated in the UI.
To update an extension's settings:
```bash
gemini extensions config <extension-name> [setting] [--scope <scope>]
```
### Custom commands
Provide [custom commands](../cli/custom-commands.md) by placing TOML files in a
`commands/` subdirectory. Gemini CLI uses the directory structure to determine
the command name.
For an extension named `gcp`:
- `commands/deploy.toml` becomes `/deploy`
- `commands/gcs/sync.toml` becomes `/gcs:sync` (namespaced with a colon)
### Hooks
Intercept and customize CLI behavior using [hooks](../hooks/index.md). Define
hooks in a `hooks/hooks.json` file within your extension directory. Note that
hooks are not defined in the `gemini-extension.json` manifest.
### Agent skills
Bundle [agent skills](../cli/skills.md) to provide specialized workflows. Place
skill definitions in a `skills/` directory. For example,
`skills/security-audit/SKILL.md` exposes a `security-audit` skill.
### Sub-agents
> [!NOTE]
> Sub-agents are a preview feature currently under active development.
Provide [sub-agents](../core/subagents.md) that users can delegate tasks to. Add
agent definition files (`.md`) to an `agents/` directory in your extension root.
### Policy Engine
Extensions can contribute policy rules and safety checkers to the Gemini CLI
[Policy Engine](../reference/policy-engine.md). These rules are defined in
`.toml` files and take effect when the extension is activated.
To add policies, create a `policies/` directory in your extension's root and
place your `.toml` policy files inside it. Gemini CLI automatically loads all
`.toml` files from this directory.
Rules contributed by extensions run in their own tier (tier 2), alongside
workspace-defined policies. This tier has higher priority than the default rules
but lower priority than user or admin policies.
> [!WARNING]
> For security, Gemini CLI ignores any `allow` decisions or `yolo`
> mode configurations in extension policies. This ensures that an extension
> cannot automatically approve tool calls or bypass security measures without
> your confirmation.
**Example `policies.toml`**
```toml
[[rule]]
mcpName = "my_server"
toolName = "dangerous_tool"
decision = "ask_user"
priority = 100
[[safety_checker]]
mcpName = "my_server"
toolName = "write_data"
priority = 200
[safety_checker.checker]
type = "in-process"
name = "allowed-path"
required_context = ["environment"]
```
### Themes
Extensions can provide custom themes to personalize the CLI UI. Themes are
defined in the `themes` array in `gemini-extension.json`.
**Example**
```json
{
"name": "my-green-extension",
"version": "1.0.0",
"themes": [
{
"name": "shades-of-green",
"type": "custom",
"background": {
"primary": "#1a362a"
},
"text": {
"primary": "#a6e3a1",
"secondary": "#6e8e7a",
"link": "#89e689"
},
"status": {
"success": "#76c076",
"warning": "#d9e689",
"error": "#b34e4e"
},
"border": {
"default": "#4a6c5a"
},
"ui": {
"comment": "#6e8e7a"
}
}
]
}
```
Custom themes provided by extensions can be selected using the `/theme` command
or by setting the `ui.theme` property in your `settings.json` file. Note that
when referring to a theme from an extension, the extension name is appended to
the theme name in parentheses, e.g., `shades-of-green (my-green-extension)`.
### Conflict resolution
Extension commands have the lowest precedence. If an extension command name
conflicts with a user or project command, the extension command is prefixed with
the extension name (e.g., `/gcp.deploy`) using a dot separator.
## Variables
Gemini CLI supports variable substitution in `gemini-extension.json` and
`hooks/hooks.json`.
| Variable | Description |
| :----------------- | :---------------------------------------------- |
| `${extensionPath}` | The absolute path to the extension's directory. |
| `${workspacePath}` | The absolute path to the current workspace. |
| `${/}` | The platform-specific path separator. |
================================================
FILE: docs/extensions/releasing.md
================================================
# Release extensions
Release Gemini CLI extensions to your users through a Git repository or GitHub
Releases.
Git repository releases are the simplest approach and offer the most flexibility
for managing development branches. GitHub Releases are more efficient for
initial installations because they ship as single archives rather than requiring
a full `git clone`. Use GitHub Releases if you need to include platform-specific
binary files.
## List your extension in the gallery
The [Gemini CLI extension gallery](https://geminicli.com/extensions/browse/)
automatically indexes public extensions to help users discover your work. You
don't need to submit an issue or email us to list your extension.
To have your extension automatically discovered and listed:
1. **Use a public repository:** Ensure your extension is hosted in a public
GitHub repository.
2. **Add the GitHub topic:** Add the `gemini-cli-extension` topic to your
repository's **About** section. Our crawler uses this topic to find new
extensions.
3. **Place the manifest at the root:** Ensure your `gemini-extension.json` file
is in the absolute root of the repository or the release archive.
Our system crawls tagged repositories daily. Once you tag your repository, your
extension will appear in the gallery if it passes validation.
## Release through a Git repository
Releasing through Git is the most flexible option. Create a public Git
repository and provide the URL to your users. They can then install your
extension using `gemini extensions install <your-repo-url>`.
Users can optionally depend on a specific branch, tag, or commit using the
`--ref` argument. For example:
```bash
gemini extensions install <your-repo-url> --ref=stable
```
Whenever you push commits to the referenced branch, the CLI prompts users to
update their installation. The `HEAD` commit is always treated as the latest
version.
### Manage release channels
You can use branches or tags to manage different release channels, such as
`stable`, `preview`, or `dev`.
We recommend using your default branch as the stable release channel. This
ensures that the default installation command always provides the most reliable
version of your extension. You can then use a `dev` branch for active
development and merge it into the default branch when you are ready for a
release.
## Release through GitHub Releases
Distributing extensions through
[GitHub Releases](https://docs.github.com/en/repositories/releasing-projects-on-github/about-releases)
provides a faster installation experience by avoiding a repository clone.
Gemini CLI checks for updates by looking for the **Latest** release on GitHub.
Users can also install specific versions using the `--ref` argument with a
release tag. Use the `--pre-release` flag to install the latest version even if
it isn't marked as **Latest**.
### Custom pre-built archives
You can attach custom archives directly to your GitHub Release as assets. This
is useful if your extension requires a build step or includes platform-specific
binaries.
Custom archives must be fully self-contained and follow the required
[archive structure](#archive-structure). If your extension is
platform-independent, provide a single generic asset.
#### Platform-specific archives
To let Gemini CLI find the correct asset for a user's platform, use the
following naming convention:
1. **Platform and architecture-specific:**
`{platform}.{arch}.{name}.{extension}`
2. **Platform-specific:** `{platform}.{name}.{extension}`
3. **Generic:** A single asset will be used as a fallback if no specific match
is found.
Use these values for the placeholders:
- `{name}`: Your extension name.
- `{platform}`: Use `darwin` (macOS), `linux`, or `win32` (Windows).
- `{arch}`: Use `x64` or `arm64`.
- `{extension}`: Use `.tar.gz` or `.zip`.
**Examples:**
- `darwin.arm64.my-tool.tar.gz` (specific to Apple Silicon Macs)
- `darwin.my-tool.tar.gz` (fallback for all Macs, e.g. Intel)
- `linux.x64.my-tool.tar.gz`
- `win32.my-tool.zip`
#### Archive structure
Archives must be fully contained extensions. The `gemini-extension.json` file
must be at the root of the archive. The rest of the layout should match a
standard extension structure.
#### Example GitHub Actions workflow
Use this example workflow to build and release your extension for multiple
platforms:
```yaml
name: Release Extension
on:
push:
tags:
- 'v*'
jobs:
release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Node.js
uses: actions/setup-node@v3
with:
node-version: '20'
- name: Install dependencies
run: npm ci
- name: Build extension
run: npm run build
- name: Create release assets
run: |
npm run package -- --platform=darwin --arch=arm64
npm run package -- --platform=linux --arch=x64
npm run package -- --platform=win32 --arch=x64
- name: Create GitHub Release
uses: softprops/action-gh-release@v1
with:
files: |
release/darwin.arm64.my-tool.tar.gz
release/linux.x64.my-tool.tar.gz
release/win32.x64.my-tool.zip
```
## Migrating an Extension Repository
If you need to move your extension to a new repository (e.g., from a personal
account to an organization) or rename it, you can use the `migratedTo` property
in your `gemini-extension.json` file to seamlessly transition your users.
1. **Create the new repository**: Set up your extension in its new location.
2. **Update the old repository**: In your original repository, update the
`gemini-extension.json` file to include the `migratedTo` property, pointing
to the new repository URL, and bump the version number. You can optionally
change the `name` of your extension at this time in the new repository.
```json
{
"name": "my-extension",
"version": "1.1.0",
"migratedTo": "https://github.com/new-owner/new-extension-repo"
}
```
3. **Release the update**: Publish this new version in your old repository.
When users check for updates, the Gemini CLI will detect the `migratedTo` field,
verify that the new repository contains a valid extension update, and
automatically update their local installation to track the new source and name
moving forward. All extension settings will automatically migrate to the new
installation.
================================================
FILE: docs/extensions/writing-extensions.md
================================================
# Build Gemini CLI extensions
Gemini CLI extensions let you expand the capabilities of Gemini CLI by adding
custom tools, commands, and context. This guide walks you through creating your
first extension, from setting up a template to adding custom functionality and
linking it for local development.
## Prerequisites
Before you start, ensure you have the Gemini CLI installed and a basic
understanding of Node.js.
## Extension features
Extensions offer several ways to customize Gemini CLI. Use this table to decide
which features your extension needs.
| Feature | What it is | When to use it | Invoked by |
| :------------------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------- |
| **[MCP server](reference.md#mcp-servers)** | A standard way to expose new tools and data sources to the model. | Use this when you want the model to be able to _do_ new things, like fetching data from an internal API, querying a database, or controlling a local application. We also support MCP resources (which can replace custom commands) and system instructions (which can replace custom context) | Model |
| **[Custom commands](../cli/custom-commands.md)** | A shortcut (like `/my-cmd`) that executes a pre-defined prompt or shell command. | Use this for repetitive tasks or to save long, complex prompts that you use frequently. Great for automation. | User |
| **[Context file (`GEMINI.md`)](reference.md#contextfilename)** | A markdown file containing instructions that are loaded into the model's context at the start of every session. | Use this to define the "personality" of your extension, set coding standards, or provide essential knowledge that the model should always have. | CLI provides to model |
| **[Agent skills](../cli/skills.md)** | A specialized set of instructions and workflows that the model activates only when needed. | Use this for complex, occasional tasks (like "create a PR" or "audit security") to avoid cluttering the main context window when the skill isn't being used. | Model |
| **[Hooks](../hooks/index.md)** | A way to intercept and customize the CLI's behavior at specific lifecycle events (e.g., before/after a tool call). | Use this when you want to automate actions based on what the model is doing, like validating tool arguments, logging activity, or modifying the model's input/output. | CLI |
| **[Custom themes](reference.md#themes)** | A set of color definitions to personalize the CLI UI. | Use this to provide a unique visual identity for your extension or to offer specialized high-contrast or thematic color schemes. | User (via /theme) |
## Step 1: Create a new extension
The easiest way to start is by using a built-in template. We'll use the
`mcp-server` example as our foundation.
Run the following command to create a new directory called `my-first-extension`
with the template files:
```bash
gemini extensions new my-first-extension mcp-server
```
This creates a directory with the following structure:
```
my-first-extension/
├── example.js
├── gemini-extension.json
└── package.json
```
## Step 2: Understand the extension files
Your new extension contains several key files that define its behavior.
### `gemini-extension.json`
The manifest file tells Gemini CLI how to load and use your extension.
```json
{
"name": "mcp-server-example",
"version": "1.0.0",
"mcpServers": {
"nodeServer": {
"command": "node",
"args": ["${extensionPath}${/}example.js"],
"cwd": "${extensionPath}"
}
}
}
```
- `name`: The unique name for your extension.
- `version`: The version of your extension.
- `mcpServers`: Defines Model Context Protocol (MCP) servers to add new tools.
- `command`, `args`, `cwd`: Specify how to start your server. The
`${extensionPath}` variable is replaced with the absolute path to your
extension's directory.
### `example.js`
This file contains the source code for your MCP server. It uses the
`@modelcontextprotocol/sdk` to define tools.
```javascript
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { z } from 'zod';
const server = new McpServer({
name: 'prompt-server',
version: '1.0.0',
});
// Registers a new tool named 'fetch_posts'
server.registerTool(
'fetch_posts',
{
description: 'Fetches a list of posts from a public API.',
inputSchema: z.object({}).shape,
},
async () => {
const apiResponse = await fetch(
'https://jsonplaceholder.typicode.com/posts',
);
const posts = await apiResponse.json();
const response = { posts: posts.slice(0, 5) };
return {
content: [
{
type: 'text',
text: JSON.stringify(response),
},
],
};
},
);
const transport = new StdioServerTransport();
await server.connect(transport);
```
### `package.json`
The standard configuration file for a Node.js project. It defines dependencies
and scripts for your extension.
## Step 3: Add extension settings
Some extensions need configuration, such as API keys or user preferences. Let's
add a setting for an API key.
1. Open `gemini-extension.json`.
2. Add a `settings` array to the configuration:
```json
{
"name": "mcp-server-example",
"version": "1.0.0",
"settings": [
{
"name": "API Key",
"description": "The API key for the service.",
"envVar": "MY_SERVICE_API_KEY",
"sensitive": true
}
],
"mcpServers": {
// ...
}
}
```
When a user installs this extension, Gemini CLI will prompt them to enter the
"API Key". The value will be stored securely in the system keychain (because
`sensitive` is true) and injected into the MCP server's process as the
`MY_SERVICE_API_KEY` environment variable.
## Step 4: Link your extension
Link your extension to your Gemini CLI installation for local development.
1. **Install dependencies:**
```bash
cd my-first-extension
npm install
```
2. **Link the extension:**
The `link` command creates a symbolic link from the Gemini CLI extensions
directory to your development directory. Changes you make are reflected
immediately.
```bash
gemini extensions link .
```
Restart your Gemini CLI session to use the new `fetch_posts` tool. Test it by
asking: "fetch posts".
## Step 5: Add a custom command
Custom commands create shortcuts for complex prompts.
1. Create a `commands` directory and a subdirectory for your command group:
**macOS/Linux**
```bash
mkdir -p commands/fs
```
**Windows (PowerShell)**
```powershell
New-Item -ItemType Directory -Force -Path "commands\fs"
```
2. Create a file named `commands/fs/grep-code.toml`:
```toml
prompt = """
Please summarize the findings for the pattern `{{args}}`.
Search Results:
!{grep -r {{args}} .}
"""
```
This command, `/fs:grep-code`, takes an argument, runs the `grep` shell
command, and pipes the results into a prompt for summarization.
After saving the file, restart Gemini CLI. Run `/fs:grep-code "some pattern"` to
use your new command.
## Step 6: Add a custom `GEMINI.md`
Provide persistent context to the model by adding a `GEMINI.md` file to your
extension. This is useful for setting behavior or providing essential tool
information.
1. Create a file named `GEMINI.md` in the root of your extension directory:
```markdown
# My First Extension Instructions
You are an expert developer assistant. When the user asks you to fetch
posts, use the `fetch_posts` tool. Be concise in your responses.
```
2. Update your `gemini-extension.json` to load this file:
```json
{
"name": "my-first-extension",
"version": "1.0.0",
"contextFileName": "GEMINI.md",
"mcpServers": {
"nodeServer": {
"command": "node",
"args": ["${extensionPath}${/}example.js"],
"cwd": "${extensionPath}"
}
}
}
```
Restart Gemini CLI. The model now has the context from your `GEMINI.md` file in
every session where the extension is active.
## (Optional) Step 7: Add an Agent Skill
[Agent Skills](../cli/skills.md) bundle specialized expertise and workflows.
Skills are activated only when needed, which saves context tokens.
1. Create a `skills` directory and a subdirectory for your skill:
**macOS/Linux**
```bash
mkdir -p skills/security-audit
```
**Windows (PowerShell)**
```powershell
New-Item -ItemType Directory -Force -Path "skills\security-audit"
```
2. Create a `skills/security-audit/SKILL.md` file:
```markdown
---
name: security-audit
description:
Expertise in auditing code for security vulnerabilities. Use when the user
asks to "check for security issues" or "audit" their changes.
---
# Security Auditor
You are an expert security researcher. When auditing code:
1. Look for common vulnerabilities (OWASP Top 10).
2. Check for hardcoded secrets or API keys.
3. Suggest remediation steps for any findings.
```
Gemini CLI automatically discovers skills bundled with your extension. The model
activates them when it identifies a relevant task.
## Step 8: Release your extension
When your extension is ready, share it with others via a Git repository or
GitHub Releases. Refer to the [Extension Releasing Guide](./releasing.md) for
detailed instructions and learn how to list your extension in the gallery.
## Next steps
- [Extension reference](reference.md): Deeply understand the extension format,
commands, and configuration.
- [Best practices](best-practices.md): Learn strategies for building great
extensions.
================================================
FILE: docs/get-started/authentication.md
================================================
# Gemini CLI authentication setup
To use Gemini CLI, you'll need to authenticate with Google. This guide helps you
quickly find the best way to sign in based on your account type and how you're
using the CLI.
> [!TIP]
> Looking for a high-level comparison of all available subscriptions?
> To compare features and find the right quota for your needs, see our
> [Plans page](https://geminicli.com/plans/).
For most users, we recommend starting Gemini CLI and logging in with your
personal Google account.
## Choose your authentication method
Select the authentication method that matches your situation in the table below:
| User Type / Scenario | Recommended Authentication Method | Google Cloud Project Required |
| :--------------------------------------------------------------------- | :--------------------------------------------------------------- | :---------------------------------------------------------- |
| Individual Google accounts | [Sign in with Google](#login-google) | No, with exceptions |
| Organization users with a company, school, or Google Workspace account | [Sign in with Google](#login-google) | [Yes](#set-gcp) |
| AI Studio user with a Gemini API key | [Use Gemini API Key](#gemini-api) | No |
| Google Cloud Vertex AI user | [Vertex AI](#vertex-ai) | [Yes](#set-gcp) |
| [Headless mode](#headless) | [Use Gemini API Key](#gemini-api) or [Vertex AI](#vertex-ai) | No (for Gemini API Key) [Yes](#set-gcp) (for Vertex AI) |
### What is my Google account type?
- **Individual Google accounts:** Includes all
[free tier accounts](../resources/quota-and-pricing.md#free-usage) such as
Gemini Code Assist for individuals, as well as paid subscriptions for
[Google AI Pro and Ultra](https://gemini.google/subscriptions/).
- **Organization accounts:** Accounts using paid licenses through an
organization such as a company, school, or
[Google Workspace](https://workspace.google.com/). Includes
[Google AI Ultra for Business](https://support.google.com/a/answer/16345165)
subscriptions.
## (Recommended) Sign in with Google
If you run Gemini CLI on your local machine, the simplest authentication method
is logging in with your Google account. This method requires a web browser on a
machine that can communicate with the terminal running Gemini CLI (for example,
your local machine).
If you are a **Google AI Pro** or **Google AI Ultra** subscriber, use the Google
account associated with your subscription.
To authenticate and use Gemini CLI:
1. Start the CLI:
```bash
gemini
```
2. Select **Sign in with Google**. Gemini CLI opens a sign in prompt using your
web browser. Follow the on-screen instructions. Your credentials will be
cached locally for future sessions.
### Do I need to set my Google Cloud project?
Most individual Google accounts (free and paid) don't require a Google Cloud
project for authentication. However, you'll need to set a Google Cloud project
when you meet at least one of the following conditions:
- You are using a company, school, or Google Workspace account.
- You are using a Gemini Code Assist license from the Google Developer Program.
- You are using a license from a Gemini Code Assist subscription.
For instructions, see [Set your Google Cloud Project](#set-gcp).
## Use Gemini API key
If you don't want to authenticate using your Google account, you can use an API
key from Google AI Studio.
To authenticate and use Gemini CLI with a Gemini API key:
1. Obtain your API key from
[Google AI Studio](https://aistudio.google.com/app/apikey).
2. Set the `GEMINI_API_KEY` environment variable to your key. For example:
**macOS/Linux**
```bash
# Replace YOUR_GEMINI_API_KEY with the key from AI Studio
export GEMINI_API_KEY="YOUR_GEMINI_API_KEY"
```
**Windows (PowerShell)**
```powershell
# Replace YOUR_GEMINI_API_KEY with the key from AI Studio
$env:GEMINI_API_KEY="YOUR_GEMINI_API_KEY"
```
To make this setting persistent, see
[Persisting Environment Variables](#persisting-vars).
3. Start the CLI:
```bash
gemini
```
4. Select **Use Gemini API key**.
> [!WARNING]
> Treat API keys, especially for services like Gemini, as sensitive
> credentials. Protect them to prevent unauthorized access and potential misuse
> of the service under your account.
## Use Vertex AI
To use Gemini CLI with Google Cloud's Vertex AI platform, choose from the
following authentication options:
- A. Application Default Credentials (ADC) using `gcloud`.
- B. Service account JSON key.
- C. Google Cloud API key.
Regardless of your authentication method for Vertex AI, you'll need to set
`GOOGLE_CLOUD_PROJECT` to your Google Cloud project ID with the Vertex AI API
enabled, and `GOOGLE_CLOUD_LOCATION` to the location of your Vertex AI resources
or the location where you want to run your jobs.
For example:
**macOS/Linux**
```bash
# Replace with your project ID and desired location (for example, us-central1)
export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"
```
**Windows (PowerShell)**
```powershell
# Replace with your project ID and desired location (for example, us-central1)
$env:GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
$env:GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"
```
To make any Vertex AI environment variable settings persistent, see
[Persisting Environment Variables](#persisting-vars).
#### A. Vertex AI - application default credentials (ADC) using `gcloud`
Consider this authentication method if you have Google Cloud CLI installed.
If you have previously set `GOOGLE_API_KEY` or `GEMINI_API_KEY`, you must unset
them to use ADC.
**macOS/Linux**
```bash
unset GOOGLE_API_KEY GEMINI_API_KEY
```
**Windows (PowerShell)**
```powershell
Remove-Item Env:\GOOGLE_API_KEY, Env:\GEMINI_API_KEY -ErrorAction Ignore
```
1. Verify you have a Google Cloud project and Vertex AI API is enabled.
2. Log in to Google Cloud:
```bash
gcloud auth application-default login
```
3. [Configure your Google Cloud Project](#set-gcp).
4. Start the CLI:
```bash
gemini
```
5. Select **Vertex AI**.
#### B. Vertex AI - service account JSON key
Consider this method of authentication in non-interactive environments, CI/CD
pipelines, or if your organization restricts user-based ADC or API key creation.
If you have previously set `GOOGLE_API_KEY` or `GEMINI_API_KEY`, you must unset
them:
**macOS/Linux**
```bash
unset GOOGLE_API_KEY GEMINI_API_KEY
```
**Windows (PowerShell)**
```powershell
Remove-Item Env:\GOOGLE_API_KEY, Env:\GEMINI_API_KEY -ErrorAction Ignore
```
1. [Create a service account and key](https://cloud.google.com/iam/docs/keys-create-delete)
and download the provided JSON file. Assign the "Vertex AI User" role to the
service account.
2. Set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to the JSON
file's absolute path. For example:
**macOS/Linux**
```bash
# Replace /path/to/your/keyfile.json with the actual path
export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your/keyfile.json"
```
**Windows (PowerShell)**
```powershell
# Replace C:\path\to\your\keyfile.json with the actual path
$env:GOOGLE_APPLICATION_CREDENTIALS="C:\path\to\your\keyfile.json"
```
3. [Configure your Google Cloud Project](#set-gcp).
4. Start the CLI:
```bash
gemini
```
5. Select **Vertex AI**.
> [!WARNING]
> Protect your service account key file as it gives access to
> your resources.
#### C. Vertex AI - Google Cloud API key
1. Obtain a Google Cloud API key:
[Get an API Key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys?usertype=newuser).
2. Set the `GOOGLE_API_KEY` environment variable:
**macOS/Linux**
```bash
# Replace YOUR_GOOGLE_API_KEY with your Vertex AI API key
export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"
```
**Windows (PowerShell)**
```powershell
# Replace YOUR_GOOGLE_API_KEY with your Vertex AI API key
$env:GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"
```
If you see errors like `"API keys are not supported by this API..."`, your
organization might restrict API key usage for this service. Try the other
Vertex AI authentication methods instead.
3. [Configure your Google Cloud Project](#set-gcp).
4. Start the CLI:
```bash
gemini
```
5. Select **Vertex AI**.
## Set your Google Cloud project
> [!IMPORTANT]
> Most individual Google accounts (free and paid) don't require a
> Google Cloud project for authentication.
When you sign in using your Google account, you may need to configure a Google
Cloud project for Gemini CLI to use. This applies when you meet at least one of
the following conditions:
- You are using a Company, School, or Google Workspace account.
- You are using a Gemini Code Assist license from the Google Developer Program.
- You are using a license from a Gemini Code Assist subscription.
To configure Gemini CLI to use a Google Cloud project, do the following:
1. [Find your Google Cloud Project ID](https://support.google.com/googleapi/answer/7014113).
2. [Enable the Gemini for Cloud API](https://cloud.google.com/gemini/docs/discover/set-up-gemini#enable-api).
3. [Configure necessary IAM access permissions](https://cloud.google.com/gemini/docs/discover/set-up-gemini#grant-iam).
4. Configure your environment variables. Set either the `GOOGLE_CLOUD_PROJECT`
or `GOOGLE_CLOUD_PROJECT_ID` variable to the project ID to use with Gemini
CLI. Gemini CLI checks for `GOOGLE_CLOUD_PROJECT` first, then falls back to
`GOOGLE_CLOUD_PROJECT_ID`.
For example, to set the `GOOGLE_CLOUD_PROJECT` variable:
**macOS/Linux**
```bash
# Replace YOUR_PROJECT_ID with your actual Google Cloud project ID
export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
```
**Windows (PowerShell)**
```powershell
# Replace YOUR_PROJECT_ID with your actual Google Cloud project ID
$env:GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
```
To make this setting persistent, see
[Persisting Environment Variables](#persisting-vars).
## Persisting environment variables
To avoid setting environment variables for every terminal session, you can
persist them with the following methods:
1. **Add your environment variables to your shell configuration file:** Append
the environment variable commands to your shell's startup file.
**macOS/Linux** (for example, `~/.bashrc`, `~/.zshrc`, or `~/.profile`):
```bash
echo 'export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"' >> ~/.bashrc
source ~/.bashrc
```
**Windows (PowerShell)** (for example, `$PROFILE`):
```powershell
Add-Content -Path $PROFILE -Value '$env:GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"'
. $PROFILE
```
> [!WARNING]
> Be aware that when you export API keys or service account
> paths in your shell configuration file, any process launched from that
> shell can read them.
2. **Use a `.env` file:** Create a `.gemini/.env` file in your project
directory or home directory. Gemini CLI automatically loads variables from
the first `.env` file it finds, searching up from the current directory,
then in your home directory's `.gemini/.env` (for example, `~/.gemini/.env`
or `%USERPROFILE%\.gemini\.env`).
Example for user-wide settings:
**macOS/Linux**
```bash
mkdir -p ~/.gemini
cat >> ~/.gemini/.env <<'EOF'
GOOGLE_CLOUD_PROJECT="your-project-id"
# Add other variables like GEMINI_API_KEY as needed
EOF
```
**Windows (PowerShell)**
```powershell
New-Item -ItemType Directory -Force -Path "$env:USERPROFILE\.gemini"
@"
GOOGLE_CLOUD_PROJECT="your-project-id"
# Add other variables like GEMINI_API_KEY as needed
"@ | Out-File -FilePath "$env:USERPROFILE\.gemini\.env" -Encoding utf8 -Append
```
Variables are loaded from the first file found, not merged.
## Running in Google Cloud environments
When running Gemini CLI within certain Google Cloud environments, authentication
is automatic.
In a Google Cloud Shell environment, Gemini CLI typically authenticates
automatically using your Cloud Shell credentials. In Compute Engine
environments, Gemini CLI automatically uses Application Default Credentials
(ADC) from the environment's metadata server.
If automatic authentication fails, use one of the interactive methods described
on this page.
## Running in headless mode
[Headless mode](../cli/headless.md) will use your existing authentication method
if an existing authentication credential is cached.
If you have not already signed in with an authentication credential, you must
configure authentication using environment variables:
- [Use Gemini API Key](#gemini-api)
- [Vertex AI](#vertex-ai)
## What's next?
Your authentication method affects your quotas, pricing, Terms of Service, and
privacy notices. Review the following pages to learn more:
- [Gemini CLI: Quotas and Pricing](../resources/quota-and-pricing.md).
- [Gemini CLI: Terms of Service and Privacy Notice](../resources/tos-privacy.md).
================================================
FILE: docs/get-started/examples.md
================================================
# Gemini CLI examples
Gemini CLI helps you automate common engineering tasks by combining AI reasoning
with local system tools. This document provides examples of how to use the CLI
for file management, code analysis, and data transformation.
> [!NOTE]
> These examples demonstrate potential capabilities. Your actual
> results can vary based on the model used and your project environment.
## Rename your photographs based on content
You can use Gemini CLI to automate file management tasks that require visual
analysis. In this example, Gemini CLI renames images based on their actual
subject matter.
Scenario: You have a folder containing the following files:
```bash
photos/photo1.png
photos/photo2.png
photos/photo3.png
```
Give Gemini the following prompt:
```cli
Rename the photos in my "photos" directory based on their contents.
```
Result: Gemini asks for permission to rename your files.
Select **Allow once** and your files are renamed:
```bash
photos/yellow_flowers.png
photos/antique_dresser.png
photos/green_android_robot.png
```
## Explain a repository by reading its code
Gemini CLI is effective for rapid codebase exploration. The following example
shows how to ask Gemini CLI to fetch, analyze, and summarize a remote project.
Scenario: You want to understand how a popular open-source utility works by
inspecting its code, not just its README.
Give Gemini CLI the following prompt:
```cli
Clone the 'chalk' repository from https://github.com/chalk/chalk, read its key source files, and explain how it works.
```
Result: Gemini performs a sequence of actions to answer your request.
1. First, it asks for permission to run `git clone` to download the repository.
2. Next, it finds the important source files and asks for permission to read
them.
3. Finally, after analyzing the code, it provides a summary.
Gemini CLI returns an explanation based on the actual source code:
```markdown
The `chalk` library is a popular npm package for styling terminal output with
colors. After analyzing the source code, here's how it works:
- **Core functionality:** The main file sets up a chainable API. Each color or
modifier (like `bold` or `italic`) is a getter that appends the corresponding
ANSI escape code to an internal stack.
- **Chainable API:** When you call `chalk.red.bold('Hello')`, you are accessing
getters. The `red` getter adds the red color code, and the `bold` getter adds
the bold code.
- **Output generation:** When the chain is treated as a string (e.g., in
`console.log`), a final `toString()` method is called. This method joins all
the stored ANSI codes, wraps them around the input string ('Hello'), and adds
a reset code at the end. This produces the final, styled string that the
terminal can render.
```
## Combine two spreadsheets into one spreadsheet
Gemini CLI can process and transform data across multiple files. Use this
capability to merge reports or reformat data sets without manual copying.
Scenario: You have two .csv files: `Revenue - 2023.csv` and
`Revenue - 2024.csv`. Each file contains monthly revenue figures.
Give Gemini CLI the following prompt:
```cli
Combine the two .csv files into a single .csv file, with each year a different column.
```
Result: Gemini CLI reads each file and then asks for permission to write a new
file. Provide your permission and Gemini CLI provides the combined data:
```csv
Month,2023,2024
January,0,1000
February,0,1200
March,0,2400
April,900,500
May,1000,800
June,1000,900
July,1200,1000
August,1800,400
September,2000,2000
October,2400,3400
November,3400,1800
December,2100,9000
```
## Run unit tests
Gemini CLI can generate boilerplate code and tests based on your existing
implementation. This example demonstrates how to request code coverage for a
JavaScript component.
Scenario: You've written a simple login page. You wish to write unit tests to
ensure that your login page has code coverage.
Give Gemini CLI the following prompt:
```cli
Write unit tests for Login.js.
```
Result: Gemini CLI asks for permission to write a new file and creates a test
for your login page.
## Next steps
- Follow the [File management](../cli/tutorials/file-management.md) guide to
start working with your codebase.
- Follow the [Quickstart](./index.md) to start your first session.
- See the [Cheatsheet](../cli/cli-reference.md) for a quick reference of
available commands.
================================================
FILE: docs/get-started/gemini-3.md
================================================
# Gemini 3 Pro and Gemini 3 Flash on Gemini CLI
Gemini 3 Pro and Gemini 3 Flash are available on Gemini CLI for all users!
> [!NOTE]
> Gemini 3.1 Pro Preview is rolling out. To determine whether you have
> access to Gemini 3.1, use the `/model` command and select **Manual**. If you
> have access, you will see `gemini-3.1-pro-preview`.
>
> If you have access to Gemini 3.1, it will be included in model routing when
> you select **Auto (Gemini 3)**. You can also launch the Gemini 3.1 model
> directly using the `-m` flag:
>
> ```
> gemini -m gemini-3.1-pro-preview
> ```
>
> Learn more about [models](../cli/model.md) and
> [model routing](../cli/model-routing.md).
## How to get started with Gemini 3 on Gemini CLI
Get started by upgrading Gemini CLI to the latest version:
```bash
npm install -g @google/gemini-cli@latest
```
If your version is 0.21.1 or later:
1. Run `/model`.
2. Select **Auto (Gemini 3)**.
For more information, see [Gemini CLI model selection](../cli/model.md).
### Usage limits and fallback
Gemini CLI will tell you when you reach your Gemini 3 Pro daily usage limit.
When you encounter that limit, you’ll be given the option to switch to Gemini
2.5 Pro, upgrade for higher limits, or stop. You’ll also be told when your usage
limit resets and Gemini 3 Pro can be used again.
> [!TIP]
> Looking to upgrade for higher limits? To compare subscription
> options and find the right quota for your needs, see our
> [Plans page](https://geminicli.com/plans/).
Similarly, when you reach your daily usage limit for Gemini 2.5 Pro, you’ll see
a message prompting fallback to Gemini 2.5 Flash.
### Capacity errors
There may be times when the Gemini 3 Pro model is overloaded. When that happens,
Gemini CLI will ask you to decide whether you want to keep trying Gemini 3 Pro
or fall back to Gemini 2.5 Pro.
> [!NOTE]
> The **Keep trying** option uses exponential backoff, in which Gemini
> CLI waits longer between each retry when the system is busy. If the retry
> doesn't happen immediately, please wait a few minutes for the request to
> process.
### Model selection and routing types
When using Gemini CLI, you may want to control how your requests are routed
between models. By default, Gemini CLI uses **Auto** routing.
When using Gemini 3 Pro, you may want to use Auto routing or Pro routing to
manage your usage limits:
- **Auto routing:** Auto routing first determines whether a prompt involves a
complex or simple operation. For simple prompts, it will automatically use
Gemini 2.5 Flash. For complex prompts, if Gemini 3 Pro is enabled, it will use
Gemini 3 Pro; otherwise, it will use Gemini 2.5 Pro.
- **Pro routing:** If you want to ensure your task is processed by the most
capable model, use `/model` and select **Pro**. Gemini CLI will prioritize the
most capable model available, including Gemini 3 Pro if it has been enabled.
To learn more about selecting a model and routing, refer to
[Gemini CLI Model Selection](../cli/model.md).
## How to enable Gemini 3 with Gemini CLI on Gemini Code Assist
If you're using Gemini Code Assist Standard or Gemini Code Assist Enterprise,
enabling Gemini 3 Pro on Gemini CLI requires configuring your release channels.
Using Gemini 3 Pro will require two steps: administrative enablement and user
enablement.
To learn more about these settings, refer to
[Configure Gemini Code Assist release channels](https://developers.google.com/gemini-code-assist/docs/configure-release-channels).
### Administrator instructions
An administrator with **Google Cloud Settings Admin** permissions must follow
these directions:
- Navigate to the Google Cloud Project you're using with Gemini CLI for Code
Assist.
- Go to **Admin for Gemini** > **Settings**.
- Under **Release channels for Gemini Code Assist in local IDEs** select
**Preview**.
- Click **Save changes**.
### User instructions
Wait for two to three minutes after your administrator has enabled **Preview**,
then:
- Open Gemini CLI.
- Use the `/settings` command.
- Set **Preview Features** to `true`.
Restart Gemini CLI and you should have access to Gemini 3.
## Next steps
If you need help, we recommend searching for an existing
[GitHub issue](https://github.com/google-gemini/gemini-cli/issues). If you
cannot find a GitHub issue that matches your concern, you can
[create a new issue](https://github.com/google-gemini/gemini-cli/issues/new/choose).
For comments and feedback, consider opening a
[GitHub discussion](https://github.com/google-gemini/gemini-cli/discussions).
================================================
FILE: docs/get-started/index.md
================================================
# Get started with Gemini CLI
Welcome to Gemini CLI! This guide will help you install, configure, and start
using the Gemini CLI to enhance your workflow right from your terminal.
## Quickstart: Install, authenticate, configure, and use Gemini CLI
Gemini CLI brings the power of advanced language models directly to your command
line interface. As an AI-powered assistant, Gemini CLI can help you with a
variety of tasks, from understanding and generating code to reviewing and
editing documents.
## Install
The standard method to install and run Gemini CLI uses `npm`:
```bash
npm install -g @google/gemini-cli
```
Once Gemini CLI is installed, run Gemini CLI from your command line:
```bash
gemini
```
For more installation options, see [Gemini CLI Installation](./installation.md).
## Authenticate
To begin using Gemini CLI, you must authenticate with a Google service. In most
cases, you can log in with your existing Google account:
1. Run Gemini CLI after installation:
```bash
gemini
```
2. When asked "How would you like to authenticate for this project?" select **1.
Sign in with Google**.
3. Select your Google account.
4. Click on **Sign in**.
Certain account types may require you to configure a Google Cloud project. For
more information, including other authentication methods, see
[Gemini CLI Authentication Setup](./authentication.md).
## Configure
Gemini CLI offers several ways to configure its behavior, including environment
variables, command-line arguments, and settings files.
To explore your configuration options, see
[Gemini CLI Configuration](../reference/configuration.md).
## Use
Once installed and authenticated, you can start using Gemini CLI by issuing
commands and prompts in your terminal. Ask it to generate code, explain files,
and more.
To explore the power of Gemini CLI, see [Gemini CLI examples](./examples.md).
## Check usage and quota
You can check your current token usage and quota information using the
`/stats model` command. This command provides a snapshot of your current
session's token usage, as well as your overall quota and usage for the supported
models.
For more information on the `/stats` command and its subcommands, see the
[Command Reference](../reference/commands.md#stats).
## Next steps
- Follow the [File management](../cli/tutorials/file-management.md) guide to
start working with your codebase.
- See [Shell commands](../cli/tutorials/shell-commands.md) to learn about
terminal integration.
================================================
FILE: docs/get-started/installation.md
================================================
# Gemini CLI installation, execution, and releases
This document provides an overview of Gemini CLI's system requirements,
installation methods, and release types.
## Recommended system specifications
- **Operating System:**
- macOS 15+
- Windows 11 24H2+
- Ubuntu 20.04+
- **Hardware:**
- "Casual" usage: 4GB+ RAM (short sessions, common tasks and edits)
- "Power" usage: 16GB+ RAM (long sessions, large codebases, deep context)
- **Runtime:** Node.js 20.0.0+
- **Shell:** Bash, Zsh, or PowerShell
- **Location:**
[Gemini Code Assist supported locations](https://developers.google.com/gemini-code-assist/resources/available-locations#americas)
- **Internet connection required**
## Install Gemini CLI
We recommend most users install Gemini CLI using one of the following
installation methods:
- npm
- Homebrew
- MacPorts
- Anaconda
Note that Gemini CLI comes pre-installed on
[**Cloud Shell**](https://docs.cloud.google.com/shell/docs) and
[**Cloud Workstations**](https://cloud.google.com/workstations).
### Install globally with npm
```bash
npm install -g @google/gemini-cli
```
### Install globally with Homebrew (macOS/Linux)
```bash
brew install gemini-cli
```
### Install globally with MacPorts (macOS)
```bash
sudo port install gemini-cli
```
### Install with Anaconda (for restricted environments)
```bash
# Create and activate a new environment
conda create -y -n gemini_env -c conda-forge nodejs
conda activate gemini_env
# Install Gemini CLI globally via npm (inside the environment)
npm install -g @google/gemini-cli
```
## Run Gemini CLI
For most users, we recommend running Gemini CLI with the `gemini` command:
```bash
gemini
```
For a list of options and additional commands, see the
[CLI cheatsheet](../cli/cli-reference.md).
You can also run Gemini CLI using one of the following advanced methods:
- Run instantly with npx. You can run Gemini CLI without permanent installation.
- In a sandbox. This method offers increased security and isolation.
- From the source. This is recommended for contributors to the project.
### Run instantly with npx
```bash
# Using npx (no installation required)
npx @google/gemini-cli
```
You can also execute the CLI directly from the main branch on GitHub, which is
helpful for testing features still in development:
```bash
npx https://github.com/google-gemini/gemini-cli
```
### Run in a sandbox (Docker/Podman)
For security and isolation, Gemini CLI can be run inside a container. This is
the default way that the CLI executes tools that might have side effects.
- **Directly from the registry:** You can run the published sandbox image
directly. This is useful for environments where you only have Docker and want
to run the CLI.
```bash
# Run the published sandbox image
docker run --rm -it us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.1.1
```
- **Using the `--sandbox` flag:** If you have Gemini CLI installed locally
(using the standard installation described above), you can instruct it to run
inside the sandbox container.
```bash
gemini --sandbox -y -p "your prompt here"
```
### Run from source (recommended for Gemini CLI contributors)
Contributors to the project will want to run the CLI directly from the source
code.
- **Development mode:** This method provides hot-reloading and is useful for
active development.
```bash
# From the root of the repository
npm run start
```
- **Production-like mode (linked package):** This method simulates a global
installation by linking your local package. It's useful for testing a local
build in a production workflow.
```bash
# Link the local cli package to your global node_modules
npm link packages/cli
# Now you can run your local version using the `gemini` command
gemini
```
## Releases
Gemini CLI has three release channels: nightly, preview, and stable. For most
users, we recommend the stable release, which is the default installation.
### Stable
New stable releases are published each week. The stable release is the promotion
of last week's `preview` release along with any bug fixes. The stable release
uses the `latest` tag, but omitting the tag also installs the latest stable
release
by default:
```bash
# Both commands install the latest stable release.
npm install -g @google/gemini-cli
npm install -g @google/gemini-cli@latest
```
### Preview
New preview releases will be published each week. These releases are not fully
vetted and may contain regressions or other outstanding issues. Try out the
preview release by using the `preview` tag:
```bash
npm install -g @google/gemini-cli@preview
```
### Nightly
Nightly releases are published every day. The nightly release includes all
changes from the main branch at the time of release. It should be assumed there are
pending validations and issues. You can help test the latest changes by
installing with the `nightly` tag:
```bash
npm install -g @google/gemini-cli@nightly
```
================================================
FILE: docs/hooks/best-practices.md
================================================
# Hooks Best Practices
This guide covers security considerations, performance optimization, debugging
techniques, and privacy considerations for developing and deploying hooks in
Gemini CLI.
## Performance
### Keep hooks fast
Hooks run synchronously—slow hooks delay the agent loop. Optimize for speed by
using parallel operations:
```javascript
// Sequential operations are slower
const data1 = await fetch(url1).then((r) => r.json());
const data2 = await fetch(url2).then((r) => r.json());
// Prefer parallel operations for better performance
// Start requests concurrently
const p1 = fetch(url1).then((r) => r.json());
const p2 = fetch(url2).then((r) => r.json());
// Wait for all results
const [data1, data2] = await Promise.all([p1, p2]);
```
### Cache expensive operations
Store results between invocations to avoid repeated computation, especially for
hooks that run frequently (like `BeforeTool` or `AfterModel`).
```javascript
const fs = require('fs');
const path = require('path');
const CACHE_FILE = '.gemini/hook-cache.json';
function readCache() {
try {
return JSON.parse(fs.readFileSync(CACHE_FILE, 'utf8'));
} catch {
return {};
}
}
function writeCache(data) {
fs.writeFileSync(CACHE_FILE, JSON.stringify(data, null, 2));
}
async function main() {
const cache = readCache();
const cacheKey = `tool-list-${(Date.now() / 3600000) | 0}`; // Hourly cache
if (cache[cacheKey]) {
// Write JSON to stdout
console.log(JSON.stringify(cache[cacheKey]));
return;
}
// Expensive operation
const result = await computeExpensiveResult();
cache[cacheKey] = result;
writeCache(cache);
console.log(JSON.stringify(result));
}
```
### Use appropriate events
Choose hook events that match your use case to avoid unnecessary execution.
- **`AfterAgent`**: Fires **once** per turn after the model finishes its final
response. Use this for quality validation (Retries) or final logging.
- **`AfterModel`**: Fires after **every chunk** of LLM output. Use this for
real-time redaction, PII filtering, or monitoring output as it streams.
If you only need to check the final completion, use `AfterAgent` to save
performance.
### Filter with matchers
Use specific matchers to avoid unnecessary hook execution. Instead of matching
all tools with `*`, specify only the tools you need. This saves the overhead of
spawning a process for irrelevant events.
```json
{
"matcher": "write_file|replace",
"hooks": [
{
"name": "validate-writes",
"type": "command",
"command": "./validate.sh"
}
]
}
```
### Optimize JSON parsing
For large inputs (like `AfterModel` receiving a large context), standard JSON
parsing can be slow. If you only need one field, consider streaming parsers or
lightweight extraction logic, though for most shell scripts `jq` is sufficient.
## Debugging
### The "Strict JSON" rule
The most common cause of hook failure is "polluting" the standard output.
- **stdout** is for **JSON only**.
- **stderr** is for **logs and text**.
**Good:**
```bash
#!/bin/bash
echo "Starting check..." >&2 # <--- Redirect to stderr
echo '{"decision": "allow"}'
```
### Log to files
Since hooks run in the background, writing to a dedicated log file is often the
easiest way to debug complex logic.
```bash
#!/usr/bin/env bash
LOG_FILE=".gemini/hooks/debug.log"
# Log with timestamp
log() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" >> "$LOG_FILE"
}
input=$(cat)
log "Received input: ${input:0:100}..."
# Hook logic here
log "Hook completed successfully"
# Always output valid JSON to stdout at the end, even if just empty
echo "{}"
```
### Use stderr for errors
Error messages on stderr are surfaced appropriately based on exit codes:
```javascript
try {
const result = dangerousOperation();
console.log(JSON.stringify({ result }));
} catch (error) {
// Write the error description to stderr so the user/agent sees it
console.error(`Hook error: ${error.message}`);
process.exit(2); // Blocking error
}
```
### Test hooks independently
Run hook scripts manually with sample JSON input to verify they behave as
expected before hooking them up to the CLI.
**macOS/Linux**
```bash
# Create test input
cat > test-input.json << 'EOF'
{
"session_id": "test-123",
"cwd": "/tmp/test",
"hook_event_name": "BeforeTool",
"tool_name": "write_file",
"tool_input": {
"file_path": "test.txt",
"content": "Test content"
}
}
EOF
# Test the hook
cat test-input.json | .gemini/hooks/my-hook.sh
# Check exit code
echo "Exit code: $?"
```
**Windows (PowerShell)**
```powershell
# Create test input
@"
{
"session_id": "test-123",
"cwd": "C:\\temp\\test",
"hook_event_name": "BeforeTool",
"tool_name": "write_file",
"tool_input": {
"file_path": "test.txt",
"content": "Test content"
}
}
"@ | Out-File -FilePath test-input.json -Encoding utf8
# Test the hook
Get-Content test-input.json | .\.gemini\hooks\my-hook.ps1
# Check exit code
Write-Host "Exit code: $LASTEXITCODE"
```
### Check exit codes
Gemini CLI uses exit codes for high-level flow control:
- **Exit 0 (Success)**: The hook ran successfully. The CLI parses `stdout` for
JSON decisions.
- **Exit 2 (System Block)**: A critical block occurred. `stderr` is used as the
reason.
- For **Agent/Model** events, this aborts the turn.
- For **Tool** events, this blocks the tool but allows the agent to continue.
- For **AfterAgent**, this triggers an automatic retry turn.
> **TIP**
>
> **Blocking vs. Stopping**: Use `decision: "deny"` (or Exit Code 2) to block a
> **specific action**. Use `{"continue": false}` in your JSON output to **kill
> the entire agent loop** immediately.
```bash
#!/usr/bin/env bash
set -e
# Hook logic
if process_input; then
echo '{"decision": "allow"}'
exit 0
else
echo "Critical validation failure" >&2
exit 2
fi
```
### Enable telemetry
Hook execution is logged when `telemetry.logPrompts` is enabled. You can view
these logs to debug execution flow.
```json
{
"telemetry": {
"logPrompts": true
}
}
```
### Use hook panel
The `/hooks panel` command inside the CLI shows execution status and recent
output:
```bash
/hooks panel
```
Check for:
- Hook execution counts
- Recent successes/failures
- Error messages
- Execution timing
## Development
### Start simple
Begin with basic logging hooks before implementing complex logic:
```bash
#!/usr/bin/env bash
# Simple logging hook to understand input structure
input=$(cat)
echo "$input" >> .gemini/hook-inputs.log
# Always return valid JSON
echo "{}"
```
### Documenting your hooks
Maintainability is critical for complex hook systems. Use descriptions and
comments to help yourself and others understand why a hook exists.
**Use the `description` field**: This text is displayed in the `/hooks panel` UI
and helps diagnose issues.
```json
{
"hooks": {
"BeforeTool": [
{
"matcher": "write_file|replace",
"hooks": [
{
"name": "secret-scanner",
"type": "command",
"command": "$GEMINI_PROJECT_DIR/.gemini/hooks/block-secrets.sh",
"description": "Scans code changes for API keys and secrets before writing"
}
]
}
]
}
}
```
**Add comments in hook scripts**: Explain performance expectations and
dependencies.
```javascript
#!/usr/bin/env node
/**
* RAG Tool Filter Hook
*
* Reduces the tool space by extracting keywords from the user's request.
*
* Performance: ~500ms average
* Dependencies: @google/generative-ai
*/
```
### Use JSON libraries
Parse JSON with proper libraries instead of text processing.
**Bad:**
```bash
# Fragile text parsing
tool_name=$(echo "$input" | grep -oP '"tool_name":\s*"\K[^"]+')
```
**Good:**
```bash
# Robust JSON parsing
tool_name=$(echo "$input" | jq -r '.tool_name')
```
### Make scripts executable
Always make hook scripts executable on macOS/Linux:
```bash
chmod +x .gemini/hooks/*.sh
chmod +x .gemini/hooks/*.js
```
**Windows Note**: On Windows, PowerShell scripts (`.ps1`) don't use `chmod`, but
you may need to ensure your execution policy allows them to run (e.g.,
`Set-ExecutionPolicy RemoteSigned -Scope CurrentUser`).
### Version control
Commit hooks to share with your team:
```bash
git add .gemini/hooks/
git add .gemini/settings.json
```
**`.gitignore` considerations:**
```gitignore
# Ignore hook cache and logs
.gemini/hook-cache.json
.gemini/hook-debug.log
.gemini/memory/session-*.jsonl
# Keep hook scripts
!.gemini/hooks/*.sh
!.gemini/hooks/*.js
```
## Hook security
### Threat Model
Understanding where hooks come from and what they can do is critical for secure
usage.
| Hook Source | Description |
| :---------------------------- | :------------------------------------------------------------------------------------------------------------------------- |
| **System** | Configured by system administrators (e.g., `/etc/gemini-cli/settings.json`, `/Library/...`). Assumed to be the **safest**. |
| **User** (`~/.gemini/...`) | Configured by you. You are responsible for ensuring they are safe. |
| **Extensions** | You explicitly approve and install these. Security depends on the extension source (integrity). |
| **Project** (`./.gemini/...`) | **Untrusted by default.** Safest in trusted internal repos; higher risk in third-party/public repos. |
#### Project Hook Security
When you open a project with hooks defined in `.gemini/settings.json`:
1. **Detection**: Gemini CLI detects the hooks.
2. **Identification**: A unique identity is generated for each hook based on its
`name` and `command`.
3. **Warning**: If this specific hook identity has not been seen before, a
**warning** is displayed.
4. **Execution**: The hook is executed (unless specific security settings block
it).
5. **Trust**: The hook is marked as "trusted" for this project.
> **Modification detection**: If the `command` string of a project hook is
> changed (e.g., by a `git pull`), its identity changes. Gemini CLI will treat
> it as a **new, untrusted hook** and warn you again. This prevents malicious
> actors from silently swapping a verified command for a malicious one.
### Risks
| Risk | Description |
| :--------------------------- | :----------------------------------------------------------------------------------------------------------------------------------- |
| **Arbitrary Code Execution** | Hooks run as your user. They can do anything you can do (delete files, install software). |
| **Data Exfiltration** | A hook could read your input (prompts), output (code), or environment variables (`GEMINI_API_KEY`) and send them to a remote server. |
| **Prompt Injection** | Malicious content in a file or web page could trick an LLM into running a tool that triggers a hook in an unexpected way. |
### Mitigation Strategies
#### Verify the source
**Verify the source** of any project hooks or extensions before enabling them.
- For open-source projects, a quick review of the hook scripts is recommended.
- For extensions, ensure you trust the author or publisher (e.g., verified
publishers, well-known community members).
- Be cautious with obfuscated scripts or compiled binaries from unknown sources.
#### Sanitize environment
Hooks inherit the environment of the Gemini CLI process, which may include
sensitive API keys. Gemini CLI provides a
[redaction system](../reference/configuration.md#environment-variable-redaction)
that automatically filters variables matching sensitive patterns (e.g., `KEY`,
`TOKEN`).
> **Disabled by Default**: Environment redaction is currently **OFF by
> default**. We strongly recommend enabling it if you are running third-party
> hooks or working in sensitive environments.
**Impact on hooks:**
- **Security**: Prevents your hook scripts from accidentally leaking secrets.
- **Troubleshooting**: If your hook depends on a specific environment variable
that is being blocked, you must explicitly allow it in `settings.json`.
```json
{
"security": {
"environmentVariableRedaction": {
"enabled": true,
"allowed": ["MY_REQUIRED_TOOL_KEY"]
}
}
}
```
**System administrators:** You can enforce redaction for all users in the system
configuration.
## Troubleshooting
### Hook not executing
**Check hook name in `/hooks panel`:** Verify the hook appears in the list and
is enabled.
**Verify matcher pattern:**
```bash
# Test regex pattern
echo "write_file|replace" | grep -E "write_.*|replace"
```
**Check disabled list:** Verify the hook is not listed in your `settings.json`:
```json
{
"hooks": {
"disabled": ["my-hook-name"]
}
}
```
**Ensure script is executable**: For macOS and Linux users, verify the script
has execution permissions:
```bash
ls -la .gemini/hooks/my-hook.sh
chmod +x .gemini/hooks/my-hook.sh
```
**Windows Note**: On Windows, ensure your execution policy allows running
scripts (e.g., `Get-ExecutionPolicy`).
**Verify script path:** Ensure the path in `settings.json` resolves correctly.
```bash
# Check path expansion
echo "$GEMINI_PROJECT_DIR/.gemini/hooks/my-hook.sh"
# Verify file exists
test -f "$GEMINI_PROJECT_DIR/.gemini/hooks/my-hook.sh" && echo "File exists"
```
### Hook timing out
**Check configured timeout:** The default is 60000ms (1 minute). You can
increase this in `settings.json`:
```json
{
"name": "slow-hook",
"timeout": 120000
}
```
**Optimize slow operations:** Move heavy processing to background tasks or use
caching.
### Invalid JSON output
**Validate JSON before outputting:**
```bash
#!/usr/bin/env bash
output='{"decision": "allow"}'
# Validate JSON
if echo "$output" | jq empty 2>/dev/null; then
echo "$output"
else
echo "Invalid JSON generated" >&2
exit 1
fi
```
### Environment variables not available
**Check if variable is set:**
```bash
#!/usr/bin/env bash
if [ -z "$GEMINI_PROJECT_DIR" ]; then
echo "GEMINI_PROJECT_DIR not set" >&2
exit 1
fi
```
**Debug available variables:**
```bash
env > .gemini/hook-env.log
```
## Authoring secure hooks
When writing your own hooks, follow these practices to ensure they are robust
and secure.
### Validate all inputs
Never trust data from hooks without validation. Hook inputs often come from the
LLM or user prompts, which can be manipulated.
```bash
#!/usr/bin/env bash
input=$(cat)
# Validate JSON structure
if ! echo "$input" | jq empty 2>/dev/null; then
echo "Invalid JSON input" >&2
exit 1
fi
# Validate tool_name explicitly
tool_name=$(echo "$input" | jq -r '.tool_name // empty')
if [[ "$tool_name" != "write_file" && "$tool_name" != "read_file" ]]; then
echo "Unexpected tool: $tool_name" >&2
exit 1
fi
```
### Use timeouts
Prevent denial-of-service (hanging agents) by enforcing timeouts. Gemini CLI
defaults to 60 seconds, but you should set stricter limits for fast hooks.
```json
{
"hooks": {
"BeforeTool": [
{
"matcher": "*",
"hooks": [
{
"name": "fast-validator",
"type": "command",
"command": "./hooks/validate.sh",
"timeout": 5000 // 5 seconds
}
]
}
]
}
}
```
### Limit permissions
Run hooks with minimal required permissions:
```bash
#!/usr/bin/env bash
# Don't run as root
if [ "$EUID" -eq 0 ]; then
echo "Hook should not run as root" >&2
exit 1
fi
# Check file permissions before writing (guard clause: bail out early if the
# file is not writable; a then-branch containing only a comment is a bash
# syntax error)
if [ ! -w "$file_path" ]; then
  echo "Insufficient permissions" >&2
  exit 1
fi
# Safe to write
```
### Example: Secret Scanner
Use `BeforeTool` hooks to prevent committing sensitive data. This is a powerful
pattern for enhancing security in your workflow.
```javascript
const SECRET_PATTERNS = [
/api[_-]?key\s*[:=]\s*['"]?[a-zA-Z0-9_-]{20,}['"]?/i,
/password\s*[:=]\s*['"]?[^\s'"]{8,}['"]?/i,
/secret\s*[:=]\s*['"]?[a-zA-Z0-9_-]{20,}['"]?/i,
/AKIA[0-9A-Z]{16}/, // AWS access key
/ghp_[a-zA-Z0-9]{36}/, // GitHub personal access token
/sk-[a-zA-Z0-9]{48}/, // OpenAI API key
];
function containsSecret(content) {
return SECRET_PATTERNS.some((pattern) => pattern.test(content));
}
```
## Privacy considerations
Hook inputs and outputs may contain sensitive information.
### What data is collected
Hook telemetry may include inputs (prompts, code) and outputs (decisions,
reasons) unless disabled.
### Privacy settings
**Disable PII logging:** If you are working with sensitive data, disable prompt
logging in your settings:
```json
{
"telemetry": {
"logPrompts": false
}
}
```
**Suppress Output:** Individual hooks can request their metadata be hidden from
logs and telemetry by returning `"suppressOutput": true` in their JSON response.
> **Note**
> `suppressOutput` only affects background logging. Any `systemMessage` or
> `reason` included in the JSON will still be displayed to the user in the
> terminal.
### Sensitive data in hooks
If your hooks process sensitive data:
1. **Minimize logging:** Don't write sensitive data to log files.
2. **Sanitize outputs:** Remove sensitive data before outputting JSON or writing
to stderr.
================================================
FILE: docs/hooks/index.md
================================================
# Gemini CLI hooks
Hooks are scripts or programs that Gemini CLI executes at specific points in the
agentic loop, allowing you to intercept and customize behavior without modifying
the CLI's source code.
## What are hooks?
Hooks run synchronously as part of the agent loop—when a hook event fires,
Gemini CLI waits for all matching hooks to complete before continuing.
With hooks, you can:
- **Add context:** Inject relevant information (like git history) before the
model processes a request.
- **Validate actions:** Review tool arguments and block potentially dangerous
operations.
- **Enforce policies:** Implement security scanners and compliance checks.
- **Log interactions:** Track tool usage and model responses for auditing.
- **Optimize behavior:** Dynamically filter available tools or adjust model
parameters.
### Getting started
- **[Writing hooks guide](../hooks/writing-hooks)**: A tutorial on creating your
first hook with comprehensive examples.
- **[Best practices](../hooks/best-practices)**: Guidelines on security,
performance, and debugging.
- **[Hooks reference](../hooks/reference)**: The definitive technical
specification of I/O schemas and exit codes.
## Core concepts
### Hook events
Hooks are triggered by specific events in Gemini CLI's lifecycle.
| Event | When It Fires | Impact | Common Use Cases |
| --------------------- | ---------------------------------------------- | ---------------------- | -------------------------------------------- |
| `SessionStart` | When a session begins (startup, resume, clear) | Inject Context | Initialize resources, load context |
| `SessionEnd` | When a session ends (exit, clear) | Advisory | Clean up, save state |
| `BeforeAgent` | After user submits prompt, before planning | Block Turn / Context | Add context, validate prompts, block turns |
| `AfterAgent` | When agent loop ends | Retry / Halt | Review output, force retry or halt execution |
| `BeforeModel` | Before sending request to LLM | Block Turn / Mock | Modify prompts, swap models, mock responses |
| `AfterModel` | After receiving LLM response | Block Turn / Redact | Filter/redact responses, log interactions |
| `BeforeToolSelection` | Before LLM selects tools | Filter Tools | Filter available tools, optimize selection |
| `BeforeTool` | Before a tool executes | Block Tool / Rewrite | Validate arguments, block dangerous ops |
| `AfterTool` | After a tool executes | Block Result / Context | Process results, run tests, hide results |
| `PreCompress` | Before context compression | Advisory | Save state, notify user |
| `Notification` | When a system notification occurs | Advisory | Forward to desktop alerts, logging |
### Global mechanics
Understanding these core principles is essential for building robust hooks.
#### Strict JSON requirements (The "Golden Rule")
Hooks communicate via `stdin` (Input) and `stdout` (Output).
1. **Silence is Mandatory**: Your script **must not** print any plain text to
`stdout` other than the final JSON object. **Even a single `echo` or `print`
call before the JSON will break parsing.**
2. **Pollution = Failure**: If `stdout` contains non-JSON text, parsing will
fail. The CLI will default to "Allow" and treat the entire output as a
`systemMessage`.
3. **Debug via Stderr**: Use `stderr` for **all** logging and debugging (e.g.,
`echo "debug" >&2`). Gemini CLI captures `stderr` but never attempts to parse
it as JSON.
#### Exit codes
Gemini CLI uses exit codes to determine the high-level outcome of a hook
execution:
| Exit Code | Label | Behavioral Impact |
| --------- | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **0** | **Success** | The `stdout` is parsed as JSON. **Preferred code** for all logic, including intentional blocks (e.g., `{"decision": "deny"}`). |
| **2** | **System Block** | **Critical Block**. The target action (tool, turn, or stop) is aborted. `stderr` is used as the rejection reason. High severity; used for security stops or script failures. |
| **Other** | **Warning** | Non-fatal failure. A warning is shown, but the interaction proceeds using original parameters. |
#### Matchers
You can filter which specific tools or triggers fire your hook using the
`matcher` field.
- **Tool events** (`BeforeTool`, `AfterTool`): Matchers are **Regular
Expressions**. (e.g., `"write_.*"`).
- **Lifecycle events**: Matchers are **Exact Strings**. (e.g., `"startup"`).
- **Wildcards**: `"*"` or `""` (empty string) matches all occurrences.
## Configuration
Hooks are configured in `settings.json`. Gemini CLI merges configurations from
multiple layers in the following order of precedence (highest to lowest):
1. **Project settings**: `.gemini/settings.json` in the current directory.
2. **User settings**: `~/.gemini/settings.json`.
3. **System settings**: `/etc/gemini-cli/settings.json`.
4. **Extensions**: Hooks defined by installed extensions.
### Configuration schema
```json
{
"hooks": {
"BeforeTool": [
{
"matcher": "write_file|replace",
"hooks": [
{
"name": "security-check",
"type": "command",
"command": "$GEMINI_PROJECT_DIR/.gemini/hooks/security.sh",
"timeout": 5000
}
]
}
]
}
}
```
#### Hook configuration fields
| Field | Type | Required | Description |
| :------------ | :----- | :-------- | :------------------------------------------------------------------- |
| `type` | string | **Yes** | The execution engine. Currently only `"command"` is supported. |
| `command` | string | **Yes\*** | The shell command to execute. (Required when `type` is `"command"`). |
| `name` | string | No | A friendly name for identifying the hook in logs and CLI commands. |
| `timeout` | number | No | Execution timeout in milliseconds (default: 60000). |
| `description` | string | No | A brief explanation of the hook's purpose. |
---
### Environment variables
Hooks are executed with a sanitized environment.
- `GEMINI_PROJECT_DIR`: The absolute path to the project root.
- `GEMINI_SESSION_ID`: The unique ID for the current session.
- `GEMINI_CWD`: The current working directory.
- `CLAUDE_PROJECT_DIR`: (Alias) Provided for compatibility.
## Security and risks
> [!WARNING]
> Hooks execute arbitrary code with your user privileges. By
> configuring hooks, you are allowing scripts to run shell commands on your
> machine.
**Project-level hooks** are particularly risky when opening untrusted projects.
Gemini CLI **fingerprints** project hooks. If a hook's name or command changes
(e.g., via `git pull`), it is treated as a **new, untrusted hook** and you will
be warned before it executes.
See [Security Considerations](../hooks/best-practices#using-hooks-securely) for
a detailed threat model.
## Managing hooks
Use the CLI commands to manage hooks without editing JSON manually:
- **View hooks:** `/hooks panel`
- **Enable/Disable all:** `/hooks enable-all` or `/hooks disable-all`
- **Toggle individual:** `/hooks enable <name>` or `/hooks disable <name>`
================================================
FILE: docs/hooks/reference.md
================================================
# Hooks reference
This document provides the technical specification for Gemini CLI hooks,
including JSON schemas and API details.
## Global hook mechanics
- **Communication**: `stdin` for Input (JSON), `stdout` for Output (JSON), and
`stderr` for logs and feedback.
- **Exit codes**:
- `0`: Success. `stdout` is parsed as JSON. **Preferred for all logic.**
- `2`: System Block. The action is blocked; `stderr` is used as the rejection
reason.
- `Other`: Warning. A non-fatal failure occurred; the CLI continues with a
warning.
- **Silence is Mandatory**: Your script **must not** print any plain text to
`stdout` other than the final JSON.
---
## Configuration schema
Hooks are defined in `settings.json` within the `hooks` object. Each event
(e.g., `BeforeTool`) contains an array of **hook definitions**.
### Hook definition
| Field | Type | Required | Description |
| :----------- | :-------- | :------- | :-------------------------------------------------------------------------------------- |
| `matcher` | `string` | No | A regex (for tools) or exact string (for lifecycle) to filter when the hook runs. |
| `sequential` | `boolean` | No | If `true`, hooks in this group run one after another. If `false`, they run in parallel. |
| `hooks` | `array` | **Yes** | An array of **hook configurations**. |
### Hook configuration
| Field | Type | Required | Description |
| :------------ | :------- | :-------- | :------------------------------------------------------------------- |
| `type` | `string` | **Yes** | The execution engine. Currently only `"command"` is supported. |
| `command` | `string` | **Yes\*** | The shell command to execute. (Required when `type` is `"command"`). |
| `name` | `string` | No | A friendly name for identifying the hook in logs and CLI commands. |
| `timeout` | `number` | No | Execution timeout in milliseconds (default: 60000). |
| `description` | `string` | No | A brief explanation of the hook's purpose. |
---
## Base input schema
All hooks receive these common fields via `stdin`:
```typescript
{
"session_id": string, // Unique ID for the current session
"transcript_path": string, // Absolute path to session transcript JSON
"cwd": string, // Current working directory
"hook_event_name": string, // The firing event (e.g. "BeforeTool")
"timestamp": string // ISO 8601 execution time
}
```
---
## Common output fields
Most hooks support these fields in their `stdout` JSON:
| Field | Type | Description |
| :--------------- | :-------- | :----------------------------------------------------------------------------- |
| `systemMessage` | `string` | Displayed immediately to the user in the terminal. |
| `suppressOutput` | `boolean` | If `true`, hides internal hook metadata from logs/telemetry. |
| `continue` | `boolean` | If `false`, stops the entire agent loop immediately. |
| `stopReason` | `string` | Displayed to the user when `continue` is `false`. |
| `decision` | `string` | `"allow"` or `"deny"` (alias `"block"`). Specific impact depends on the event. |
| `reason` | `string` | The feedback/error message provided when a `decision` is `"deny"`. |
---
## Tool hooks
### Matchers and tool names
For `BeforeTool` and `AfterTool` events, the `matcher` field in your settings is
compared against the name of the tool being executed.
- **Built-in Tools**: You can match any built-in tool (e.g., `read_file`,
`run_shell_command`). See the [Tools Reference](../reference/tools) for a full
list of available tool names.
- **MCP Tools**: Tools from MCP servers follow the naming pattern
`mcp__<serverName>__<toolName>` (e.g., `mcp__github__create_issue`).
- **Regex Support**: Matchers support regular expressions (e.g.,
`matcher: "read_.*"` matches all file reading tools).
### `BeforeTool`
Fires before a tool is invoked. Used for argument validation, security checks,
and parameter rewriting.
- **Input Fields**:
- `tool_name`: (`string`) The name of the tool being called.
- `tool_input`: (`object`) The raw arguments generated by the model.
- `mcp_context`: (`object`) Optional metadata for MCP-based tools.
- `original_request_name`: (`string`) The original name of the tool being
called, if this is a tail tool call.
- **Relevant Output Fields**:
- `decision`: Set to `"deny"` (or `"block"`) to prevent the tool from
executing.
- `reason`: Required if denied. This text is sent **to the agent** as a tool
error, allowing it to respond or retry.
- `hookSpecificOutput.tool_input`: An object that **merges with and
overrides** the model's arguments before execution.
- `continue`: Set to `false` to **kill the entire agent loop** immediately.
- **Exit Code 2 (Block Tool)**: Prevents execution. Uses `stderr` as the
`reason` sent to the agent. **The turn continues.**
### `AfterTool`
Fires after a tool executes. Used for result auditing, context injection, or
hiding sensitive output from the agent.
- **Input Fields**:
- `tool_name`: (`string`)
- `tool_input`: (`object`) The original arguments.
- `tool_response`: (`object`) The result containing `llmContent`,
`returnDisplay`, and optional `error`.
- `mcp_context`: (`object`)
- `original_request_name`: (`string`) The original name of the tool being
called, if this is a tail tool call.
- **Relevant Output Fields**:
- `decision`: Set to `"deny"` to hide the real tool output from the agent.
- `reason`: Required if denied. This text **replaces** the tool result sent
back to the model.
- `hookSpecificOutput.additionalContext`: Text that is **appended** to the
tool result for the agent.
- `hookSpecificOutput.tailToolCallRequest`: (`{ name: string, args: object }`)
A request to execute another tool immediately after this one. The result of
this "tail call" will replace the original tool's response. Ideal for
programmatic tool routing.
- `continue`: Set to `false` to **kill the entire agent loop** immediately.
- **Exit Code 2 (Block Result)**: Hides the tool result. Uses `stderr` as the
replacement content sent to the agent. **The turn continues.**
---
## Agent hooks
### `BeforeAgent`
Fires after a user submits a prompt, but before the agent begins planning. Used
for prompt validation or injecting dynamic context.
- **Input Fields**:
- `prompt`: (`string`) The original text submitted by the user.
- **Relevant Output Fields**:
- `hookSpecificOutput.additionalContext`: Text that is **appended** to the
prompt for this turn only.
- `decision`: Set to `"deny"` to block the turn and **discard the user's
message** (it will not appear in history).
- `continue`: Set to `false` to block the turn but **save the message to
history**.
- `reason`: Required if denied or stopped.
- **Exit Code 2 (Block Turn)**: Aborts the turn and erases the prompt from
context. Same as `decision: "deny"`.
### `AfterAgent`
Fires once per turn after the model generates its final response. Primary use
case is response validation and automatic retries.
- **Input Fields**:
- `prompt`: (`string`) The user's original request.
- `prompt_response`: (`string`) The final text generated by the agent.
- `stop_hook_active`: (`boolean`) Indicates if this hook is already running as
part of a retry sequence.
- **Relevant Output Fields**:
- `decision`: Set to `"deny"` to **reject the response** and force a retry.
- `reason`: Required if denied. This text is sent **to the agent as a new
prompt** to request a correction.
- `continue`: Set to `false` to **stop the session** without retrying.
- `hookSpecificOutput.clearContext`: If `true`, clears conversation history
(LLM memory) while preserving UI display.
- **Exit Code 2 (Retry)**: Rejects the response and triggers an automatic retry
turn using `stderr` as the feedback prompt.
---
## Model hooks
### `BeforeModel`
Fires before sending a request to the LLM. Operates on a stable, SDK-agnostic
request format.
- **Input Fields**:
- `llm_request`: (`object`) Contains `model`, `messages`, and `config`
(generation params).
- **Relevant Output Fields**:
- `hookSpecificOutput.llm_request`: An object that **overrides** parts of the
outgoing request (e.g., changing models or temperature).
- `hookSpecificOutput.llm_response`: A **Synthetic Response** object. If
provided, the CLI skips the LLM call entirely and uses this as the response.
- `decision`: Set to `"deny"` to block the request and abort the turn.
- **Exit Code 2 (Block Turn)**: Aborts the turn and skips the LLM call. Uses
`stderr` as the error message.
### `BeforeToolSelection`
Fires before the LLM decides which tools to call. Used to filter the available
toolset or force specific tool modes.
- **Input Fields**:
- `llm_request`: (`object`) Same format as `BeforeModel`.
- **Relevant Output Fields**:
- `hookSpecificOutput.toolConfig.mode`: (`"AUTO" | "ANY" | "NONE"`)
- `"NONE"`: Disables all tools (Wins over other hooks).
- `"ANY"`: Forces at least one tool call.
- `hookSpecificOutput.toolConfig.allowedFunctionNames`: (`string[]`) Whitelist
of tool names.
- **Union Strategy**: Multiple hooks' whitelists are **combined**.
- **Limitations**: Does **not** support `decision`, `continue`, or
`systemMessage`.
### `AfterModel`
Fires immediately after an LLM response chunk is received. Used for real-time
redaction or PII filtering.
- **Input Fields**:
- `llm_request`: (`object`) The original request.
- `llm_response`: (`object`) The model's response (or a single chunk during
streaming).
- **Relevant Output Fields**:
- `hookSpecificOutput.llm_response`: An object that **replaces** the model's
response chunk.
- `decision`: Set to `"deny"` to discard the response chunk and block the
turn.
- `continue`: Set to `false` to **kill the entire agent loop** immediately.
- **Note on Streaming**: Fired for **every chunk** generated by the model.
Modifying the response only affects the current chunk.
- **Exit Code 2 (Block Response)**: Aborts the turn and discards the model's
output. Uses `stderr` as the error message.
---
## Lifecycle & system hooks
### `SessionStart`
Fires on application startup, resuming a session, or after a `/clear` command.
Used for loading initial context.
- **Input fields**:
- `source`: (`"startup" | "resume" | "clear"`)
- **Relevant output fields**:
- `hookSpecificOutput.additionalContext`: (`string`)
- **Interactive**: Injected as the first turn in history.
- **Non-interactive**: Prepended to the user's prompt.
- `systemMessage`: Shown at the start of the session.
- **Advisory only**: `continue` and `decision` fields are **ignored**. Startup
is never blocked.
### `SessionEnd`
Fires when the CLI exits or a session is cleared. Used for cleanup or final
telemetry.
- **Input Fields**:
- `reason`: (`"exit" | "clear" | "logout" | "prompt_input_exit" | "other"`)
- **Relevant Output Fields**:
- `systemMessage`: Displayed to the user during shutdown.
- **Best Effort**: The CLI **will not wait** for this hook to complete and
ignores all flow-control fields (`continue`, `decision`).
### `Notification`
Fires when the CLI emits a system alert (e.g., Tool Permissions). Used for
external logging or cross-platform alerts.
- **Input Fields**:
- `notification_type`: (`"ToolPermission"`)
- `message`: Summary of the alert.
- `details`: JSON object with alert-specific metadata (e.g., tool name, file
path).
- **Relevant Output Fields**:
- `systemMessage`: Displayed alongside the system alert.
- **Observability Only**: This hook **cannot** block alerts or grant permissions
automatically. Flow-control fields are ignored.
### `PreCompress`
Fires before the CLI summarizes history to save tokens. Used for logging or
state saving.
- **Input Fields**:
- `trigger`: (`"auto" | "manual"`)
- **Relevant Output Fields**:
- `systemMessage`: Displayed to the user before compression.
- **Advisory Only**: Fired asynchronously. It **cannot** block or modify the
compression process. Flow-control fields are ignored.
---
## Stable Model API
Gemini CLI uses these structures to ensure hooks don't break across SDK updates.
**LLMRequest**:
```typescript
{
"model": string,
"messages": Array<{
"role": "user" | "model" | "system",
"content": string // Non-text parts are filtered out for hooks
}>,
"config": { "temperature": number, ... },
"toolConfig": { "mode": string, "allowedFunctionNames": string[] }
}
```
**LLMResponse**:
```typescript
{
"candidates": Array<{
"content": { "role": "model", "parts": string[] },
"finishReason": string
}>,
"usageMetadata": { "totalTokenCount": number }
}
```
================================================
FILE: docs/hooks/writing-hooks.md
================================================
# Writing hooks for Gemini CLI
This guide will walk you through creating hooks for Gemini CLI, from a simple
logging hook to a comprehensive workflow assistant.
## Prerequisites
Before you start, make sure you have:
- Gemini CLI installed and configured
- Basic understanding of shell scripting or JavaScript/Node.js
- Familiarity with JSON for hook input/output
## Quick start
Let's create a simple hook that logs all tool executions to understand the
basics.
**Crucial Rule:** Always write logs to `stderr`. Write only the final JSON to
`stdout`.
### Step 1: Create your hook script
Create a directory for hooks and a simple logging script.
> **Note**:
>
> This example uses `jq` to parse JSON. If you don't have it installed, you can
> perform similar logic using Node.js or Python.
**macOS/Linux**
```bash
mkdir -p .gemini/hooks
cat > .gemini/hooks/log-tools.sh << 'EOF'
#!/usr/bin/env bash
# Read hook input from stdin
input=$(cat)
# Extract tool name (requires jq)
tool_name=$(echo "$input" | jq -r '.tool_name')
# Log to stderr (visible in terminal if hook fails, or captured in logs)
echo "Logging tool: $tool_name" >&2
# Log to file
echo "[$(date)] Tool executed: $tool_name" >> .gemini/tool-log.txt
# Return success (exit 0) with empty JSON
echo "{}"
exit 0
EOF
chmod +x .gemini/hooks/log-tools.sh
```
**Windows (PowerShell)**
```powershell
New-Item -ItemType Directory -Force -Path ".gemini\hooks"
@"
# Read hook input from stdin
`$inputJson = `$input | Out-String | ConvertFrom-Json
# Extract tool name
`$toolName = `$inputJson.tool_name
# Log to stderr (visible in terminal if hook fails, or captured in logs)
[Console]::Error.WriteLine("Logging tool: `$toolName")
# Log to file
"[`$(Get-Date -Format 'o')] Tool executed: `$toolName" | Out-File -FilePath ".gemini\tool-log.txt" -Append -Encoding utf8
# Return success with empty JSON
"{}"
"@ | Out-File -FilePath ".gemini\hooks\log-tools.ps1" -Encoding utf8
```
## Exit Code Strategies
There are two ways to control or block an action in Gemini CLI:
| Strategy | Exit Code | Implementation | Best For |
| :------------------------- | :-------- | :----------------------------------------------------------------- | :---------------------------------------------------------- |
| **Structured (Idiomatic)** | `0` | Return a JSON object like `{"decision": "deny", "reason": "..."}`. | Production hooks, custom user feedback, and complex logic. |
| **Emergency Brake** | `2` | Print the error message to `stderr` and exit. | Simple security gates, script errors, or rapid prototyping. |
## Practical examples
### Security: Block secrets in commits
Prevent committing files containing API keys or passwords. Note that we use
**Exit Code 0** to provide a structured denial message to the agent.
**`.gemini/hooks/block-secrets.sh`:**
```bash
#!/usr/bin/env bash
input=$(cat)
# Extract content being written
content=$(echo "$input" | jq -r '.tool_input.content // .tool_input.new_string // ""')
# Check for secrets
if echo "$content" | grep -qE 'api[_-]?key|password|secret'; then
# Log to stderr
echo "Blocked potential secret" >&2
# Return structured denial to stdout
cat <<EOF
{"decision": "deny", "reason": "Blocked: file content appears to contain a secret (API key, password, or secret)."}
EOF
exit 0
fi
# Allow the write
echo '{}'
```
### Context: Inject git history
Give the agent recent repository context before it starts planning with a
`BeforeAgent` hook.
**`.gemini/hooks/git-context.sh`:**
```bash
#!/usr/bin/env bash
# Collect recent history (fall back gracefully outside a git repo)
history=$(git log --oneline -5 2>/dev/null || echo "No git history")
# Return as JSON
cat <<EOF
{"hookSpecificOutput": {"hookEventName": "BeforeAgent", "additionalContext": "Recent commits:\n$history"}}
EOF
```
### Optimization: Filter tools by intent
Use `BeforeToolSelection` to narrow the available toolset based on the user's
most recent request.
**`.gemini/hooks/filter-tools.js`:**
```javascript
#!/usr/bin/env node
const fs = require('fs');
async function main() {
  const input = JSON.parse(fs.readFileSync(0, 'utf-8'));
  const messages = input.llm_request?.messages ?? [];
  const lastUserMessage = [...messages].reverse().find((m) => m.role === 'user');
if (!lastUserMessage) {
console.log(JSON.stringify({})); // Do nothing
return;
}
const text = lastUserMessage.content;
const allowed = ['write_todos']; // Always allow todo tracking
// Simple keyword matching
if (text.includes('read') || text.includes('check')) {
allowed.push('read_file', 'list_directory');
}
if (text.includes('test')) {
allowed.push('run_shell_command');
}
// If we found specific intent, filter tools. Otherwise allow all.
if (allowed.length > 1) {
console.log(
JSON.stringify({
hookSpecificOutput: {
hookEventName: 'BeforeToolSelection',
toolConfig: {
mode: 'ANY', // Force usage of one of these tools (or AUTO)
allowedFunctionNames: allowed,
},
},
}),
);
} else {
console.log(JSON.stringify({}));
}
}
main().catch((err) => {
console.error(err);
process.exit(1);
});
```
**`.gemini/settings.json`:**
```json
{
"hooks": {
"BeforeToolSelection": [
{
"matcher": "*",
"hooks": [
{
"name": "intent-filter",
"type": "command",
"command": "node .gemini/hooks/filter-tools.js"
}
]
}
]
}
}
```
> **TIP**
>
> **Union Aggregation Strategy**: `BeforeToolSelection` is unique in that it
> combines the results of all matching hooks. If you have multiple filtering
> hooks, the agent will receive the **union** of all whitelisted tools. Only
> using `mode: "NONE"` will override other hooks to disable all tools.
## Complete example: Smart Development Workflow Assistant
This comprehensive example demonstrates all hook events working together. We
will build a system that maintains memory, filters tools, and checks for
security.
### Architecture
1. **SessionStart**: Load project memories.
2. **BeforeAgent**: Inject memories into context.
3. **BeforeToolSelection**: Filter tools based on intent.
4. **BeforeTool**: Scan for secrets.
5. **AfterModel**: Record interactions.
6. **AfterAgent**: Validate final response quality (Retry).
7. **SessionEnd**: Consolidate memories.
### Configuration (`.gemini/settings.json`)
```json
{
"hooks": {
"SessionStart": [
{
"matcher": "startup",
"hooks": [
{
"name": "init",
"type": "command",
"command": "node .gemini/hooks/init.js"
}
]
}
],
"BeforeAgent": [
{
"matcher": "*",
"hooks": [
{
"name": "memory",
"type": "command",
"command": "node .gemini/hooks/inject-memories.js"
}
]
}
],
"BeforeToolSelection": [
{
"matcher": "*",
"hooks": [
{
"name": "filter",
"type": "command",
"command": "node .gemini/hooks/rag-filter.js"
}
]
}
],
"BeforeTool": [
{
"matcher": "write_file",
"hooks": [
{
"name": "security",
"type": "command",
"command": "node .gemini/hooks/security.js"
}
]
}
],
"AfterModel": [
{
"matcher": "*",
"hooks": [
{
"name": "record",
"type": "command",
"command": "node .gemini/hooks/record.js"
}
]
}
],
"AfterAgent": [
{
"matcher": "*",
"hooks": [
{
"name": "validate",
"type": "command",
"command": "node .gemini/hooks/validate.js"
}
]
}
],
"SessionEnd": [
{
"matcher": "exit",
"hooks": [
{
"name": "save",
"type": "command",
"command": "node .gemini/hooks/consolidate.js"
}
]
}
]
}
}
```
### Hook Scripts
> **Note**: For brevity, these scripts use `console.error` for logging and
> standard `console.log` for JSON output.
#### 1. Initialize (`init.js`)
```javascript
#!/usr/bin/env node
// Initialize DB or resources
console.error('Initializing assistant...');
// Output to user
console.log(
JSON.stringify({
systemMessage: '🧠 Smart Assistant Loaded',
}),
);
```
#### 2. Inject Memories (`inject-memories.js`)
```javascript
#!/usr/bin/env node
const fs = require('fs');
async function main() {
const input = JSON.parse(fs.readFileSync(0, 'utf-8'));
// Assume we fetch memories from a DB here
const memories = '- [Memory] Always use TypeScript for this project.';
console.log(
JSON.stringify({
hookSpecificOutput: {
hookEventName: 'BeforeAgent',
additionalContext: `\n## Relevant Memories\n${memories}`,
},
}),
);
}
main();
```
#### 3. Security Check (`security.js`)
```javascript
#!/usr/bin/env node
const fs = require('fs');
const input = JSON.parse(fs.readFileSync(0));
const content = input.tool_input.content || '';
if (content.includes('SECRET_KEY')) {
console.log(
JSON.stringify({
decision: 'deny',
reason: 'Found SECRET_KEY in content',
systemMessage: '🚨 Blocked sensitive commit',
}),
);
process.exit(0);
}
console.log(JSON.stringify({ decision: 'allow' }));
```
#### 4. Record Interaction (`record.js`)
```javascript
#!/usr/bin/env node
const fs = require('fs');
const path = require('path');
const input = JSON.parse(fs.readFileSync(0));
const { llm_request, llm_response } = input;
const logFile = path.join(
process.env.GEMINI_PROJECT_DIR,
'.gemini/memory/session.jsonl',
);
fs.appendFileSync(
logFile,
JSON.stringify({
request: llm_request,
response: llm_response,
timestamp: new Date().toISOString(),
}) + '\n',
);
console.log(JSON.stringify({}));
```
#### 5. Validate Response (`validate.js`)
```javascript
#!/usr/bin/env node
const fs = require('fs');
const input = JSON.parse(fs.readFileSync(0));
const response = input.prompt_response;
// Example: Check if the agent forgot to include a summary
if (!response.includes('Summary:')) {
console.log(
JSON.stringify({
decision: 'block', // Triggers an automatic retry turn
reason: 'Your response is missing a Summary section. Please add one.',
systemMessage: '🔄 Requesting missing summary...',
}),
);
process.exit(0);
}
console.log(JSON.stringify({ decision: 'allow' }));
```
#### 6. Consolidate Memories (`consolidate.js`)
```javascript
#!/usr/bin/env node
// SessionEnd hook: placeholder for persisting final session state.
// Logic to save final session state
// NOTE(review): message goes to stderr, presumably so it is not parsed
// as the hook's JSON response on stdout — confirm against the hooks
// output contract.
console.error('Consolidating memories for session end...');
```
## Packaging as an extension
While project-level hooks are great for specific repositories, you can share
your hooks across multiple projects by packaging them as a
[Gemini CLI extension](../extensions/index.md).
This provides version control, easy distribution, and centralized management.
================================================
FILE: docs/ide-integration/ide-companion-spec.md
================================================
# Gemini CLI companion plugin: Interface specification
> Last Updated: September 15, 2025
This document defines the contract for building a companion plugin to enable
Gemini CLI's IDE mode. For VS Code, these features (native diffing, context
awareness) are provided by the official extension
([marketplace](https://marketplace.visualstudio.com/items?itemName=Google.gemini-cli-vscode-ide-companion)).
This specification is for contributors who wish to bring similar functionality
to other editors like JetBrains IDEs, Sublime Text, etc.
## I. The communication interface
Gemini CLI and the IDE plugin communicate through a local communication channel.
### 1. Transport layer: MCP over HTTP
The plugin **MUST** run a local HTTP server that implements the **Model Context
Protocol (MCP)**.
- **Protocol:** The server must be a valid MCP server. We recommend using an
existing MCP SDK for your language of choice if available.
- **Endpoint:** The server should expose a single endpoint (e.g., `/mcp`) for
all MCP communication.
- **Port:** The server **MUST** listen on a dynamically assigned port (i.e.,
listen on port `0`).
### 2. Discovery mechanism: The port file
For Gemini CLI to connect, it needs to discover which IDE instance it's running
in and what port your server is using. The plugin **MUST** facilitate this by
creating a "discovery file."
- **How the CLI finds the file:** The CLI determines the Process ID (PID) of the
IDE it's running in by traversing the process tree. It then looks for a
discovery file that contains this PID in its name.
- **File location:** The file must be created in a specific directory:
`os.tmpdir()/gemini/ide/`. Your plugin must create this directory if it
doesn't exist.
- **File naming convention:** The filename is critical and **MUST** follow the
pattern: `gemini-ide-server-${PID}-${PORT}.json`
- `${PID}`: The process ID of the parent IDE process. Your plugin must
determine this PID and include it in the filename.
- `${PORT}`: The port your MCP server is listening on.
- **File content and workspace validation:** The file **MUST** contain a JSON
object with the following structure:
```json
{
"port": 12345,
"workspacePath": "/path/to/project1:/path/to/project2",
"authToken": "a-very-secret-token",
"ideInfo": {
"name": "vscode",
"displayName": "VS Code"
}
}
```
- `port` (number, required): The port of the MCP server.
- `workspacePath` (string, required): A list of all open workspace root paths,
delimited by the OS-specific path separator (`:` for Linux/macOS, `;` for
Windows). The CLI uses this path to ensure it's running in the same project
folder that's open in the IDE. If the CLI's current working directory is not
a sub-directory of `workspacePath`, the connection will be rejected. Your
plugin **MUST** provide the correct, absolute path(s) to the root of the
open workspace(s).
- `authToken` (string, required): A secret token for securing the connection.
The CLI will include this token in an `Authorization: Bearer <token>` header
on all requests.
- `ideInfo` (object, required): Information about the IDE.
- `name` (string, required): A short, lowercase identifier for the IDE
(e.g., `vscode`, `jetbrains`).
- `displayName` (string, required): A user-friendly name for the IDE (e.g.,
`VS Code`, `JetBrains IDE`).
- **Authentication:** To secure the connection, the plugin **MUST** generate a
unique, secret token and include it in the discovery file. The CLI will then
include this token in the `Authorization` header for all requests to the MCP
server (e.g., `Authorization: Bearer a-very-secret-token`). Your server
**MUST** validate this token on every request and reject any that are
unauthorized.
- **Tie-breaking with environment variables (recommended):** For the most
reliable experience, your plugin **SHOULD** both create the discovery file and
set the `GEMINI_CLI_IDE_SERVER_PORT` environment variable in the integrated
terminal. The file serves as the primary discovery mechanism, but the
environment variable is crucial for tie-breaking. If a user has multiple IDE
windows open for the same workspace, the CLI uses the
`GEMINI_CLI_IDE_SERVER_PORT` variable to identify and connect to the correct
window's server.
## II. The context interface
To enable context awareness, the plugin **MAY** provide the CLI with real-time
information about the user's activity in the IDE.
### `ide/contextUpdate` notification
The plugin **MAY** send an `ide/contextUpdate`
[notification](https://modelcontextprotocol.io/specification/2025-06-18/basic/index#notifications)
to the CLI whenever the user's context changes.
- **Triggering events:** This notification should be sent (with a recommended
debounce of 50ms) when:
- A file is opened, closed, or focused.
- The user's cursor position or text selection changes in the active file.
- **Payload (`IdeContext`):** The notification parameters **MUST** be an
`IdeContext` object:
```typescript
interface IdeContext {
workspaceState?: {
openFiles?: File[];
isTrusted?: boolean;
};
}
interface File {
// Absolute path to the file
path: string;
// Last focused Unix timestamp (for ordering)
timestamp: number;
// True if this is the currently focused file
isActive?: boolean;
cursor?: {
// 1-based line number
line: number;
// 1-based character number
character: number;
};
// The text currently selected by the user
selectedText?: string;
}
```
> [!NOTE]
> The `openFiles` list should only include files that exist on disk.
> Virtual files (e.g., unsaved files without a path, editor settings pages)
> **MUST** be excluded.
### How the CLI uses this context
After receiving the `IdeContext` object, the CLI performs several normalization
and truncation steps before sending the information to the model.
- **File ordering:** The CLI uses the `timestamp` field to determine the most
recently used files. It sorts the `openFiles` list based on this value.
Therefore, your plugin **MUST** provide an accurate Unix timestamp for when a
file was last focused.
- **Active file:** The CLI considers only the most recent file (after sorting)
to be the "active" file. It will ignore the `isActive` flag on all other files
and clear their `cursor` and `selectedText` fields. Your plugin should focus
on setting `isActive: true` and providing cursor/selection details only for
the currently focused file.
- **Truncation:** To manage token limits, the CLI truncates both the file list
(to 10 files) and the `selectedText` (to 16KB).
While the CLI handles the final truncation, it is highly recommended that your
plugin also limits the amount of context it sends.
## III. The diffing interface
To enable interactive code modifications, the plugin **MAY** expose a diffing
interface. This allows the CLI to request that the IDE open a diff view, showing
proposed changes to a file. The user can then review, edit, and ultimately
accept or reject these changes directly within the IDE.
### `openDiff` tool
The plugin **MUST** register an `openDiff` tool on its MCP server.
- **Description:** This tool instructs the IDE to open a modifiable diff view
for a specific file.
- **Request (`OpenDiffRequest`):** The tool is invoked via a `tools/call`
request. The `arguments` field within the request's `params` **MUST** be an
`OpenDiffRequest` object.
```typescript
interface OpenDiffRequest {
// The absolute path to the file to be diffed.
filePath: string;
// The proposed new content for the file.
newContent: string;
}
```
- **Response (`CallToolResult`):** The tool **MUST** immediately return a
`CallToolResult` to acknowledge the request and report whether the diff view
was successfully opened.
- On Success: If the diff view was opened successfully, the response **MUST**
contain empty content (i.e., `content: []`).
- On Failure: If an error prevented the diff view from opening, the response
**MUST** have `isError: true` and include a `TextContent` block in the
`content` array describing the error.
The actual outcome of the diff (acceptance or rejection) is communicated
asynchronously via notifications.
### `closeDiff` tool
The plugin **MUST** register a `closeDiff` tool on its MCP server.
- **Description:** This tool instructs the IDE to close an open diff view for a
specific file.
- **Request (`CloseDiffRequest`):** The tool is invoked via a `tools/call`
request. The `arguments` field within the request's `params` **MUST** be an
`CloseDiffRequest` object.
```typescript
interface CloseDiffRequest {
// The absolute path to the file whose diff view should be closed.
filePath: string;
}
```
- **Response (`CallToolResult`):** The tool **MUST** return a `CallToolResult`.
- On Success: If the diff view was closed successfully, the response **MUST**
include a single **TextContent** block in the content array containing the
file's final content before closing.
- On Failure: If an error prevented the diff view from closing, the response
**MUST** have `isError: true` and include a `TextContent` block in the
`content` array describing the error.
### `ide/diffAccepted` notification
When the user accepts the changes in a diff view (e.g., by clicking an "Apply"
or "Save" button), the plugin **MUST** send an `ide/diffAccepted` notification
to the CLI.
- **Payload:** The notification parameters **MUST** include the file path and
the final content of the file. The content may differ from the original
`newContent` if the user made manual edits in the diff view.
```typescript
{
// The absolute path to the file that was diffed.
filePath: string;
// The full content of the file after acceptance.
content: string;
}
```
### `ide/diffRejected` notification
When the user rejects the changes (e.g., by closing the diff view without
accepting), the plugin **MUST** send an `ide/diffRejected` notification to the
CLI.
- **Payload:** The notification parameters **MUST** include the file path of the
rejected diff.
```typescript
{
// The absolute path to the file that was diffed.
filePath: string;
}
```
## IV. The lifecycle interface
The plugin **MUST** manage its resources and the discovery file correctly based
on the IDE's lifecycle.
- **On activation (IDE startup/plugin enabled):**
1. Start the MCP server.
2. Create the discovery file.
- **On deactivation (IDE shutdown/plugin disabled):**
1. Stop the MCP server.
2. Delete the discovery file.
================================================
FILE: docs/ide-integration/index.md
================================================
# IDE integration
Gemini CLI can integrate with your IDE to provide a more seamless and
context-aware experience. This integration allows the CLI to understand your
workspace better and enables powerful features like native in-editor diffing.
Currently, the supported IDEs are [Antigravity](https://antigravity.google),
[Visual Studio Code](https://code.visualstudio.com/), and other editors that
support VS Code extensions. To build support for other editors, see the
[IDE Companion Extension Spec](./ide-companion-spec.md).
## Features
- **Workspace context:** The CLI automatically gains awareness of your workspace
to provide more relevant and accurate responses. This context includes:
- The **10 most recently accessed files** in your workspace.
- Your active cursor position.
- Any text you have selected (up to a 16KB limit; longer selections will be
truncated).
- **Native diffing:** When Gemini suggests code modifications, you can view the
changes directly within your IDE's native diff viewer. This allows you to
review, edit, and accept or reject the suggested changes seamlessly.
- **VS Code commands:** You can access Gemini CLI features directly from the VS
Code Command Palette (`Cmd+Shift+P` or `Ctrl+Shift+P`):
- `Gemini CLI: Run`: Starts a new Gemini CLI session in the integrated
terminal.
- `Gemini CLI: Accept Diff`: Accepts the changes in the active diff editor.
- `Gemini CLI: Close Diff Editor`: Rejects the changes and closes the active
diff editor.
- `Gemini CLI: View Third-Party Notices`: Displays the third-party notices for
the extension.
## Installation and setup
There are three ways to set up the IDE integration:
### 1. Automatic nudge (recommended)
When you run Gemini CLI inside a supported editor, it will automatically detect
your environment and prompt you to connect. Answering "Yes" will automatically
run the necessary setup, which includes installing the companion extension and
enabling the connection.
### 2. Manual installation from CLI
If you previously dismissed the prompt or want to install the extension
manually, you can run the following command inside Gemini CLI:
```
/ide install
```
This will find the correct extension for your IDE and install it.
### 3. Manual installation from a marketplace
You can also install the extension directly from a marketplace.
- **For Visual Studio Code:** Install from the
[VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=google.gemini-cli-vscode-ide-companion).
- **For VS Code forks:** To support forks of VS Code, the extension is also
published on the
[Open VSX Registry](https://open-vsx.org/extension/google/gemini-cli-vscode-ide-companion).
Follow your editor's instructions for installing extensions from this
registry.
> [!NOTE]
> The "Gemini CLI Companion" extension may appear towards the bottom of
> search results. If you don't see it immediately, try scrolling down or
> sorting by "Newly Published".
>
> After manually installing the extension, you must run `/ide enable` in the CLI
> to activate the integration.
## Usage
### Enabling and disabling
You can control the IDE integration from within the CLI:
- To enable the connection to the IDE, run:
```
/ide enable
```
- To disable the connection, run:
```
/ide disable
```
When enabled, Gemini CLI will automatically attempt to connect to the IDE
companion extension.
### Checking the status
To check the connection status and see the context the CLI has received from the
IDE, run:
```
/ide status
```
If connected, this command will show the IDE it's connected to and a list of
recently opened files it is aware of.
> [!NOTE]
> The file list is limited to 10 recently accessed files within your
> workspace and only includes local files on disk.
### Working with diffs
When you ask Gemini to modify a file, it can open a diff view directly in your
editor.
**To accept a diff**, you can perform any of the following actions:
- Click the **checkmark icon** in the diff editor's title bar.
- Save the file (e.g., with `Cmd+S` or `Ctrl+S`).
- Open the Command Palette and run **Gemini CLI: Accept Diff**.
- Respond with `yes` in the CLI when prompted.
**To reject a diff**, you can:
- Click the **'x' icon** in the diff editor's title bar.
- Close the diff editor tab.
- Open the Command Palette and run **Gemini CLI: Close Diff Editor**.
- Respond with `no` in the CLI when prompted.
You can also **modify the suggested changes** directly in the diff view before
accepting them.
If you select ‘Allow for this session’ in the CLI, changes will no longer show
up in the IDE as they will be auto-accepted.
## Using with sandboxing
If you are using Gemini CLI within a sandbox, please be aware of the following:
- **On macOS:** The IDE integration requires network access to communicate with
the IDE companion extension. You must use a Seatbelt profile that allows
network access.
- **In a Docker container:** If you run Gemini CLI inside a Docker (or Podman)
container, the IDE integration can still connect to the VS Code extension
running on your host machine. The CLI is configured to automatically find the
IDE server on `host.docker.internal`. No special configuration is usually
required, but you may need to ensure your Docker networking setup allows
connections from the container to the host.
## Troubleshooting
If you encounter issues with IDE integration, here are some common error
messages and how to resolve them.
### Connection errors
- **Message:**
`🔴 Disconnected: Failed to connect to IDE companion extension in [IDE Name]. Please ensure the extension is running. To install the extension, run /ide install.`
- **Cause:** Gemini CLI could not find the necessary environment variables
(`GEMINI_CLI_IDE_WORKSPACE_PATH` or `GEMINI_CLI_IDE_SERVER_PORT`) to connect
to the IDE. This usually means the IDE companion extension is not running or
did not initialize correctly.
- **Solution:**
1. Make sure you have installed the **Gemini CLI Companion** extension in
your IDE and that it is enabled.
2. Open a new terminal window in your IDE to ensure it picks up the correct
environment.
- **Message:**
`🔴 Disconnected: IDE connection error. The connection was lost unexpectedly. Please try reconnecting by running /ide enable`
- **Cause:** The connection to the IDE companion was lost.
- **Solution:** Run `/ide enable` to try and reconnect. If the issue
continues, open a new terminal window or restart your IDE.
### Manual PID override
If automatic IDE detection fails, or if you are running Gemini CLI in a
standalone terminal and want to manually associate it with a specific IDE
instance, you can set the `GEMINI_CLI_IDE_PID` environment variable to the
process ID (PID) of your IDE.
**macOS/Linux**
```bash
export GEMINI_CLI_IDE_PID=12345
```
**Windows (PowerShell)**
```powershell
$env:GEMINI_CLI_IDE_PID=12345
```
When this variable is set, Gemini CLI will skip automatic detection and attempt
to connect using the provided PID.
### Configuration errors
- **Message:**
`🔴 Disconnected: Directory mismatch. Gemini CLI is running in a different location than the open workspace in [IDE Name]. Please run the CLI from one of the following directories: [List of directories]`
- **Cause:** The CLI's current working directory is outside the workspace you
have open in your IDE.
- **Solution:** `cd` into the same directory that is open in your IDE and
restart the CLI.
- **Message:**
`🔴 Disconnected: To use this feature, please open a workspace folder in [IDE Name] and try again.`
- **Cause:** You have no workspace open in your IDE.
- **Solution:** Open a workspace in your IDE and restart the CLI.
### General errors
- **Message:**
`IDE integration is not supported in your current environment. To use this feature, run Gemini CLI in one of these supported IDEs: [List of IDEs]`
- **Cause:** You are running Gemini CLI in a terminal or environment that is
not a supported IDE.
- **Solution:** Run Gemini CLI from the integrated terminal of a supported
IDE, like Antigravity or VS Code.
- **Message:**
`No installer is available for IDE. Please install the Gemini CLI Companion extension manually from the marketplace.`
- **Cause:** You ran `/ide install`, but the CLI does not have an automated
installer for your specific IDE.
- **Solution:** Open your IDE's extension marketplace, search for "Gemini CLI
Companion", and
[install it manually](#3-manual-installation-from-a-marketplace).
================================================
FILE: docs/index.md
================================================
# Gemini CLI documentation
Gemini CLI brings the power of Gemini models directly into your terminal. Use it
to understand code, automate tasks, and build workflows with your local project
context.
## Install
```bash
npm install -g @google/gemini-cli
```
## Get started
Jump in to Gemini CLI.
- **[Quickstart](./get-started/index.md):** Your first session with Gemini CLI.
- **[Installation](./get-started/installation.md):** How to install Gemini CLI
on your system.
- **[Authentication](./get-started/authentication.md):** Setup instructions for
personal and enterprise accounts.
- **[Examples](./get-started/examples.md):** Practical examples of Gemini CLI in
action.
- **[CLI cheatsheet](./cli/cli-reference.md):** A quick reference for common
commands and options.
- **[Gemini 3 on Gemini CLI](./get-started/gemini-3.md):** Learn about Gemini 3
support in Gemini CLI.
## Use Gemini CLI
User-focused guides and tutorials for daily development workflows.
- **[File management](./cli/tutorials/file-management.md):** How to work with
local files and directories.
- **[Get started with Agent skills](./cli/tutorials/skills-getting-started.md):**
Getting started with specialized expertise.
- **[Manage context and memory](./cli/tutorials/memory-management.md):**
Managing persistent instructions and facts.
- **[Execute shell commands](./cli/tutorials/shell-commands.md):** Executing
system commands safely.
- **[Manage sessions and history](./cli/tutorials/session-management.md):**
Resuming, managing, and rewinding conversations.
- **[Plan tasks with todos](./cli/tutorials/task-planning.md):** Using todos for
complex workflows.
- **[Web search and fetch](./cli/tutorials/web-tools.md):** Searching and
fetching content from the web.
- **[Set up an MCP server](./cli/tutorials/mcp-setup.md):** Set up an MCP
server.
- **[Automate tasks](./cli/tutorials/automation.md):** Automate tasks.
## Features
Technical documentation for each capability of Gemini CLI.
- **[Extensions](./extensions/index.md):** Extend Gemini CLI with new tools and
capabilities.
- **[Agent Skills](./cli/skills.md):** Use specialized agents for specific
tasks.
- **[Checkpointing](./cli/checkpointing.md):** Automatic session snapshots.
- **[Headless mode](./cli/headless.md):** Programmatic and scripting interface.
- **[Hooks](./hooks/index.md):** Customize Gemini CLI behavior with scripts.
- **[IDE integration](./ide-integration/index.md):** Integrate Gemini CLI with
your favorite IDE.
- **[MCP servers](./tools/mcp-server.md):** Connect to and use remote agents.
- **[Model routing](./cli/model-routing.md):** Automatic fallback resilience.
- **[Model selection](./cli/model.md):** Choose the best model for your needs.
- **[Plan mode 🔬](./cli/plan-mode.md):** Use a safe, read-only mode for
planning complex changes.
- **[Subagents 🔬](./core/subagents.md):** Using specialized agents for specific
tasks.
- **[Remote subagents 🔬](./core/remote-agents.md):** Connecting to and using
remote agents.
- **[Rewind](./cli/rewind.md):** Rewind and replay sessions.
- **[Sandboxing](./cli/sandbox.md):** Isolate tool execution.
- **[Settings](./cli/settings.md):** Full configuration reference.
- **[Telemetry](./cli/telemetry.md):** Usage and performance metric details.
- **[Token caching](./cli/token-caching.md):** Performance optimization.
## Configuration
Settings and customization options for Gemini CLI.
- **[Custom commands](./cli/custom-commands.md):** Personalized shortcuts.
- **[Enterprise configuration](./cli/enterprise.md):** Professional environment
controls.
- **[Ignore files (.geminiignore)](./cli/gemini-ignore.md):** Exclusion pattern
reference.
- **[Model configuration](./cli/generation-settings.md):** Fine-tune generation
parameters like temperature and thinking budget.
- **[Project context (GEMINI.md)](./cli/gemini-md.md):** Technical hierarchy of
context files.
- **[System prompt override](./cli/system-prompt.md):** Instruction replacement
logic.
- **[Themes](./cli/themes.md):** UI personalization technical guide.
- **[Trusted folders](./cli/trusted-folders.md):** Security permission logic.
## Reference
Deep technical documentation and API specifications.
- **[Command reference](./reference/commands.md):** Detailed slash command
guide.
- **[Configuration reference](./reference/configuration.md):** Settings and
environment variables.
- **[Keyboard shortcuts](./reference/keyboard-shortcuts.md):** Productivity
tips.
- **[Memory import processor](./reference/memport.md):** How Gemini CLI
processes memory from various sources.
- **[Policy engine](./reference/policy-engine.md):** Fine-grained execution
control.
- **[Tools reference](./reference/tools.md):** Information on how tools are
defined, registered, and used.
## Resources
Support, release history, and legal information.
- **[FAQ](./resources/faq.md):** Answers to frequently asked questions.
- **[Quota and pricing](./resources/quota-and-pricing.md):** Limits and billing
details.
- **[Terms and privacy](./resources/tos-privacy.md):** Official notices and
terms.
- **[Troubleshooting](./resources/troubleshooting.md):** Common issues and
solutions.
- **[Uninstall](./resources/uninstall.md):** How to uninstall Gemini CLI.
## Development
- **[Contribution guide](/docs/contributing):** How to contribute to Gemini CLI.
- **[Integration testing](./integration-tests.md):** Running integration tests.
- **[Issue and PR automation](./issue-and-pr-automation.md):** Automation for
issues and pull requests.
- **[Local development](./local-development.md):** Setting up a local
development environment.
- **[NPM package structure](./npm.md):** The structure of the NPM packages.
## Releases
- **[Release notes](./changelogs/index.md):** Release notes for all versions.
- **[Stable release](./changelogs/latest.md):** The latest stable release.
- **[Preview release](./changelogs/preview.md):** The latest preview release.
================================================
FILE: docs/integration-tests.md
================================================
# Integration tests
This document provides information about the integration testing framework used
in this project.
## Overview
The integration tests are designed to validate the end-to-end functionality of
the Gemini CLI. They execute the built binary in a controlled environment and
verify that it behaves as expected when interacting with the file system.
These tests are located in the `integration-tests` directory and are run using a
custom test runner.
## Building the tests
Prior to running any integration tests, you need to create a release bundle that
you want to actually test:
```bash
npm run bundle
```
You must re-run this command after making any changes to the CLI source code,
but not after making changes to tests.
## Running the tests
The integration tests are not run as part of the default `npm run test` command.
They must be run explicitly using the `npm run test:integration:all` script.
The integration tests can also be run using the following shortcut:
```bash
npm run test:e2e
```
## Running a specific set of tests
To run a subset of test files, you can use
`npm run <integration test command> <file_name> ...`, where
`<integration test command>` is either `test:e2e` or `test:integration*` and
`<file_name>` is any of the `.test.js` files in the `integration-tests/`
directory. For
example, the following command runs `list_directory.test.js` and
`write_file.test.js`:
```bash
npm run test:e2e list_directory write_file
```
### Running a single test by name
To run a single test by its name, use the `--test-name-pattern` flag:
```bash
npm run test:e2e -- --test-name-pattern "reads a file"
```
### Regenerating model responses
Some integration tests use faked out model responses, which may need to be
regenerated from time to time as the implementations change.
To regenerate these golden files, set the REGENERATE_MODEL_GOLDENS environment
variable to "true" when running the tests, for example:
**WARNING**: If running locally you should review these updated responses for
any information about yourself or your system that gemini may have included in
these responses.
```bash
REGENERATE_MODEL_GOLDENS="true" npm run test:e2e
```
**WARNING**: Make sure you run **await rig.cleanup()** at the end of your test,
else the golden files will not be updated.
### Deflaking a test
Before adding a **new** integration test, you should test it at least 5 times
with the deflake script or workflow to make sure that it is not flaky.
### Deflake script
```bash
npm run deflake -- --runs=5 --command="npm run test:e2e -- -- --test-name-pattern '<test name pattern>'"
```
#### Deflake workflow
```bash
gh workflow run deflake.yml --ref <branch> -f test_name_pattern="<test name pattern>"
```
### Running all tests
To run the entire suite of integration tests, use the following command:
```bash
npm run test:integration:all
```
### Sandbox matrix
The `all` command will run tests for `no sandboxing`, `docker` and `podman`.
Each individual type can be run using the following commands:
```bash
npm run test:integration:sandbox:none
```
```bash
npm run test:integration:sandbox:docker
```
```bash
npm run test:integration:sandbox:podman
```
## Diagnostics
The integration test runner provides several options for diagnostics to help
track down test failures.
### Keeping test output
You can preserve the temporary files created during a test run for inspection.
This is useful for debugging issues with file system operations.
To keep the test output set the `KEEP_OUTPUT` environment variable to `true`.
```bash
KEEP_OUTPUT=true npm run test:integration:sandbox:none
```
When output is kept, the test runner will print the path to the unique directory
for the test run.
### Verbose output
For more detailed debugging, set the `VERBOSE` environment variable to `true`.
```bash
VERBOSE=true npm run test:integration:sandbox:none
```
When using `VERBOSE=true` and `KEEP_OUTPUT=true` in the same command, the output
is streamed to the console and also saved to a log file within the test's
temporary directory.
The verbose output is formatted to clearly identify the source of the logs:
```
--- TEST: <test-file-name>:<test-case-name> ---
... output from the gemini command ...
--- END TEST: <test-file-name>:<test-case-name> ---
```
## Linting and formatting
To ensure code quality and consistency, the integration test files are linted as
part of the main build process. You can also manually run the linter and
auto-fixer.
### Running the linter
To check for linting errors, run the following command:
```bash
npm run lint
```
You can include the `:fix` flag in the command to automatically fix any fixable
linting errors:
```bash
npm run lint:fix
```
## Directory structure
The integration tests create a unique directory for each test run inside the
`.integration-tests` directory. Within this directory, a subdirectory is created
for each test file, and within that, a subdirectory is created for each
individual test case.
This structure makes it easy to locate the artifacts for a specific test run,
file, or case.
```
.integration-tests/
└── <run-id>/
    └── <test-file-name>.test.js/
        └── <test-case-name>/
            ├── output.log
            └── ...other test artifacts...
```
## Continuous integration
To ensure the integration tests are always run, a GitHub Actions workflow is
defined in `.github/workflows/chained_e2e.yml`. This workflow automatically runs
the integration tests for pull requests against the `main` branch, or when a
pull request is added to a merge queue.
The workflow runs the tests in different sandboxing environments to ensure
Gemini CLI is tested across each:
- `sandbox:none`: Runs the tests without any sandboxing.
- `sandbox:docker`: Runs the tests in a Docker container.
- `sandbox:podman`: Runs the tests in a Podman container.
================================================
FILE: docs/issue-and-pr-automation.md
================================================
# Automation and triage processes
This document provides a detailed overview of the automated processes we use to
manage and triage issues and pull requests. Our goal is to provide prompt
feedback and ensure that contributions are reviewed and integrated efficiently.
Understanding this automation will help you as a contributor know what to expect
and how to best interact with our repository bots.
## Guiding principle: Issues and pull requests
First and foremost, almost every Pull Request (PR) should be linked to a
corresponding Issue. The issue describes the "what" and the "why" (the bug or
feature), while the PR is the "how" (the implementation). This separation helps
us track work, prioritize features, and maintain clear historical context. Our
automation is built around this principle.
> [!NOTE]
> Issues tagged as "🔒Maintainers only" are reserved for project
> maintainers. We will not accept pull requests related to these issues.
---
## Detailed automation workflows
Here is a breakdown of the specific automation workflows that run in our
repository.
### 1. When you open an issue: `Automated Issue Triage`
This is the first bot you will interact with when you create an issue. Its job
is to perform an initial analysis and apply the correct labels.
- **Workflow File**: `.github/workflows/gemini-automated-issue-triage.yml`
- **When it runs**: Immediately after an issue is created or reopened.
- **What it does**:
- It uses a Gemini model to analyze the issue's title and body against a
detailed set of guidelines.
- **Applies one `area/*` label**: Categorizes the issue into a functional area
of the project (e.g., `area/ux`, `area/models`, `area/platform`).
- **Applies one `kind/*` label**: Identifies the type of issue (e.g.,
`kind/bug`, `kind/enhancement`, `kind/question`).
- **Applies one `priority/*` label**: Assigns a priority from P0 (critical) to
P3 (low) based on the described impact.
- **May apply `status/need-information`**: If the issue lacks critical details
(like logs or reproduction steps), it will be flagged for more information.
- **May apply `status/need-retesting`**: If the issue references a CLI version
that is more than six versions old, it will be flagged for retesting on a
current version.
- **What you should do**:
- Fill out the issue template as completely as possible. The more detail you
provide, the more accurate the triage will be.
- If the `status/need-information` label is added, please provide the
requested details in a comment.
### 2. When you open a pull request: `Continuous Integration (CI)`
This workflow ensures that all changes meet our quality standards before they
can be merged.
- **Workflow File**: `.github/workflows/ci.yml`
- **When it runs**: On every push to a pull request.
- **What it does**:
- **Lint**: Checks that your code adheres to our project's formatting and
style rules.
- **Test**: Runs our full suite of automated tests across macOS, Windows, and
Linux, and on multiple Node.js versions. This is the most time-consuming
part of the CI process.
- **Post Coverage Comment**: After all tests have successfully passed, a bot
will post a comment on your PR. This comment provides a summary of how well
your changes are covered by tests.
- **What you should do**:
- Ensure all CI checks pass. A green checkmark ✅ will appear next to your
commit when everything is successful.
- If a check fails (a red "X" ❌), click the "Details" link next to the failed
check to view the logs, identify the problem, and push a fix.
### 3. Ongoing triage for pull requests: `PR Auditing and Label Sync`
This workflow runs periodically to ensure all open PRs are correctly linked to
issues and have consistent labels.
- **Workflow File**: `.github/workflows/gemini-scheduled-pr-triage.yml`
- **When it runs**: Every 15 minutes on all open pull requests.
- **What it does**:
- **Checks for a linked issue**: The bot scans your PR description for a
keyword that links it to an issue (e.g., `Fixes #123`, `Closes #456`).
- **Adds `status/need-issue`**: If no linked issue is found, the bot will add
the `status/need-issue` label to your PR. This is a clear signal that an
issue needs to be created and linked.
- **Synchronizes labels**: If an issue _is_ linked, the bot ensures the PR's
labels perfectly match the issue's labels. It will add any missing labels
and remove any that don't belong, and it will remove the `status/need-issue`
label if it was present.
- **What you should do**:
- **Always link your PR to an issue.** This is the most important step. Add a
line like `Resolves #<issue-number>` to your PR description.
- This will ensure your PR is correctly categorized and moves through the
review process smoothly.
### 4. Ongoing triage for issues: `Scheduled Issue Triage`
This is a fallback workflow to ensure that no issue gets missed by the triage
process.
- **Workflow File**: `.github/workflows/gemini-scheduled-issue-triage.yml`
- **When it runs**: Every hour on all open issues.
- **What it does**:
- It actively seeks out issues that either have no labels at all or still have
the `status/need-triage` label.
- It then triggers the same powerful Gemini-based analysis as the initial
triage bot to apply the correct labels.
- **What you should do**:
- You typically don't need to do anything. This workflow is a safety net to
ensure every issue is eventually categorized, even if the initial triage
fails.
### 5. Automatic unassignment of inactive contributors: `Unassign Inactive Issue Assignees`
To keep the list of open `help wanted` issues accessible to all contributors,
this workflow automatically removes **external contributors** who have not
opened a linked pull request within **7 days** of being assigned. Maintainers,
org members, and repo collaborators with write access or above are always exempt
and will never be auto-unassigned.
- **Workflow File**: `.github/workflows/unassign-inactive-assignees.yml`
- **When it runs**: Every day at 09:00 UTC, and can be triggered manually with
an optional `dry_run` mode.
- **What it does**:
1. Finds every open issue labeled `help wanted` that has at least one
assignee.
2. Identifies privileged users (team members, repo collaborators with write+
access, maintainers) and skips them entirely.
3. For each remaining (external) assignee it reads the issue's timeline to
determine:
- The exact date they were assigned (using `assigned` timeline events).
- Whether they have opened a PR that is already linked/cross-referenced to
the issue.
4. Each cross-referenced PR is fetched to verify it is **ready for review**:
open and non-draft, or already merged. Draft PRs do not count.
5. If an assignee has been assigned for **more than 7 days** and no qualifying
PR is found, they are automatically unassigned and a comment is posted
explaining the reason and how to re-claim the issue.
6. Assignees who have a non-draft, open or merged PR linked to the issue are
**never** unassigned by this workflow.
- **What you should do**:
- **Open a real PR, not a draft**: Within 7 days of being assigned, open a PR
that is ready for review and include `Fixes #<issue-number>` in the
description. Draft PRs do not satisfy the requirement and will not prevent
auto-unassignment.
- **Re-assign if unassigned by mistake**: Comment `/assign` on the issue to
assign yourself again.
- **Unassign yourself** if you can no longer work on the issue by commenting
`/unassign`, so other contributors can pick it up right away.
### 6. Release automation
This workflow handles the process of packaging and publishing new versions of
the Gemini CLI.
- **Workflow File**: `.github/workflows/release-manual.yml`
- **When it runs**: On a daily schedule for "nightly" releases, and manually for
official patch/minor releases.
- **What it does**:
- Automatically builds the project, bumps the version numbers, and publishes
the packages to npm.
- Creates a corresponding release on GitHub with generated release notes.
- **What you should do**:
- As a contributor, you don't need to do anything for this process. You can be
confident that once your PR is merged into the `main` branch, your changes
will be included in the very next nightly release.
We hope this detailed overview is helpful. If you have any questions about our
automation or processes, please don't hesitate to ask!
================================================
FILE: docs/local-development.md
================================================
# Local development guide
This guide provides instructions for setting up and using local development
features for Gemini CLI.
## Tracing
Gemini CLI uses OpenTelemetry (OTel) to record traces that help you debug agent
behavior. Traces instrument key events like model calls, tool scheduler
operations, and tool calls.
Traces provide deep visibility into agent behavior and help you debug complex
issues. They are captured automatically when you enable telemetry.
### View traces
You can view traces using Genkit Developer UI, Jaeger, or Google Cloud.
#### Use Genkit
Genkit provides a web-based UI for viewing traces and other telemetry data.
1. **Start the Genkit telemetry server:**
Run the following command to start the Genkit server:
```bash
npm run telemetry -- --target=genkit
```
The script will output the URL for the Genkit Developer UI. For example:
`Genkit Developer UI: http://localhost:4000`
2. **Run Gemini CLI:**
In a separate terminal, run your Gemini CLI command:
```bash
gemini
```
3. **View the traces:**
Open the Genkit Developer UI URL in your browser and navigate to the
**Traces** tab to view the traces.
#### Use Jaeger
You can view traces in the Jaeger UI for local development.
1. **Start the telemetry collector:**
Run the following command in your terminal to download and start Jaeger and
an OTel collector:
```bash
npm run telemetry -- --target=local
```
This command configures your workspace for local telemetry and provides a
link to the Jaeger UI (usually `http://localhost:16686`).
- **Collector logs:** `~/.gemini/tmp/<projectHash>/otel/collector.log`
2. **Run Gemini CLI:**
In a separate terminal, run your Gemini CLI command:
```bash
gemini
```
3. **View the traces:**
After running your command, open the Jaeger UI link in your browser to view
the traces.
#### Use Google Cloud
You can use an OpenTelemetry collector to forward telemetry data to Google Cloud
Trace for custom processing or routing.
> [!WARNING]
> Ensure you complete the
> [Google Cloud telemetry prerequisites](./cli/telemetry.md#prerequisites)
> (Project ID, authentication, IAM roles, and APIs) before using this method.
1. **Configure `.gemini/settings.json`:**
```json
{
"telemetry": {
"enabled": true,
"target": "gcp",
"useCollector": true
}
}
```
2. **Start the telemetry collector:**
Run the following command to start a local OTel collector that forwards to
Google Cloud:
```bash
npm run telemetry -- --target=gcp
```
The script outputs links to view traces, metrics, and logs in the Google
Cloud Console.
- **Collector logs:** `~/.gemini/tmp/<projectHash>/otel/collector-gcp.log`
3. **Run Gemini CLI:**
In a separate terminal, run your Gemini CLI command:
```bash
gemini
```
4. **View logs, metrics, and traces:**
After sending prompts, view your data in the Google Cloud Console. See the
[telemetry documentation](./cli/telemetry.md#view-google-cloud-telemetry)
for links to Logs, Metrics, and Trace explorers.
For more detailed information on telemetry, see the
[telemetry documentation](./cli/telemetry.md).
### Instrument code with traces
You can add traces to your own code for more detailed instrumentation.
Adding traces helps you debug and understand the flow of execution. Use the
`runInDevTraceSpan` function to wrap any section of code in a trace span.
Here is a basic example:
```typescript
import { runInDevTraceSpan } from '@google/gemini-cli-core';
import { GeminiCliOperation } from '@google/gemini-cli-core/lib/telemetry/constants.js';
await runInDevTraceSpan(
{
operation: GeminiCliOperation.ToolCall,
attributes: {
[GEN_AI_AGENT_NAME]: 'gemini-cli',
},
},
async ({ metadata }) => {
// metadata allows you to record the input and output of the
// operation as well as other attributes.
metadata.input = { key: 'value' };
// Set custom attributes.
metadata.attributes['custom.attribute'] = 'custom.value';
// Your code to be traced goes here.
try {
const output = await somethingRisky();
metadata.output = output;
return output;
} catch (e) {
metadata.error = e;
throw e;
}
},
);
```
In this example:
- `operation`: The operation type of the span, represented by the
`GeminiCliOperation` enum.
- `metadata.input`: (Optional) An object containing the input data for the
traced operation.
- `metadata.output`: (Optional) An object containing the output data from the
traced operation.
- `metadata.attributes`: (Optional) A record of custom attributes to add to the
span.
- `metadata.error`: (Optional) An error object to record if the operation fails.
================================================
FILE: docs/mermaid/context.mmd
================================================
graph LR
%% --- Style Definitions ---
classDef new fill:#98fb98,color:#000
classDef changed fill:#add8e6,color:#000
classDef unchanged fill:#f0f0f0,color:#000
%% --- Subgraphs ---
subgraph "Context Providers"
direction TB
A["gemini.tsx"]
B["AppContainer.tsx"]
end
subgraph "Contexts"
direction TB
CtxSession["SessionContext"]
CtxVim["VimModeContext"]
CtxSettings["SettingsContext"]
CtxApp["AppContext"]
CtxConfig["ConfigContext"]
CtxUIState["UIStateContext"]
CtxUIActions["UIActionsContext"]
end
subgraph "Component Consumers"
direction TB
ConsumerApp["App"]
ConsumerAppContainer["AppContainer"]
ConsumerAppHeader["AppHeader"]
ConsumerDialogManager["DialogManager"]
ConsumerHistoryItem["HistoryItemDisplay"]
ConsumerComposer["Composer"]
ConsumerMainContent["MainContent"]
ConsumerNotifications["Notifications"]
end
%% --- Provider -> Context Connections ---
A -.-> CtxSession
A -.-> CtxVim
A -.-> CtxSettings
B -.-> CtxApp
B -.-> CtxConfig
B -.-> CtxUIState
B -.-> CtxUIActions
B -.-> CtxSettings
%% --- Context -> Consumer Connections ---
CtxSession -.-> ConsumerAppContainer
CtxSession -.-> ConsumerApp
CtxVim -.-> ConsumerAppContainer
CtxVim -.-> ConsumerComposer
CtxVim -.-> ConsumerApp
CtxSettings -.-> ConsumerAppContainer
CtxSettings -.-> ConsumerAppHeader
CtxSettings -.-> ConsumerDialogManager
CtxSettings -.-> ConsumerApp
CtxApp -.-> ConsumerAppHeader
CtxApp -.-> ConsumerNotifications
CtxConfig -.-> ConsumerAppHeader
CtxConfig -.-> ConsumerHistoryItem
CtxConfig -.-> ConsumerComposer
CtxConfig -.-> ConsumerDialogManager
CtxUIState -.-> ConsumerApp
CtxUIState -.-> ConsumerMainContent
CtxUIState -.-> ConsumerComposer
CtxUIState -.-> ConsumerDialogManager
CtxUIActions -.-> ConsumerComposer
CtxUIActions -.-> ConsumerDialogManager
%% --- Apply Styles ---
%% New Elements (Green)
class B,CtxApp,CtxConfig,CtxUIState,CtxUIActions,ConsumerAppHeader,ConsumerDialogManager,ConsumerComposer,ConsumerMainContent,ConsumerNotifications new
%% Heavily Changed Elements (Blue)
class A,ConsumerApp,ConsumerAppContainer,ConsumerHistoryItem changed
%% Mostly Unchanged Elements (Gray)
class CtxSession,CtxVim,CtxSettings unchanged
%% --- Link Styles ---
%% CtxSession (Red)
linkStyle 0,8,9 stroke:#e57373,stroke-width:2px
%% CtxVim (Orange)
linkStyle 1,10,11,12 stroke:#ffb74d,stroke-width:2px
%% CtxSettings (Yellow)
linkStyle 2,7,13,14,15,16 stroke:#fff176,stroke-width:2px
%% CtxApp (Green)
linkStyle 3,17,18 stroke:#81c784,stroke-width:2px
%% CtxConfig (Blue)
linkStyle 4,19,20,21,22 stroke:#64b5f6,stroke-width:2px
%% CtxUIState (Indigo)
linkStyle 5,23,24,25,26 stroke:#7986cb,stroke-width:2px
%% CtxUIActions (Violet)
linkStyle 6,27,28 stroke:#ba68c8,stroke-width:2px
================================================
FILE: docs/mermaid/render-path.mmd
================================================
graph TD
%% --- Style Definitions ---
classDef new fill:#98fb98,color:#000
classDef changed fill:#add8e6,color:#000
classDef unchanged fill:#f0f0f0,color:#000
classDef dispatcher fill:#f9e79f,color:#000,stroke:#333,stroke-width:1px
classDef container fill:#f5f5f5,color:#000,stroke:#ccc
%% --- Component Tree ---
subgraph "Entry Point"
A["gemini.tsx"]
end
subgraph "State & Logic Wrapper"
B["AppContainer.tsx"]
end
subgraph "Primary Layout"
C["App.tsx"]
end
A -.-> B
B -.-> C
subgraph "UI Containers"
direction LR
C -.-> D["MainContent"]
C -.-> G["Composer"]
C -.-> F["DialogManager"]
C -.-> E["Notifications"]
end
subgraph "MainContent"
direction TB
D -.-> H["AppHeader"]
D -.-> I["HistoryItemDisplay"]:::dispatcher
D -.-> L["ShowMoreLines"]
end
subgraph "Composer"
direction TB
G -.-> K_Prompt["InputPrompt"]
G -.-> K_Footer["Footer"]
end
subgraph "DialogManager"
F -.-> J["Various Dialogs (Auth, Theme, Settings, etc.)"]
end
%% --- Apply Styles ---
class B,D,E,F,G,H,J,K_Prompt,L new
class A,C,I changed
class K_Footer unchanged
%% --- Link Styles ---
%% MainContent Branch (Blue)
linkStyle 2,6,7,8 stroke:#64b5f6,stroke-width:2px
%% Composer Branch (Green)
linkStyle 3,9,10 stroke:#81c784,stroke-width:2px
%% DialogManager Branch (Orange)
linkStyle 4,11 stroke:#ffb74d,stroke-width:2px
%% Notifications Branch (Violet)
linkStyle 5 stroke:#ba68c8,stroke-width:2px
================================================
FILE: docs/npm.md
================================================
# Package overview
This monorepo contains two main packages: `@google/gemini-cli` and
`@google/gemini-cli-core`.
## `@google/gemini-cli`
This is the main package for the Gemini CLI. It is responsible for the user
interface, command parsing, and all other user-facing functionality.
When this package is published, it is bundled into a single executable file.
This bundle includes all of the package's dependencies, including
`@google/gemini-cli-core`. This means that whether a user installs the package
with `npm install -g @google/gemini-cli` or runs it directly with
`npx @google/gemini-cli`, they are using this single, self-contained executable.
## `@google/gemini-cli-core`
This package contains the core logic for interacting with the Gemini API. It is
responsible for making API requests, handling authentication, and managing the
local cache.
This package is not bundled. When it is published, it is published as a standard
Node.js package with its own dependencies. This allows it to be used as a
standalone package in other projects, if needed. All transpiled js code in the
`dist` folder is included in the package.
## NPM workspaces
This project uses
[NPM Workspaces](https://docs.npmjs.com/cli/v10/using-npm/workspaces) to manage
the packages within this monorepo. This simplifies development by allowing us to
manage dependencies and run scripts across multiple packages from the root of
the project.
### How it works
The root `package.json` file defines the workspaces for this project:
```json
{
"workspaces": ["packages/*"]
}
```
This tells NPM that any folder inside the `packages` directory is a separate
package that should be managed as part of the workspace.
### Benefits of workspaces
- **Simplified dependency management**: Running `npm install` from the root of
the project will install all dependencies for all packages in the workspace
and link them together. This means you don't need to run `npm install` in each
package's directory.
- **Automatic linking**: Packages within the workspace can depend on each other.
When you run `npm install`, NPM will automatically create symlinks between the
packages. This means that when you make changes to one package, the changes
are immediately available to other packages that depend on it.
- **Simplified script execution**: You can run scripts in any package from the
root of the project using the `--workspace` flag. For example, to run the
`build` script in the `cli` package, you can run
`npm run build --workspace @google/gemini-cli`.
================================================
FILE: docs/redirects.json
================================================
{
"/docs/architecture": "/docs/cli/index",
"/docs/cli/commands": "/docs/reference/commands",
"/docs/cli": "/docs",
"/docs/cli/index": "/docs",
"/docs/cli/keyboard-shortcuts": "/docs/reference/keyboard-shortcuts",
"/docs/cli/uninstall": "/docs/resources/uninstall",
"/docs/core/concepts": "/docs",
"/docs/core/memport": "/docs/reference/memport",
"/docs/core/policy-engine": "/docs/reference/policy-engine",
"/docs/core/tools-api": "/docs/reference/tools",
"/docs/reference/tools-api": "/docs/reference/tools",
"/docs/faq": "/docs/resources/faq",
"/docs/get-started/configuration": "/docs/reference/configuration",
"/docs/get-started/configuration-v1": "/docs/reference/configuration",
"/docs/index": "/docs",
"/docs/quota-and-pricing": "/docs/resources/quota-and-pricing",
"/docs/tos-privacy": "/docs/resources/tos-privacy",
"/docs/troubleshooting": "/docs/resources/troubleshooting"
}
================================================
FILE: docs/reference/commands.md
================================================
# CLI commands
Gemini CLI supports several built-in commands to help you manage your session,
customize the interface, and control its behavior. These commands are prefixed
with a forward slash (`/`), an at symbol (`@`), or an exclamation mark (`!`).
## Slash commands (`/`)
Slash commands provide meta-level control over the CLI itself.
### Built-in Commands
### `/about`
- **Description:** Show version info. Share this information when filing issues.
### `/agents`
- **Description:** Manage local and remote subagents.
- **Note:** This command is experimental and requires
`experimental.enableAgents: true` in your `settings.json`.
- **Sub-commands:**
- **`list`**:
- **Description:** Lists all discovered agents, including built-in, local,
and remote agents.
- **Usage:** `/agents list`
- **`reload`** (alias: `refresh`):
- **Description:** Rescans agent directories (`~/.gemini/agents` and
`.gemini/agents`) and reloads the registry.
- **Usage:** `/agents reload`
- **`enable`**:
- **Description:** Enables a specific subagent.
- **Usage:** `/agents enable <agent-name>`
- **`disable`**:
- **Description:** Disables a specific subagent.
- **Usage:** `/agents disable <agent-name>`
- **`config`**:
- **Description:** Opens a configuration dialog for the specified agent to
adjust its model, temperature, or execution limits.
- **Usage:** `/agents config <agent-name>`
### `/auth`
- **Description:** Open a dialog that lets you change the authentication method.
### `/bug`
- **Description:** File an issue about Gemini CLI. By default, the issue is
filed within the GitHub repository for Gemini CLI. The string you enter after
`/bug` will become the headline for the bug being filed. The default `/bug`
behavior can be modified using the `advanced.bugCommand` setting in your
`.gemini/settings.json` files.
### `/chat`
- **Description:** Alias for `/resume`. Both commands now expose the same
session browser action and checkpoint subcommands.
- **Menu layout when typing `/chat` (or `/resume`)**:
- `-- auto --`
- `list` (selecting this opens the auto-saved session browser)
- `-- checkpoints --`
- `list`, `save`, `resume`, `delete`, `share` (manual tagged checkpoints)
- Unique prefixes (for example `/cha` or `/resu`) resolve to the same grouped
menu.
- **Sub-commands:**
- **`debug`**
- **Description:** Export the most recent API request as a JSON payload.
- **`delete <tag>`**
- **Description:** Deletes a saved conversation checkpoint.
- **Equivalent:** `/resume delete <tag>`
- **`list`**
- **Description:** Lists available tags for manually saved checkpoints.
- **Note:** This command only lists chats saved within the current project.
Because chat history is project-scoped, chats saved in other project
directories will not be displayed.
- **Equivalent:** `/resume list`
- **`resume <tag>`**
- **Description:** Resumes a conversation from a previous save.
- **Note:** You can only resume chats that were saved within the current
project. To resume a chat from a different project, you must run the
Gemini CLI from that project's directory.
- **Equivalent:** `/resume resume <tag>`
- **`save <tag>`**
- **Description:** Saves the current conversation history. You must add a
`<tag>` for identifying the conversation state.
- **Details on checkpoint location:** The default locations for saved chat
checkpoints are:
- Linux/macOS: `~/.gemini/tmp/<project_hash>/`
- Windows: `C:\Users\<YourUsername>\.gemini\tmp\<project_hash>\`
- **Behavior:** Chats are saved into a project-specific directory,
determined by where you run the CLI. Consequently, saved chats are only
accessible when working within that same project.
- **Note:** These checkpoints are for manually saving and resuming
conversation states. For automatic checkpoints created before file
modifications, see the
[Checkpointing documentation](../cli/checkpointing.md).
- **Equivalent:** `/resume save <tag>`
- **`share [filename]`**
- **Description:** Writes the current conversation to a provided Markdown or
JSON file. If no filename is provided, then the CLI will generate one.
- **Usage:** `/chat share file.md` or `/chat share file.json`.
- **Equivalent:** `/resume share [filename]`
### `/clear`
- **Description:** Clear the terminal screen, including the visible session
history and scrollback within the CLI. The underlying session data (for
history recall) might be preserved depending on the exact implementation, but
the visual display is cleared.
- **Keyboard shortcut:** Press **Ctrl+L** at any time to perform a clear action.
### `/commands`
- **Description:** Manage custom slash commands loaded from `.toml` files.
- **Sub-commands:**
- **`reload`**:
- **Description:** Reload custom command definitions from all sources
(user-level `~/.gemini/commands/`, project-level
`<project>/.gemini/commands/`, MCP prompts, and extensions). Use this to
pick up new or modified `.toml` files without restarting the CLI.
- **Usage:** `/commands reload`
### `/compress`
- **Description:** Replace the entire chat context with a summary. This saves on
tokens used for future tasks while retaining a high level summary of what has
happened.
### `/copy`
- **Description:** Copies the last output produced by Gemini CLI to your
clipboard, for easy sharing or reuse.
- **Behavior:**
- Local sessions use system clipboard tools (pbcopy/xclip/clip).
- Remote sessions (SSH/WSL) use OSC 52 and require terminal support.
- **Note:** This command requires platform-specific clipboard tools to be
installed.
- On Linux, it requires `xclip` or `xsel`. You can typically install them
using your system's package manager.
- On macOS, it requires `pbcopy`, and on Windows, it requires `clip`. These
tools are typically pre-installed on their respective systems.
### `/directory` (or `/dir`)
- **Description:** Manage workspace directories for multi-directory support.
- **Sub-commands:**
- **`add`**:
- **Description:** Add a directory to the workspace. The path can be
absolute or relative to the current working directory. Moreover, the
reference from home directory is supported as well.
- **Usage:** `/directory add <path1>,<path2>`
- **Note:** Disabled in restrictive sandbox profiles. If you're using that,
use `--include-directories` when starting the session instead.
- **`show`**:
- **Description:** Display all directories added by `/directory add` and
`--include-directories`.
- **Usage:** `/directory show`
### `/docs`
- **Description:** Open the Gemini CLI documentation in your browser.
### `/editor`
- **Description:** Open a dialog for selecting supported editors.
### `/extensions`
- **Description:** Manage extensions. See
[Gemini CLI Extensions](../extensions/index.md).
- **Sub-commands:**
- **`config`**:
- **Description:** Configure extension settings.
- **`disable`**:
- **Description:** Disable an extension.
- **`enable`**:
- **Description:** Enable an extension.
- **`explore`**:
- **Description:** Open extensions page in your browser.
- **`install`**:
- **Description:** Install an extension from a git repo or local path.
- **`link`**:
- **Description:** Link an extension from a local path.
- **`list`**:
- **Description:** List active extensions.
- **`restart`**:
- **Description:** Restart all extensions.
- **`uninstall`**:
- **Description:** Uninstall an extension.
- **`update`**:
- **Description:** Update extensions. Usage: `update <extension-name>` or `update --all`.
### `/help` (or `/?`)
- **Description:** Display help information about Gemini CLI, including
available commands and their usage.
### `/hooks`
- **Description:** Manage hooks, which allow you to intercept and customize
Gemini CLI behavior at specific lifecycle events.
- **Sub-commands:**
- **`disable-all`**:
- **Description:** Disable all enabled hooks.
- **`disable <name>`**:
- **Description:** Disable a hook by name.
- **`enable-all`**:
- **Description:** Enable all disabled hooks.
- **`enable <name>`**:
- **Description:** Enable a hook by name.
- **`list`** (or `show`, `panel`):
- **Description:** Display all registered hooks with their status.
### `/ide`
- **Description:** Manage IDE integration.
- **Sub-commands:**
- **`disable`**:
- **Description:** Disable IDE integration.
- **`enable`**:
- **Description:** Enable IDE integration.
- **`install`**:
- **Description:** Install required IDE companion.
- **`status`**:
- **Description:** Check status of IDE integration.
### `/init`
- **Description:** To help users easily create a `GEMINI.md` file, this command
analyzes the current directory and generates a tailored context file, making
it simpler for them to provide project-specific instructions to the Gemini
agent.
### `/mcp`
- **Description:** Manage configured Model Context Protocol (MCP) servers.
- **Sub-commands:**
- **`auth`**:
- **Description:** Authenticate with an OAuth-enabled MCP server.
- **Usage:** `/mcp auth <server-name>`
- **Details:** If `<server-name>` is provided, it initiates the OAuth flow
for that server. If no server name is provided, it lists all configured
servers that support OAuth authentication.
- **`desc`**
- **Description:** List configured MCP servers and tools with descriptions.
- **`disable`**
- **Description:** Disable an MCP server.
- **`enable`**
- **Description:** Enable a disabled MCP server.
- **`list`** or **`ls`**:
- **Description:** List configured MCP servers and tools. This is the
default action if no subcommand is specified.
- **`refresh`**:
- **Description:** Restarts all MCP servers and re-discovers their available
tools.
- **`schema`**:
- **Description:** List configured MCP servers and tools with descriptions
and schemas.
### `/memory`
- **Description:** Manage the AI's instructional context (hierarchical memory
loaded from `GEMINI.md` files).
- **Sub-commands:**
- **`add`**:
- **Description:** Adds the following text to the AI's memory. Usage:
`/memory add <text to remember>`
- **`list`**:
- **Description:** Lists the paths of the GEMINI.md files in use for
hierarchical memory.
- **`refresh`**:
- **Description:** Reload the hierarchical instructional memory from all
`GEMINI.md` files found in the configured locations (global,
project/ancestors, and sub-directories). This command updates the model
with the latest `GEMINI.md` content.
- **`show`**:
- **Description:** Display the full, concatenated content of the current
hierarchical memory that has been loaded from all `GEMINI.md` files. This
lets you inspect the instructional context being provided to the Gemini
model.
- **Note:** For more details on how `GEMINI.md` files contribute to
hierarchical memory, see the
[CLI Configuration documentation](./configuration.md).
### `/model`
- **Description:** Manage model configuration.
- **Sub-commands:**
- **`manage`**:
- **Description:** Opens a dialog to configure the model.
- **`set`**:
- **Description:** Set the model to use.
- **Usage:** `/model set <model-name> [--persist]`
### `/permissions`
- **Description:** Manage folder trust settings and other permissions.
- **Sub-commands:**
- **`trust`**:
- **Description:** Manage folder trust settings.
- **Usage:** `/permissions trust [<path>]`
### `/plan`
- **Description:** Switch to Plan Mode (read-only) and view the current plan if
one has been generated.
- **Note:** This feature is enabled by default. It can be disabled via the
`experimental.plan` setting in your configuration.
- **Sub-commands:**
- **`copy`**:
- **Description:** Copy the currently approved plan to your clipboard.
### `/policies`
- **Description:** Manage policies.
- **Sub-commands:**
- **`list`**:
- **Description:** List all active policies grouped by mode.
### `/privacy`
- **Description:** Display the Privacy Notice and allow users to select whether
they consent to the collection of their data for service improvement purposes.
### `/quit` (or `/exit`)
- **Description:** Exit Gemini CLI.
### `/restore`
- **Description:** Restores the project files to the state they were in just
before a tool was executed. This is particularly useful for undoing file edits
made by a tool. If run without a tool call ID, it will list available
checkpoints to restore from.
- **Usage:** `/restore [tool_call_id]`
- **Note:** Only available if checkpointing is configured via
[settings](./configuration.md). See
[Checkpointing documentation](../cli/checkpointing.md) for more details.
### `/rewind`
- **Description:** Navigates backward through the conversation history, letting
you review past interactions and potentially revert both chat state and file
changes.
- **Usage:** Press **Esc** twice as a shortcut.
- **Features:**
- **Select Interaction:** Preview user prompts and file changes.
- **Action Selection:** Choose to rewind history only, revert code changes
only, or both.
### `/resume`
- **Description:** Browse and resume previous conversation sessions, and manage
manual chat checkpoints.
- **Features:**
- **Auto sessions:** Run `/resume` to open the interactive session browser for
automatically saved conversations.
- **Chat checkpoints:** Use checkpoint subcommands directly (`/resume save`,
`/resume resume`, etc.).
- **Management:** Delete unwanted sessions directly from the browser
- **Resume:** Select any session to resume and continue the conversation
- **Search:** Use `/` to search through conversation content across all
sessions
- **Session Browser:** Interactive interface showing all saved sessions with
timestamps, message counts, and first user message for context
- **Sorting:** Sort sessions by date or message count
- **Note:** All conversations are automatically saved as you chat - no manual
saving required. See [Session Management](../cli/session-management.md) for
complete details.
- **Alias:** `/chat` provides the same behavior and subcommands.
- **Sub-commands:**
- **`list`**
- **Description:** Lists available tags for manual chat checkpoints.
- **`save <tag>`**
- **Description:** Saves the current conversation as a tagged checkpoint.
- **`resume <tag>`** (alias: `load`)
- **Description:** Loads a previously saved tagged checkpoint.
- **`delete <tag>`**
- **Description:** Deletes a tagged checkpoint.
- **`share [filename]`**
- **Description:** Exports the current conversation to Markdown or JSON.
- **`debug`**
- **Description:** Export the most recent API request as JSON payload
(nightly builds).
- **Compatibility alias:** `/resume checkpoints ...` is still accepted for the
same checkpoint commands.
### `/settings`
- **Description:** Open the settings editor to view and modify Gemini CLI
settings.
- **Details:** This command provides a user-friendly interface for changing
settings that control the behavior and appearance of Gemini CLI. It is
equivalent to manually editing the `.gemini/settings.json` file, but with
validation and guidance to prevent errors. See the
[settings documentation](../cli/settings.md) for a full list of available
settings.
- **Usage:** Simply run `/settings` and the editor will open. You can then
browse or search for specific settings, view their current values, and modify
them as desired. Changes to some settings are applied immediately, while
others require a restart.
### `/shells` (or `/bashes`)
- **Description:** Toggle the background shells view. This allows you to view
and manage long-running processes that you've sent to the background.
### `/setup-github`
- **Description:** Set up GitHub Actions to triage issues and review PRs with
Gemini.
### `/skills`
- **Description:** Manage Agent Skills, which provide on-demand expertise and
specialized workflows.
- **Sub-commands:**
- **`disable <skill-name>`**:
- **Description:** Disable a specific skill by name.
- **Usage:** `/skills disable <skill-name>`
- **`enable <skill-name>`**:
- **Description:** Enable a specific skill by name.
- **Usage:** `/skills enable <skill-name>`
- **`list`**:
- **Description:** List all discovered skills and their current status
(enabled/disabled).
- **`reload`**:
- **Description:** Refresh the list of discovered skills from all tiers
(workspace, user, and extensions).
### `/stats`
- **Description:** Display detailed statistics for the current Gemini CLI
session.
- **Sub-commands:**
- **`session`**:
- **Description:** Show session-specific usage statistics, including
duration, tool calls, and performance metrics. This is the default view.
- **`model`**:
- **Description:** Show model-specific usage statistics, including token
counts and quota information.
- **`tools`**:
- **Description:** Show tool-specific usage statistics.
### `/terminal-setup`
- **Description:** Configure terminal keybindings for multiline input (VS Code,
Cursor, Windsurf).
### `/theme`
- **Description:** Open a dialog that lets you change the visual theme of Gemini
CLI.
### `/tools`
- **Description:** Display a list of tools that are currently available within
Gemini CLI.
- **Usage:** `/tools [desc]`
- **Sub-commands:**
- **`desc`** or **`descriptions`**:
- **Description:** Show detailed descriptions of each tool, including each
tool's name with its full description as provided to the model.
- **`nodesc`** or **`nodescriptions`**:
- **Description:** Hide tool descriptions, showing only the tool names.
### `/upgrade`
- **Description:** Open the Gemini Code Assist upgrade page in your browser.
This lets you upgrade your tier for higher usage limits.
- **Note:** This command is only available when logged in with Google.
### `/vim`
- **Description:** Toggle vim mode on or off. When vim mode is enabled, the
input area supports vim-style navigation and editing commands in both NORMAL
and INSERT modes.
- **Features:**
- **Count support:** Prefix commands with numbers (e.g., `3h`, `5w`, `10G`)
- **Editing commands:** Delete with `x`, change with `c`, insert with `i`,
`a`, `o`, `O`; complex operations like `dd`, `cc`, `dw`, `cw`
- **INSERT mode:** Standard text input with escape to return to NORMAL mode
- **NORMAL mode:** Navigate with `h`, `j`, `k`, `l`; jump by words with `w`,
`b`, `e`; go to line start/end with `0`, `$`, `^`; go to specific lines with
`G` (or `gg` for first line)
- **Persistent setting:** Vim mode preference is saved to
`~/.gemini/settings.json` and restored between sessions
- **Repeat last command:** Use `.` to repeat the last editing operation
- **Status indicator:** When enabled, shows `[NORMAL]` or `[INSERT]` in the
footer
### Custom commands
Custom commands allow you to create personalized shortcuts for your most-used
prompts. For detailed instructions on how to create, manage, and use them,
please see the dedicated
[Custom Commands documentation](../cli/custom-commands.md).
## Input prompt shortcuts
These shortcuts apply directly to the input prompt for text manipulation.
- **Undo:**
- **Keyboard shortcut:** Press **Alt+z** or **Cmd+z** to undo the last action
in the input prompt.
- **Redo:**
- **Keyboard shortcut:** Press **Shift+Alt+Z** or **Shift+Cmd+Z** to redo the
last undone action in the input prompt.
## At commands (`@`)
At commands are used to include the content of files or directories as part of
your prompt to Gemini. These commands include git-aware filtering.
- **`@<path_to_file_or_directory>`**
- **Description:** Inject the content of the specified file or files into your
current prompt. This is useful for asking questions about specific code,
text, or collections of files.
- **Examples:**
- `@path/to/your/file.txt Explain this text.`
- `@src/my_project/ Summarize the code in this directory.`
- `What is this file about? @README.md`
- **Details:**
- If a path to a single file is provided, the content of that file is read.
- If a path to a directory is provided, the command attempts to read the
content of files within that directory and any subdirectories.
- Spaces in paths should be escaped with a backslash (e.g.,
`@My\ Documents/file.txt`).
- The command uses the `read_many_files` tool internally. The content is
fetched and then inserted into your query before being sent to the Gemini
model.
- **Git-aware filtering:** By default, git-ignored files (like
`node_modules/`, `dist/`, `.env`, `.git/`) are excluded. This behavior can
be changed via the `context.fileFiltering` settings.
- **File types:** The command is intended for text-based files. While it
might attempt to read any file, binary files or very large files might be
skipped or truncated by the underlying `read_many_files` tool to ensure
performance and relevance. The tool indicates if files were skipped.
- **Output:** The CLI will show a tool call message indicating that
`read_many_files` was used, along with a message detailing the status and
the path(s) that were processed.
- **`@` (Lone at symbol)**
- **Description:** If you type a lone `@` symbol without a path, the query is
passed as-is to the Gemini model. This might be useful if you are
specifically talking _about_ the `@` symbol in your prompt.
### Error handling for `@` commands
- If the path specified after `@` is not found or is invalid, an error message
will be displayed, and the query might not be sent to the Gemini model, or it
will be sent without the file content.
- If the `read_many_files` tool encounters an error (e.g., permission issues),
this will also be reported.
## Shell mode and passthrough commands (`!`)
The `!` prefix lets you interact with your system's shell directly from within
Gemini CLI.
- **`!<shell_command>`**
- **Description:** Execute the given `<shell_command>` using `bash` on
Linux/macOS or `powershell.exe -NoProfile -Command` on Windows (unless you
override `ComSpec`). Any output or errors from the command are displayed in
the terminal.
- **Examples:**
- `!ls -la` (executes `ls -la` and returns to Gemini CLI)
- `!git status` (executes `git status` and returns to Gemini CLI)
- **`!` (Toggle shell mode)**
- **Description:** Typing `!` on its own toggles shell mode.
- **Entering shell mode:**
- When active, shell mode uses a different coloring and a "Shell Mode
Indicator".
- While in shell mode, text you type is interpreted directly as a shell
command.
- **Exiting shell mode:**
- When exited, the UI reverts to its standard appearance and normal Gemini
CLI behavior resumes.
- **Caution for all `!` usage:** Commands you execute in shell mode have the
same permissions and impact as if you ran them directly in your terminal.
- **Environment variable:** When a command is executed via `!` or in shell mode,
the `GEMINI_CLI=1` environment variable is set in the subprocess's
environment. This allows scripts or tools to detect if they are being run from
within the Gemini CLI.
================================================
FILE: docs/reference/configuration.md
================================================
# Gemini CLI configuration
Gemini CLI offers several ways to configure its behavior, including environment
variables, command-line arguments, and settings files. This document outlines
the different configuration methods and available settings.
## Configuration layers
Configuration is applied in the following order of precedence (lower numbers are
overridden by higher numbers):
1. **Default values:** Hardcoded defaults within the application.
2. **System defaults file:** System-wide default settings that can be
overridden by other settings files.
3. **User settings file:** Global settings for the current user.
4. **Project settings file:** Project-specific settings.
5. **System settings file:** System-wide settings that override all other
settings files.
6. **Environment variables:** System-wide or session-specific variables,
potentially loaded from `.env` files.
7. **Command-line arguments:** Values passed when launching the CLI.
## Settings files
Gemini CLI uses JSON settings files for persistent configuration. There are four
locations for these files:
> [!TIP]
> JSON-aware editors can use autocomplete and validation by pointing to
> the generated schema at `schemas/settings.schema.json` in this repository.
> When working outside the repo, reference the hosted schema at
> `https://raw.githubusercontent.com/google-gemini/gemini-cli/main/schemas/settings.schema.json`.
- **System defaults file:**
- **Location:** `/etc/gemini-cli/system-defaults.json` (Linux),
`C:\ProgramData\gemini-cli\system-defaults.json` (Windows) or
`/Library/Application Support/GeminiCli/system-defaults.json` (macOS). The
path can be overridden using the `GEMINI_CLI_SYSTEM_DEFAULTS_PATH`
environment variable.
- **Scope:** Provides a base layer of system-wide default settings. These
settings have the lowest precedence and are intended to be overridden by
user, project, or system override settings.
- **User settings file:**
- **Location:** `~/.gemini/settings.json` (where `~` is your home directory).
- **Scope:** Applies to all Gemini CLI sessions for the current user. User
settings override system defaults.
- **Project settings file:**
- **Location:** `.gemini/settings.json` within your project's root directory.
- **Scope:** Applies only when running Gemini CLI from that specific project.
Project settings override user settings and system defaults.
- **System settings file:**
- **Location:** `/etc/gemini-cli/settings.json` (Linux),
`C:\ProgramData\gemini-cli\settings.json` (Windows) or
`/Library/Application Support/GeminiCli/settings.json` (macOS). The path can
be overridden using the `GEMINI_CLI_SYSTEM_SETTINGS_PATH` environment
variable.
- **Scope:** Applies to all Gemini CLI sessions on the system, for all users.
System settings act as overrides, taking precedence over all other settings
files. May be useful for system administrators at enterprises to have
controls over users' Gemini CLI setups.
**Note on environment variables in settings:** String values within your
`settings.json` and `gemini-extension.json` files can reference environment
variables using either `$VAR_NAME` or `${VAR_NAME}` syntax. These variables will
be automatically resolved when the settings are loaded. For example, if you have
an environment variable `MY_API_TOKEN`, you could use it in `settings.json` like
this: `"apiKey": "$MY_API_TOKEN"`. Additionally, each extension can have its own
`.env` file in its directory, which will be loaded automatically.
**Note for Enterprise Users:** For guidance on deploying and managing Gemini CLI
in a corporate environment, please see the
[Enterprise Configuration](../cli/enterprise.md) documentation.
### The `.gemini` directory in your project
In addition to a project settings file, a project's `.gemini` directory can
contain other project-specific files related to Gemini CLI's operation, such as:
- [Custom sandbox profiles](#sandboxing) (e.g.,
`.gemini/sandbox-macos-custom.sb`, `.gemini/sandbox.Dockerfile`).
### Available settings in `settings.json`
Settings are organized into categories. All settings should be placed within
their corresponding top-level category object in your `settings.json` file.
#### `policyPaths`
- **`policyPaths`** (array):
- **Description:** Additional policy files or directories to load.
- **Default:** `[]`
- **Requires restart:** Yes
#### `adminPolicyPaths`
- **`adminPolicyPaths`** (array):
- **Description:** Additional admin policy files or directories to load.
- **Default:** `[]`
- **Requires restart:** Yes
#### `general`
- **`general.preferredEditor`** (string):
- **Description:** The preferred editor to open files in.
- **Default:** `undefined`
- **`general.vimMode`** (boolean):
- **Description:** Enable Vim keybindings
- **Default:** `false`
- **`general.defaultApprovalMode`** (enum):
- **Description:** The default approval mode for tool execution. 'default'
prompts for approval, 'auto_edit' auto-approves edit tools, and 'plan' is
read-only mode. YOLO mode (auto-approve all actions) can only be enabled via
command line (--yolo or --approval-mode=yolo).
- **Default:** `"default"`
- **Values:** `"default"`, `"auto_edit"`, `"plan"`
- **`general.devtools`** (boolean):
- **Description:** Enable DevTools inspector on launch.
- **Default:** `false`
- **`general.enableAutoUpdate`** (boolean):
- **Description:** Enable automatic updates.
- **Default:** `true`
- **`general.enableAutoUpdateNotification`** (boolean):
- **Description:** Enable update notification prompts.
- **Default:** `true`
- **`general.enableNotifications`** (boolean):
- **Description:** Enable run-event notifications for action-required prompts
and session completion. Currently macOS only.
- **Default:** `false`
- **`general.checkpointing.enabled`** (boolean):
- **Description:** Enable session checkpointing for recovery
- **Default:** `false`
- **Requires restart:** Yes
- **`general.plan.directory`** (string):
- **Description:** The directory where planning artifacts are stored. If not
specified, defaults to the system temporary directory.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`general.plan.modelRouting`** (boolean):
- **Description:** Automatically switch between Pro and Flash models based on
Plan Mode status. Uses Pro for the planning phase and Flash for the
implementation phase.
- **Default:** `true`
- **`general.retryFetchErrors`** (boolean):
- **Description:** Retry on "exception TypeError: fetch failed sending
request" errors.
- **Default:** `true`
- **`general.maxAttempts`** (number):
- **Description:** Maximum number of attempts for requests to the main chat
model. Cannot exceed 10.
- **Default:** `10`
- **`general.debugKeystrokeLogging`** (boolean):
- **Description:** Enable debug logging of keystrokes to the console.
- **Default:** `false`
- **`general.sessionRetention.enabled`** (boolean):
- **Description:** Enable automatic session cleanup
- **Default:** `true`
- **`general.sessionRetention.maxAge`** (string):
- **Description:** Automatically delete chats older than this time period
(e.g., "30d", "7d", "24h", "1w")
- **Default:** `"30d"`
- **`general.sessionRetention.maxCount`** (number):
- **Description:** Alternative: Maximum number of sessions to keep (most
recent)
- **Default:** `undefined`
- **`general.sessionRetention.minRetention`** (string):
- **Description:** Minimum retention period (safety limit, defaults to "1d")
- **Default:** `"1d"`
#### `output`
- **`output.format`** (enum):
- **Description:** The format of the CLI output. Can be `text` or `json`.
- **Default:** `"text"`
- **Values:** `"text"`, `"json"`
#### `ui`
- **`ui.theme`** (string):
- **Description:** The color theme for the UI. See the CLI themes guide for
available options.
- **Default:** `undefined`
- **`ui.autoThemeSwitching`** (boolean):
- **Description:** Automatically switch between default light and dark themes
based on terminal background color.
- **Default:** `true`
- **`ui.terminalBackgroundPollingInterval`** (number):
- **Description:** Interval in seconds to poll the terminal background color.
- **Default:** `60`
- **`ui.customThemes`** (object):
- **Description:** Custom theme definitions.
- **Default:** `{}`
- **`ui.hideWindowTitle`** (boolean):
- **Description:** Hide the window title bar
- **Default:** `false`
- **Requires restart:** Yes
- **`ui.inlineThinkingMode`** (enum):
- **Description:** Display model thinking inline: off or full.
- **Default:** `"off"`
- **Values:** `"off"`, `"full"`
- **`ui.showStatusInTitle`** (boolean):
- **Description:** Show Gemini CLI model thoughts in the terminal window title
during the working phase
- **Default:** `false`
- **`ui.dynamicWindowTitle`** (boolean):
- **Description:** Update the terminal window title with current status icons
(Ready: ◇, Action Required: ✋, Working: ✦)
- **Default:** `true`
- **`ui.showHomeDirectoryWarning`** (boolean):
- **Description:** Show a warning when running Gemini CLI in the home
directory.
- **Default:** `true`
- **Requires restart:** Yes
- **`ui.showCompatibilityWarnings`** (boolean):
- **Description:** Show warnings about terminal or OS compatibility issues.
- **Default:** `true`
- **Requires restart:** Yes
- **`ui.hideTips`** (boolean):
- **Description:** Hide helpful tips in the UI
- **Default:** `false`
- **`ui.escapePastedAtSymbols`** (boolean):
- **Description:** When enabled, @ symbols in pasted text are escaped to
prevent unintended @path expansion.
- **Default:** `false`
- **`ui.showShortcutsHint`** (boolean):
- **Description:** Show the "? for shortcuts" hint above the input.
- **Default:** `true`
- **`ui.hideBanner`** (boolean):
- **Description:** Hide the application banner
- **Default:** `false`
- **`ui.hideContextSummary`** (boolean):
- **Description:** Hide the context summary (GEMINI.md, MCP servers) above the
input.
- **Default:** `false`
- **`ui.footer.items`** (array):
- **Description:** List of item IDs to display in the footer. Rendered in
order
- **Default:** `undefined`
- **`ui.footer.showLabels`** (boolean):
- **Description:** Display a second line above the footer items with
descriptive headers (e.g., /model).
- **Default:** `true`
- **`ui.footer.hideCWD`** (boolean):
- **Description:** Hide the current working directory in the footer.
- **Default:** `false`
- **`ui.footer.hideSandboxStatus`** (boolean):
- **Description:** Hide the sandbox status indicator in the footer.
- **Default:** `false`
- **`ui.footer.hideModelInfo`** (boolean):
- **Description:** Hide the model name and context usage in the footer.
- **Default:** `false`
- **`ui.footer.hideContextPercentage`** (boolean):
- **Description:** Hides the context window usage percentage.
- **Default:** `true`
- **`ui.hideFooter`** (boolean):
- **Description:** Hide the footer from the UI
- **Default:** `false`
- **`ui.showMemoryUsage`** (boolean):
- **Description:** Display memory usage information in the UI
- **Default:** `false`
- **`ui.showLineNumbers`** (boolean):
- **Description:** Show line numbers in the chat.
- **Default:** `true`
- **`ui.showCitations`** (boolean):
- **Description:** Show citations for generated text in the chat.
- **Default:** `false`
- **`ui.showModelInfoInChat`** (boolean):
- **Description:** Show the model name in the chat for each model turn.
- **Default:** `false`
- **`ui.showUserIdentity`** (boolean):
- **Description:** Show the signed-in user's identity (e.g. email) in the UI.
- **Default:** `true`
- **`ui.useAlternateBuffer`** (boolean):
- **Description:** Use an alternate screen buffer for the UI, preserving shell
history.
- **Default:** `false`
- **Requires restart:** Yes
- **`ui.useBackgroundColor`** (boolean):
- **Description:** Whether to use background colors in the UI.
- **Default:** `true`
- **`ui.incrementalRendering`** (boolean):
- **Description:** Enable incremental rendering for the UI. This option will
reduce flickering but may cause rendering artifacts. Only supported when
useAlternateBuffer is enabled.
- **Default:** `true`
- **Requires restart:** Yes
- **`ui.showSpinner`** (boolean):
- **Description:** Show the spinner during operations.
- **Default:** `true`
- **`ui.loadingPhrases`** (enum):
- **Description:** What to show while the model is working: tips, witty
comments, both, or nothing.
- **Default:** `"tips"`
- **Values:** `"tips"`, `"witty"`, `"all"`, `"off"`
- **`ui.errorVerbosity`** (enum):
- **Description:** Controls whether recoverable errors are hidden (low) or
fully shown (full).
- **Default:** `"low"`
- **Values:** `"low"`, `"full"`
- **`ui.customWittyPhrases`** (array):
- **Description:** Custom witty phrases to display during loading. When
provided, the CLI cycles through these instead of the defaults.
- **Default:** `[]`
- **`ui.accessibility.enableLoadingPhrases`** (boolean):
- **Description:** @deprecated Use ui.loadingPhrases instead. Enable loading
phrases during operations.
- **Default:** `true`
- **Requires restart:** Yes
- **`ui.accessibility.screenReader`** (boolean):
- **Description:** Render output in plain-text to be more screen reader
accessible
- **Default:** `false`
- **Requires restart:** Yes
#### `ide`
- **`ide.enabled`** (boolean):
- **Description:** Enable IDE integration mode.
- **Default:** `false`
- **Requires restart:** Yes
- **`ide.hasSeenNudge`** (boolean):
- **Description:** Whether the user has seen the IDE integration nudge.
- **Default:** `false`
#### `privacy`
- **`privacy.usageStatisticsEnabled`** (boolean):
- **Description:** Enable collection of usage statistics
- **Default:** `true`
- **Requires restart:** Yes
#### `billing`
- **`billing.overageStrategy`** (enum):
- **Description:** How to handle quota exhaustion when AI credits are
available. 'ask' prompts each time, 'always' automatically uses credits,
'never' disables credit usage.
- **Default:** `"ask"`
- **Values:** `"ask"`, `"always"`, `"never"`
#### `model`
- **`model.name`** (string):
- **Description:** The Gemini model to use for conversations.
- **Default:** `undefined`
- **`model.maxSessionTurns`** (number):
- **Description:** Maximum number of user/model/tool turns to keep in a
session. -1 means unlimited.
- **Default:** `-1`
- **`model.summarizeToolOutput`** (object):
- **Description:** Enables or disables summarization of tool output. Configure
per-tool token budgets (for example {"run_shell_command": {"tokenBudget":
2000}}). Currently only the run_shell_command tool supports summarization.
- **Default:** `undefined`
- **`model.compressionThreshold`** (number):
- **Description:** The fraction of context usage at which to trigger context
compression (e.g. 0.2, 0.3).
- **Default:** `0.5`
- **Requires restart:** Yes
- **`model.disableLoopDetection`** (boolean):
- **Description:** Disable automatic detection and prevention of infinite
loops.
- **Default:** `false`
- **Requires restart:** Yes
- **`model.skipNextSpeakerCheck`** (boolean):
- **Description:** Skip the next speaker check.
- **Default:** `true`
#### `modelConfigs`
- **`modelConfigs.aliases`** (object):
- **Description:** Named presets for model configs. Can be used in place of a
model name and can inherit from other aliases using an `extends` property.
- **Default:**
```json
{
"base": {
"modelConfig": {
"generateContentConfig": {
"temperature": 0,
"topP": 1
}
}
},
"chat-base": {
"extends": "base",
"modelConfig": {
"generateContentConfig": {
"thinkingConfig": {
"includeThoughts": true
},
"temperature": 1,
"topP": 0.95,
"topK": 64
}
}
},
"chat-base-2.5": {
"extends": "chat-base",
"modelConfig": {
"generateContentConfig": {
"thinkingConfig": {
"thinkingBudget": 8192
}
}
}
},
"chat-base-3": {
"extends": "chat-base",
"modelConfig": {
"generateContentConfig": {
"thinkingConfig": {
"thinkingLevel": "HIGH"
}
}
}
},
"gemini-3-pro-preview": {
"extends": "chat-base-3",
"modelConfig": {
"model": "gemini-3-pro-preview"
}
},
"gemini-3-flash-preview": {
"extends": "chat-base-3",
"modelConfig": {
"model": "gemini-3-flash-preview"
}
},
"gemini-2.5-pro": {
"extends": "chat-base-2.5",
"modelConfig": {
"model": "gemini-2.5-pro"
}
},
"gemini-2.5-flash": {
"extends": "chat-base-2.5",
"modelConfig": {
"model": "gemini-2.5-flash"
}
},
"gemini-2.5-flash-lite": {
"extends": "chat-base-2.5",
"modelConfig": {
"model": "gemini-2.5-flash-lite"
}
},
"gemini-2.5-flash-base": {
"extends": "base",
"modelConfig": {
"model": "gemini-2.5-flash"
}
},
"gemini-3-flash-base": {
"extends": "base",
"modelConfig": {
"model": "gemini-3-flash-preview"
}
},
"classifier": {
"extends": "base",
"modelConfig": {
"model": "gemini-2.5-flash-lite",
"generateContentConfig": {
"maxOutputTokens": 1024,
"thinkingConfig": {
"thinkingBudget": 512
}
}
}
},
"prompt-completion": {
"extends": "base",
"modelConfig": {
"model": "gemini-2.5-flash-lite",
"generateContentConfig": {
"temperature": 0.3,
"maxOutputTokens": 16000,
"thinkingConfig": {
"thinkingBudget": 0
}
}
}
},
"fast-ack-helper": {
"extends": "base",
"modelConfig": {
"model": "gemini-2.5-flash-lite",
"generateContentConfig": {
"temperature": 0.2,
"maxOutputTokens": 120,
"thinkingConfig": {
"thinkingBudget": 0
}
}
}
},
"edit-corrector": {
"extends": "base",
"modelConfig": {
"model": "gemini-2.5-flash-lite",
"generateContentConfig": {
"thinkingConfig": {
"thinkingBudget": 0
}
}
}
},
"summarizer-default": {
"extends": "base",
"modelConfig": {
"model": "gemini-2.5-flash-lite",
"generateContentConfig": {
"maxOutputTokens": 2000
}
}
},
"summarizer-shell": {
"extends": "base",
"modelConfig": {
"model": "gemini-2.5-flash-lite",
"generateContentConfig": {
"maxOutputTokens": 2000
}
}
},
"web-search": {
"extends": "gemini-3-flash-base",
"modelConfig": {
"generateContentConfig": {
"tools": [
{
"googleSearch": {}
}
]
}
}
},
"web-fetch": {
"extends": "gemini-3-flash-base",
"modelConfig": {
"generateContentConfig": {
"tools": [
{
"urlContext": {}
}
]
}
}
},
"web-fetch-fallback": {
"extends": "gemini-3-flash-base",
"modelConfig": {}
},
"loop-detection": {
"extends": "gemini-3-flash-base",
"modelConfig": {}
},
"loop-detection-double-check": {
"extends": "base",
"modelConfig": {
"model": "gemini-3-pro-preview"
}
},
"llm-edit-fixer": {
"extends": "gemini-3-flash-base",
"modelConfig": {}
},
"next-speaker-checker": {
"extends": "gemini-3-flash-base",
"modelConfig": {}
},
"chat-compression-3-pro": {
"modelConfig": {
"model": "gemini-3-pro-preview"
}
},
"chat-compression-3-flash": {
"modelConfig": {
"model": "gemini-3-flash-preview"
}
},
"chat-compression-2.5-pro": {
"modelConfig": {
"model": "gemini-2.5-pro"
}
},
"chat-compression-2.5-flash": {
"modelConfig": {
"model": "gemini-2.5-flash"
}
},
"chat-compression-2.5-flash-lite": {
"modelConfig": {
"model": "gemini-2.5-flash-lite"
}
},
"chat-compression-default": {
"modelConfig": {
"model": "gemini-3-pro-preview"
}
}
}
```
- **`modelConfigs.customAliases`** (object):
- **Description:** Custom named presets for model configs. These are merged
with (and override) the built-in aliases.
- **Default:** `{}`
- **`modelConfigs.customOverrides`** (array):
- **Description:** Custom model config overrides. These are merged with (and
added to) the built-in overrides.
- **Default:** `[]`
- **`modelConfigs.overrides`** (array):
- **Description:** Apply specific configuration overrides based on matches,
with a primary key of model (or alias). The most specific match will be
used.
- **Default:** `[]`
- **`modelConfigs.modelDefinitions`** (object):
- **Description:** Registry of model metadata, including tier, family, and
features.
- **Default:**
```json
{
"gemini-3.1-flash-lite-preview": {
"tier": "flash-lite",
"family": "gemini-3",
"isPreview": true,
"isVisible": true,
"features": {
"thinking": false,
"multimodalToolUse": true
}
},
"gemini-3.1-pro-preview": {
"tier": "pro",
"family": "gemini-3",
"isPreview": true,
"isVisible": true,
"features": {
"thinking": true,
"multimodalToolUse": true
}
},
"gemini-3.1-pro-preview-customtools": {
"tier": "pro",
"family": "gemini-3",
"isPreview": true,
"isVisible": false,
"features": {
"thinking": true,
"multimodalToolUse": true
}
},
"gemini-3-pro-preview": {
"tier": "pro",
"family": "gemini-3",
"isPreview": true,
"isVisible": true,
"features": {
"thinking": true,
"multimodalToolUse": true
}
},
"gemini-3-flash-preview": {
"tier": "flash",
"family": "gemini-3",
"isPreview": true,
"isVisible": true,
"features": {
"thinking": false,
"multimodalToolUse": true
}
},
"gemini-2.5-pro": {
"tier": "pro",
"family": "gemini-2.5",
"isPreview": false,
"isVisible": true,
"features": {
"thinking": false,
"multimodalToolUse": false
}
},
"gemini-2.5-flash": {
"tier": "flash",
"family": "gemini-2.5",
"isPreview": false,
"isVisible": true,
"features": {
"thinking": false,
"multimodalToolUse": false
}
},
"gemini-2.5-flash-lite": {
"tier": "flash-lite",
"family": "gemini-2.5",
"isPreview": false,
"isVisible": true,
"features": {
"thinking": false,
"multimodalToolUse": false
}
},
"auto": {
"tier": "auto",
"isPreview": true,
"isVisible": false,
"features": {
"thinking": true,
"multimodalToolUse": false
}
},
"pro": {
"tier": "pro",
"isPreview": false,
"isVisible": false,
"features": {
"thinking": true,
"multimodalToolUse": false
}
},
"flash": {
"tier": "flash",
"isPreview": false,
"isVisible": false,
"features": {
"thinking": false,
"multimodalToolUse": false
}
},
"flash-lite": {
"tier": "flash-lite",
"isPreview": false,
"isVisible": false,
"features": {
"thinking": false,
"multimodalToolUse": false
}
},
"auto-gemini-3": {
"displayName": "Auto (Gemini 3)",
"tier": "auto",
"isPreview": true,
"isVisible": true,
"dialogDescription": "Let Gemini CLI decide the best model for the task: gemini-3-pro, gemini-3-flash",
"features": {
"thinking": true,
"multimodalToolUse": false
}
},
"auto-gemini-2.5": {
"displayName": "Auto (Gemini 2.5)",
"tier": "auto",
"isPreview": false,
"isVisible": true,
"dialogDescription": "Let Gemini CLI decide the best model for the task: gemini-2.5-pro, gemini-2.5-flash",
"features": {
"thinking": false,
"multimodalToolUse": false
}
}
}
```
- **Requires restart:** Yes
- **`modelConfigs.modelIdResolutions`** (object):
- **Description:** Rules for resolving requested model names to concrete model
IDs based on context.
- **Default:**
```json
{
"gemini-3.1-pro-preview": {
"default": "gemini-3.1-pro-preview",
"contexts": [
{
"condition": {
"hasAccessToPreview": false
},
"target": "gemini-2.5-pro"
}
]
},
"gemini-3.1-pro-preview-customtools": {
"default": "gemini-3.1-pro-preview-customtools",
"contexts": [
{
"condition": {
"hasAccessToPreview": false
},
"target": "gemini-2.5-pro"
}
]
},
"gemini-3-flash-preview": {
"default": "gemini-3-flash-preview",
"contexts": [
{
"condition": {
"hasAccessToPreview": false
},
"target": "gemini-2.5-flash"
}
]
},
"gemini-3-pro-preview": {
"default": "gemini-3-pro-preview",
"contexts": [
{
"condition": {
"hasAccessToPreview": false
},
"target": "gemini-2.5-pro"
},
{
"condition": {
"useGemini3_1": true,
"useCustomTools": true
},
"target": "gemini-3.1-pro-preview-customtools"
},
{
"condition": {
"useGemini3_1": true
},
"target": "gemini-3.1-pro-preview"
}
]
},
"auto-gemini-3": {
"default": "gemini-3-pro-preview",
"contexts": [
{
"condition": {
"hasAccessToPreview": false
},
"target": "gemini-2.5-pro"
},
{
"condition": {
"useGemini3_1": true,
"useCustomTools": true
},
"target": "gemini-3.1-pro-preview-customtools"
},
{
"condition": {
"useGemini3_1": true
},
"target": "gemini-3.1-pro-preview"
}
]
},
"auto": {
"default": "gemini-3-pro-preview",
"contexts": [
{
"condition": {
"hasAccessToPreview": false
},
"target": "gemini-2.5-pro"
},
{
"condition": {
"useGemini3_1": true,
"useCustomTools": true
},
"target": "gemini-3.1-pro-preview-customtools"
},
{
"condition": {
"useGemini3_1": true
},
"target": "gemini-3.1-pro-preview"
}
]
},
"pro": {
"default": "gemini-3-pro-preview",
"contexts": [
{
"condition": {
"hasAccessToPreview": false
},
"target": "gemini-2.5-pro"
},
{
"condition": {
"useGemini3_1": true,
"useCustomTools": true
},
"target": "gemini-3.1-pro-preview-customtools"
},
{
"condition": {
"useGemini3_1": true
},
"target": "gemini-3.1-pro-preview"
}
]
},
"auto-gemini-2.5": {
"default": "gemini-2.5-pro"
},
"flash": {
"default": "gemini-3-flash-preview",
"contexts": [
{
"condition": {
"hasAccessToPreview": false
},
"target": "gemini-2.5-flash"
}
]
},
"flash-lite": {
"default": "gemini-2.5-flash-lite"
}
}
```
- **Requires restart:** Yes
- **`modelConfigs.classifierIdResolutions`** (object):
- **Description:** Rules for resolving classifier tiers (flash, pro) to
concrete model IDs.
- **Default:**
```json
{
"flash": {
"default": "gemini-3-flash-preview",
"contexts": [
{
"condition": {
"requestedModels": ["auto-gemini-2.5", "gemini-2.5-pro"]
},
"target": "gemini-2.5-flash"
},
{
"condition": {
"requestedModels": ["auto-gemini-3", "gemini-3-pro-preview"]
},
"target": "gemini-3-flash-preview"
}
]
},
"pro": {
"default": "gemini-3-pro-preview",
"contexts": [
{
"condition": {
"requestedModels": ["auto-gemini-2.5", "gemini-2.5-pro"]
},
"target": "gemini-2.5-pro"
},
{
"condition": {
"useGemini3_1": true,
"useCustomTools": true
},
"target": "gemini-3.1-pro-preview-customtools"
},
{
"condition": {
"useGemini3_1": true
},
"target": "gemini-3.1-pro-preview"
}
]
}
}
```
- **Requires restart:** Yes
- **`modelConfigs.modelChains`** (object):
- **Description:** Availability policy chains defining fallback behavior for
models.
- **Default:**
```json
{
"preview": [
{
"model": "gemini-3-pro-preview",
"actions": {
"terminal": "prompt",
"transient": "prompt",
"not_found": "prompt",
"unknown": "prompt"
},
"stateTransitions": {
"terminal": "terminal",
"transient": "terminal",
"not_found": "terminal",
"unknown": "terminal"
}
},
{
"model": "gemini-3-flash-preview",
"isLastResort": true,
"actions": {
"terminal": "prompt",
"transient": "prompt",
"not_found": "prompt",
"unknown": "prompt"
},
"stateTransitions": {
"terminal": "terminal",
"transient": "terminal",
"not_found": "terminal",
"unknown": "terminal"
}
}
],
"default": [
{
"model": "gemini-2.5-pro",
"actions": {
"terminal": "prompt",
"transient": "prompt",
"not_found": "prompt",
"unknown": "prompt"
},
"stateTransitions": {
"terminal": "terminal",
"transient": "terminal",
"not_found": "terminal",
"unknown": "terminal"
}
},
{
"model": "gemini-2.5-flash",
"isLastResort": true,
"actions": {
"terminal": "prompt",
"transient": "prompt",
"not_found": "prompt",
"unknown": "prompt"
},
"stateTransitions": {
"terminal": "terminal",
"transient": "terminal",
"not_found": "terminal",
"unknown": "terminal"
}
}
],
"lite": [
{
"model": "gemini-2.5-flash-lite",
"actions": {
"terminal": "silent",
"transient": "silent",
"not_found": "silent",
"unknown": "silent"
},
"stateTransitions": {
"terminal": "terminal",
"transient": "terminal",
"not_found": "terminal",
"unknown": "terminal"
}
},
{
"model": "gemini-2.5-flash",
"actions": {
"terminal": "silent",
"transient": "silent",
"not_found": "silent",
"unknown": "silent"
},
"stateTransitions": {
"terminal": "terminal",
"transient": "terminal",
"not_found": "terminal",
"unknown": "terminal"
}
},
{
"model": "gemini-2.5-pro",
"isLastResort": true,
"actions": {
"terminal": "silent",
"transient": "silent",
"not_found": "silent",
"unknown": "silent"
},
"stateTransitions": {
"terminal": "terminal",
"transient": "terminal",
"not_found": "terminal",
"unknown": "terminal"
}
}
]
}
```
- **Requires restart:** Yes
#### `agents`
- **`agents.overrides`** (object):
- **Description:** Override settings for specific agents, e.g. to disable the
agent, set a custom model config, or run config.
- **Default:** `{}`
- **Requires restart:** Yes
- **`agents.browser.sessionMode`** (enum):
- **Description:** Session mode: 'persistent', 'isolated', or 'existing'.
- **Default:** `"persistent"`
- **Values:** `"persistent"`, `"isolated"`, `"existing"`
- **Requires restart:** Yes
- **`agents.browser.headless`** (boolean):
- **Description:** Run browser in headless mode.
- **Default:** `false`
- **Requires restart:** Yes
- **`agents.browser.profilePath`** (string):
- **Description:** Path to browser profile directory for session persistence.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`agents.browser.visualModel`** (string):
- **Description:** Model override for the visual agent.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`agents.browser.allowedDomains`** (array):
- **Description:** A list of allowed domains for the browser agent (e.g.,
["github.com", "*.google.com"]).
- **Default:**
```json
["github.com", "*.google.com", "localhost"]
```
- **Requires restart:** Yes
- **`agents.browser.disableUserInput`** (boolean):
- **Description:** Disable user input on browser window during automation.
- **Default:** `true`
#### `context`
- **`context.fileName`** (string | string[]):
- **Description:** The name of the context file or files to load into memory.
Accepts either a single string or an array of strings.
- **Default:** `undefined`
- **`context.importFormat`** (string):
- **Description:** The format to use when importing memory.
- **Default:** `undefined`
- **`context.includeDirectoryTree`** (boolean):
- **Description:** Whether to include the directory tree of the current
working directory in the initial request to the model.
- **Default:** `true`
- **`context.discoveryMaxDirs`** (number):
- **Description:** Maximum number of directories to search for memory.
- **Default:** `200`
- **`context.includeDirectories`** (array):
- **Description:** Additional directories to include in the workspace context.
Missing directories will be skipped with a warning.
- **Default:** `[]`
- **`context.loadMemoryFromIncludeDirectories`** (boolean):
- **Description:** Controls how /memory reload loads GEMINI.md files. When
true, include directories are scanned; when false, only the current
directory is used.
- **Default:** `false`
- **`context.fileFiltering.respectGitIgnore`** (boolean):
- **Description:** Respect .gitignore files when searching.
- **Default:** `true`
- **Requires restart:** Yes
- **`context.fileFiltering.respectGeminiIgnore`** (boolean):
- **Description:** Respect .geminiignore files when searching.
- **Default:** `true`
- **Requires restart:** Yes
- **`context.fileFiltering.enableRecursiveFileSearch`** (boolean):
- **Description:** Enable recursive file search functionality when completing
@ references in the prompt.
- **Default:** `true`
- **Requires restart:** Yes
- **`context.fileFiltering.enableFuzzySearch`** (boolean):
- **Description:** Enable fuzzy search when searching for files.
- **Default:** `true`
- **Requires restart:** Yes
- **`context.fileFiltering.customIgnoreFilePaths`** (array):
- **Description:** Additional ignore file paths to respect. These files take
precedence over .geminiignore and .gitignore. Files earlier in the array
take precedence over files later in the array, e.g. the first file takes
precedence over the second one.
- **Default:** `[]`
- **Requires restart:** Yes
#### `tools`
- **`tools.sandbox`** (boolean | string):
- **Description:** Legacy full-process sandbox execution environment. Set to a
boolean to enable or disable the sandbox, provide a string path to a sandbox
profile, or specify an explicit sandbox command (e.g., "docker", "podman",
"lxc", "windows-native").
- **Default:** `undefined`
- **Requires restart:** Yes
- **`tools.sandboxAllowedPaths`** (array):
- **Description:** List of additional paths that the sandbox is allowed to
access.
- **Default:** `[]`
- **Requires restart:** Yes
- **`tools.sandboxNetworkAccess`** (boolean):
- **Description:** Whether the sandbox is allowed to access the network.
- **Default:** `false`
- **Requires restart:** Yes
- **`tools.shell.enableInteractiveShell`** (boolean):
- **Description:** Use node-pty for an interactive shell experience. Fallback
to child_process still applies.
- **Default:** `true`
- **Requires restart:** Yes
- **`tools.shell.pager`** (string):
- **Description:** The pager command to use for shell output. Defaults to
`cat`.
- **Default:** `"cat"`
- **`tools.shell.showColor`** (boolean):
- **Description:** Show color in shell output.
- **Default:** `false`
- **`tools.shell.inactivityTimeout`** (number):
- **Description:** The maximum time in seconds allowed without output from the
shell command. Defaults to 5 minutes.
- **Default:** `300`
- **`tools.shell.enableShellOutputEfficiency`** (boolean):
- **Description:** Enable shell output efficiency optimizations for better
performance.
- **Default:** `true`
- **`tools.core`** (array):
- **Description:** Restrict the set of built-in tools with an allowlist. Match
semantics mirror tools.allowed; see the built-in tools documentation for
available names.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`tools.allowed`** (array):
- **Description:** Tool names that bypass the confirmation dialog. Useful for
trusted commands (for example ["run_shell_command(git)",
"run_shell_command(npm test)"]). See shell tool command restrictions for
matching details.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`tools.exclude`** (array):
- **Description:** Tool names to exclude from discovery.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`tools.discoveryCommand`** (string):
- **Description:** Command to run for tool discovery.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`tools.callCommand`** (string):
- **Description:** Defines a custom shell command for invoking discovered
tools. The command must take the tool name as the first argument, read JSON
arguments from stdin, and emit JSON results on stdout.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`tools.useRipgrep`** (boolean):
- **Description:** Use ripgrep for file content search instead of the fallback
implementation. Provides faster search performance.
- **Default:** `true`
- **`tools.truncateToolOutputThreshold`** (number):
- **Description:** Maximum characters to show when truncating large tool
outputs. Set to 0 or negative to disable truncation.
- **Default:** `40000`
- **Requires restart:** Yes
- **`tools.disableLLMCorrection`** (boolean):
- **Description:** Disable LLM-based error correction for edit tools. When
enabled, tools will fail immediately if exact string matches are not found,
instead of attempting to self-correct.
- **Default:** `true`
- **Requires restart:** Yes
#### `mcp`
- **`mcp.serverCommand`** (string):
- **Description:** Command to start an MCP server.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`mcp.allowed`** (array):
- **Description:** A list of MCP servers to allow.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`mcp.excluded`** (array):
- **Description:** A list of MCP servers to exclude.
- **Default:** `undefined`
- **Requires restart:** Yes
#### `useWriteTodos`
- **`useWriteTodos`** (boolean):
- **Description:** Enable the write_todos tool.
- **Default:** `true`
#### `security`
- **`security.toolSandboxing`** (boolean):
- **Description:** Experimental tool-level sandboxing (implementation in
progress).
- **Default:** `false`
- **`security.disableYoloMode`** (boolean):
- **Description:** Disable YOLO mode, even if enabled by a flag.
- **Default:** `false`
- **Requires restart:** Yes
- **`security.disableAlwaysAllow`** (boolean):
- **Description:** Disable "Always allow" options in tool confirmation
dialogs.
- **Default:** `false`
- **Requires restart:** Yes
- **`security.enablePermanentToolApproval`** (boolean):
- **Description:** Enable the "Allow for all future sessions" option in tool
confirmation dialogs.
- **Default:** `false`
- **`security.autoAddToPolicyByDefault`** (boolean):
- **Description:** When enabled, the "Allow for all future sessions" option
becomes the default choice for low-risk tools in trusted workspaces.
- **Default:** `false`
- **`security.blockGitExtensions`** (boolean):
- **Description:** Blocks installing and loading extensions from Git.
- **Default:** `false`
- **Requires restart:** Yes
- **`security.allowedExtensions`** (array):
- **Description:** List of Regex patterns for allowed extensions. If nonempty,
only extensions that match the patterns in this list are allowed. Overrides
the blockGitExtensions setting.
- **Default:** `[]`
- **Requires restart:** Yes
- **`security.folderTrust.enabled`** (boolean):
- **Description:** Setting to track whether Folder trust is enabled.
- **Default:** `true`
- **Requires restart:** Yes
- **`security.environmentVariableRedaction.allowed`** (array):
- **Description:** Environment variables to always allow (bypass redaction).
- **Default:** `[]`
- **Requires restart:** Yes
- **`security.environmentVariableRedaction.blocked`** (array):
- **Description:** Environment variables to always redact.
- **Default:** `[]`
- **Requires restart:** Yes
- **`security.environmentVariableRedaction.enabled`** (boolean):
- **Description:** Enable redaction of environment variables that may contain
secrets.
- **Default:** `false`
- **Requires restart:** Yes
- **`security.auth.selectedType`** (string):
- **Description:** The currently selected authentication type.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`security.auth.enforcedType`** (string):
- **Description:** The required auth type. If this does not match the selected
auth type, the user will be prompted to re-authenticate.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`security.auth.useExternal`** (boolean):
- **Description:** Whether to use an external authentication flow.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`security.enableConseca`** (boolean):
- **Description:** Enable the context-aware security checker. This feature
uses an LLM to dynamically generate and enforce security policies for tool
use based on your prompt, providing an additional layer of protection
against unintended actions.
- **Default:** `false`
- **Requires restart:** Yes
#### `advanced`
- **`advanced.autoConfigureMemory`** (boolean):
- **Description:** Automatically configure Node.js memory limits
- **Default:** `false`
- **Requires restart:** Yes
- **`advanced.dnsResolutionOrder`** (string):
- **Description:** The DNS resolution order.
- **Default:** `undefined`
- **Requires restart:** Yes
- **`advanced.excludedEnvVars`** (array):
- **Description:** Environment variables to exclude from project context.
- **Default:**
```json
["DEBUG", "DEBUG_MODE"]
```
- **`advanced.bugCommand`** (object):
- **Description:** Configuration for the bug report command.
- **Default:** `undefined`
#### `experimental`
- **`experimental.toolOutputMasking.enabled`** (boolean):
- **Description:** Enables tool output masking to save tokens.
- **Default:** `true`
- **Requires restart:** Yes
- **`experimental.toolOutputMasking.toolProtectionThreshold`** (number):
- **Description:** Minimum number of tokens to protect from masking (most
recent tool outputs).
- **Default:** `50000`
- **Requires restart:** Yes
- **`experimental.toolOutputMasking.minPrunableTokensThreshold`** (number):
- **Description:** Minimum prunable tokens required to trigger a masking pass.
- **Default:** `30000`
- **Requires restart:** Yes
- **`experimental.toolOutputMasking.protectLatestTurn`** (boolean):
- **Description:** Ensures the absolute latest turn is never masked,
regardless of token count.
- **Default:** `true`
- **Requires restart:** Yes
- **`experimental.enableAgents`** (boolean):
- **Description:** Enable local and remote subagents.
- **Default:** `true`
- **Requires restart:** Yes
- **`experimental.extensionManagement`** (boolean):
- **Description:** Enable extension management features.
- **Default:** `true`
- **Requires restart:** Yes
- **`experimental.extensionConfig`** (boolean):
- **Description:** Enable requesting and fetching of extension settings.
- **Default:** `true`
- **Requires restart:** Yes
- **`experimental.extensionRegistry`** (boolean):
- **Description:** Enable extension registry explore UI.
- **Default:** `false`
- **Requires restart:** Yes
- **`experimental.extensionRegistryURI`** (string):
- **Description:** The URI (web URL or local file path) of the extension
registry.
- **Default:** `"https://geminicli.com/extensions.json"`
- **Requires restart:** Yes
- **`experimental.extensionReloading`** (boolean):
- **Description:** Enables extension loading/unloading within the CLI session.
- **Default:** `false`
- **Requires restart:** Yes
- **`experimental.jitContext`** (boolean):
- **Description:** Enable Just-In-Time (JIT) context loading.
- **Default:** `true`
- **Requires restart:** Yes
- **`experimental.useOSC52Paste`** (boolean):
- **Description:** Use OSC 52 for pasting. This may be more robust than the
default system when using remote terminal sessions (if your terminal is
configured to allow it).
- **Default:** `false`
- **`experimental.useOSC52Copy`** (boolean):
- **Description:** Use OSC 52 for copying. This may be more robust than the
default system when using remote terminal sessions (if your terminal is
configured to allow it).
- **Default:** `false`
- **`experimental.plan`** (boolean):
- **Description:** Enable Plan Mode.
- **Default:** `true`
- **Requires restart:** Yes
- **`experimental.taskTracker`** (boolean):
- **Description:** Enable task tracker tools.
- **Default:** `false`
- **Requires restart:** Yes
- **`experimental.modelSteering`** (boolean):
- **Description:** Enable model steering (user hints) to guide the model
during tool execution.
- **Default:** `false`
- **`experimental.directWebFetch`** (boolean):
- **Description:** Enable web fetch behavior that bypasses LLM summarization.
- **Default:** `false`
- **Requires restart:** Yes
- **`experimental.dynamicModelConfiguration`** (boolean):
- **Description:** Enable dynamic model configuration (definitions,
resolutions, and chains) via settings.
- **Default:** `false`
- **Requires restart:** Yes
- **`experimental.gemmaModelRouter.enabled`** (boolean):
- **Description:** Enable the Gemma Model Router (experimental). Requires a
local endpoint serving Gemma via the Gemini API using LiteRT-LM shim.
- **Default:** `false`
- **Requires restart:** Yes
- **`experimental.gemmaModelRouter.classifier.host`** (string):
- **Description:** The host of the classifier.
- **Default:** `"http://localhost:9379"`
- **Requires restart:** Yes
- **`experimental.gemmaModelRouter.classifier.model`** (string):
- **Description:** The model to use for the classifier. Only tested on
`gemma3-1b-gpu-custom`.
- **Default:** `"gemma3-1b-gpu-custom"`
- **Requires restart:** Yes
- **`experimental.memoryManager`** (boolean):
- **Description:** Replace the built-in save_memory tool with a memory manager
subagent that supports adding, removing, de-duplicating, and organizing
memories.
- **Default:** `false`
- **Requires restart:** Yes
- **`experimental.topicUpdateNarration`** (boolean):
- **Description:** Enable the experimental Topic & Update communication model
for reduced chattiness and structured progress reporting.
- **Default:** `false`
#### `skills`
- **`skills.enabled`** (boolean):
- **Description:** Enable Agent Skills.
- **Default:** `true`
- **Requires restart:** Yes
- **`skills.disabled`** (array):
- **Description:** List of disabled skills.
- **Default:** `[]`
- **Requires restart:** Yes
#### `hooksConfig`
- **`hooksConfig.enabled`** (boolean):
- **Description:** Canonical toggle for the hooks system. When disabled, no
hooks will be executed.
- **Default:** `true`
- **Requires restart:** Yes
- **`hooksConfig.disabled`** (array):
- **Description:** List of hook names (commands) that should be disabled.
Hooks in this list will not execute even if configured.
- **Default:** `[]`
- **`hooksConfig.notifications`** (boolean):
- **Description:** Show visual indicators when hooks are executing.
- **Default:** `true`
#### `hooks`
- **`hooks.BeforeTool`** (array):
- **Description:** Hooks that execute before tool execution. Can intercept,
validate, or modify tool calls.
- **Default:** `[]`
- **`hooks.AfterTool`** (array):
- **Description:** Hooks that execute after tool execution. Can process
results, log outputs, or trigger follow-up actions.
- **Default:** `[]`
- **`hooks.BeforeAgent`** (array):
- **Description:** Hooks that execute before agent loop starts. Can set up
context or initialize resources.
- **Default:** `[]`
- **`hooks.AfterAgent`** (array):
- **Description:** Hooks that execute after agent loop completes. Can perform
cleanup or summarize results.
- **Default:** `[]`
- **`hooks.Notification`** (array):
- **Description:** Hooks that execute on notification events (errors,
warnings, info). Can log or alert on specific conditions.
- **Default:** `[]`
- **`hooks.SessionStart`** (array):
- **Description:** Hooks that execute when a session starts. Can initialize
session-specific resources or state.
- **Default:** `[]`
- **`hooks.SessionEnd`** (array):
- **Description:** Hooks that execute when a session ends. Can perform cleanup
or persist session data.
- **Default:** `[]`
- **`hooks.PreCompress`** (array):
- **Description:** Hooks that execute before chat history compression. Can
back up or analyze conversation before compression.
- **Default:** `[]`
- **`hooks.BeforeModel`** (array):
- **Description:** Hooks that execute before LLM requests. Can modify prompts,
inject context, or control model parameters.
- **Default:** `[]`
- **`hooks.AfterModel`** (array):
- **Description:** Hooks that execute after LLM responses. Can process
outputs, extract information, or log interactions.
- **Default:** `[]`
- **`hooks.BeforeToolSelection`** (array):
- **Description:** Hooks that execute before tool selection. Can filter or
prioritize available tools dynamically.
- **Default:** `[]`
#### `admin`
- **`admin.secureModeEnabled`** (boolean):
- **Description:** If true, disallows YOLO mode and "Always allow" options
from being used.
- **Default:** `false`
- **`admin.extensions.enabled`** (boolean):
- **Description:** If false, disallows extensions from being installed or
used.
- **Default:** `true`
- **`admin.mcp.enabled`** (boolean):
- **Description:** If false, disallows MCP servers from being used.
- **Default:** `true`
- **`admin.mcp.config`** (object):
- **Description:** Admin-configured MCP servers (allowlist).
- **Default:** `{}`
- **`admin.mcp.requiredConfig`** (object):
- **Description:** Admin-required MCP servers that are always injected.
- **Default:** `{}`
- **`admin.skills.enabled`** (boolean):
- **Description:** If false, disallows agent skills from being used.
- **Default:** `true`
#### `mcpServers`
Configures connections to one or more Model-Context Protocol (MCP) servers for
discovering and using custom tools. Gemini CLI attempts to connect to each
configured MCP server to discover available tools. Every discovered tool is
prepended with the `mcp_` prefix and its server alias to form a fully qualified
name (FQN) (e.g., `mcp_serverAlias_actualToolName`) to avoid conflicts. Note
that the system might strip certain schema properties from MCP tool definitions
for compatibility. At least one of `command`, `url`, or `httpUrl` must be
provided. If multiple are specified, the order of precedence is `httpUrl`, then
`url`, then `command`.
> [!WARNING]
> Avoid using underscores (`_`) in your server aliases (e.g., use
> `my-server` instead of `my_server`). The underlying policy engine parses Fully
> Qualified Names (`mcp_server_tool`) using the first underscore after the
> `mcp_` prefix. An underscore in your server alias will cause the parser to
> misidentify the server name, which can cause security policies to fail
> silently.
- **`mcpServers.<SERVER_NAME>`** (object): The server parameters for the named
  server.
- `command` (string, optional): The command to execute to start the MCP server
via standard I/O.
- `args` (array of strings, optional): Arguments to pass to the command.
- `env` (object, optional): Environment variables to set for the server
process.
- `cwd` (string, optional): The working directory in which to start the
server.
- `url` (string, optional): The URL of an MCP server that uses Server-Sent
Events (SSE) for communication.
- `httpUrl` (string, optional): The URL of an MCP server that uses streamable
HTTP for communication.
- `headers` (object, optional): A map of HTTP headers to send with requests to
`url` or `httpUrl`.
- `timeout` (number, optional): Timeout in milliseconds for requests to this
MCP server.
- `trust` (boolean, optional): Trust this server and bypass all tool call
confirmations.
- `description` (string, optional): A brief description of the server, which
may be used for display purposes.
- `includeTools` (array of strings, optional): List of tool names to include
from this MCP server. When specified, only the tools listed here will be
available from this server (allowlist behavior). If not specified, all tools
from the server are enabled by default.
- `excludeTools` (array of strings, optional): List of tool names to exclude
from this MCP server. Tools listed here will not be available to the model,
even if they are exposed by the server. **Note:** `excludeTools` takes
precedence over `includeTools` - if a tool is in both lists, it will be
excluded.
#### `telemetry`
Configures logging and metrics collection for Gemini CLI. For more information,
see [Telemetry](../cli/telemetry.md).
- **Properties:**
- **`enabled`** (boolean): Whether or not telemetry is enabled.
- **`target`** (string): The destination for collected telemetry. Supported
values are `local` and `gcp`.
- **`otlpEndpoint`** (string): The endpoint for the OTLP Exporter.
- **`otlpProtocol`** (string): The protocol for the OTLP Exporter (`grpc` or
`http`).
- **`logPrompts`** (boolean): Whether or not to include the content of user
prompts in the logs.
- **`outfile`** (string): The file to write telemetry to when `target` is
`local`.
- **`useCollector`** (boolean): Whether to use an external OTLP collector.
### Example `settings.json`
Here is an example of a `settings.json` file with the nested structure, new as
of v0.3.0:
```json
{
"general": {
"vimMode": true,
"preferredEditor": "code",
"sessionRetention": {
"enabled": true,
"maxAge": "30d",
"maxCount": 100
}
},
"ui": {
"theme": "GitHub",
"hideBanner": true,
"hideTips": false,
"customWittyPhrases": [
"You forget a thousand things every day. Make sure this is one of ’em",
"Connecting to AGI"
]
},
"tools": {
"sandbox": "docker",
"discoveryCommand": "bin/get_tools",
"callCommand": "bin/call_tool",
"exclude": ["write_file"]
},
"mcpServers": {
"mainServer": {
"command": "bin/mcp_server.py"
},
"anotherServer": {
"command": "node",
"args": ["mcp_server.js", "--verbose"]
}
},
"telemetry": {
"enabled": true,
"target": "local",
"otlpEndpoint": "http://localhost:4317",
"logPrompts": true
},
"privacy": {
"usageStatisticsEnabled": true
},
"model": {
"name": "gemini-1.5-pro-latest",
"maxSessionTurns": 10,
"summarizeToolOutput": {
"run_shell_command": {
"tokenBudget": 100
}
}
},
"context": {
"fileName": ["CONTEXT.md", "GEMINI.md"],
"includeDirectories": ["path/to/dir1", "~/path/to/dir2", "../path/to/dir3"],
"loadFromIncludeDirectories": true,
"fileFiltering": {
"respectGitIgnore": false
}
},
"advanced": {
"excludedEnvVars": ["DEBUG", "DEBUG_MODE", "NODE_ENV"]
}
}
```
## Shell history
The CLI keeps a history of shell commands you run. To avoid conflicts between
different projects, this history is stored in a project-specific directory
within your user's home folder.
- **Location:** `~/.gemini/tmp/<project_hash>/shell_history`
- `<project_hash>` is a unique identifier generated from your project's root
path.
- The history is stored in a file named `shell_history`.
## Environment variables and `.env` files
Environment variables are a common way to configure applications, especially for
sensitive information like API keys or for settings that might change between
environments. For authentication setup, see the
[Authentication documentation](../get-started/authentication.md) which covers
all available authentication methods.
The CLI automatically loads environment variables from an `.env` file. The
loading order is:
1. `.env` file in the current working directory.
2. If not found, it searches upwards in parent directories until it finds an
`.env` file or reaches the project root (identified by a `.git` folder) or
the home directory.
3. If still not found, it looks for `~/.env` (in the user's home directory).
**Environment variable exclusion:** Some environment variables (like `DEBUG` and
`DEBUG_MODE`) are automatically excluded from being loaded from project `.env`
files to prevent interference with gemini-cli behavior. Variables from
`.gemini/.env` files are never excluded. You can customize this behavior using
the `advanced.excludedEnvVars` setting in your `settings.json` file.
- **`GEMINI_API_KEY`**:
- Your API key for the Gemini API.
- One of several available
[authentication methods](../get-started/authentication.md).
- Set this in your shell profile (e.g., `~/.bashrc`, `~/.zshrc`) or an `.env`
file.
- **`GEMINI_MODEL`**:
- Specifies the default Gemini model to use.
- Overrides the hardcoded default.
- Example: `export GEMINI_MODEL="gemini-3-flash-preview"` (Windows PowerShell:
`$env:GEMINI_MODEL="gemini-3-flash-preview"`)
- **`GEMINI_CLI_IDE_PID`**:
- Manually specifies the PID of the IDE process to use for integration. This
is useful when running Gemini CLI in a standalone terminal while still
wanting to associate it with a specific IDE instance.
- Overrides the automatic IDE detection logic.
- **`GEMINI_CLI_HOME`**:
- Specifies the root directory for Gemini CLI's user-level configuration and
storage.
- By default, this is the user's system home directory. The CLI will create a
`.gemini` folder inside this directory.
- Useful for shared compute environments or keeping CLI state isolated.
- Example: `export GEMINI_CLI_HOME="/path/to/user/config"` (Windows
PowerShell: `$env:GEMINI_CLI_HOME="C:\path\to\user\config"`)
- **`GEMINI_CLI_SURFACE`**:
- Specifies a custom label to include in the `User-Agent` header for API
traffic reporting.
- This is useful for tracking specific internal tools or distribution
channels.
- Example: `export GEMINI_CLI_SURFACE="my-custom-tool"` (Windows PowerShell:
`$env:GEMINI_CLI_SURFACE="my-custom-tool"`)
- **`GOOGLE_API_KEY`**:
- Your Google Cloud API key.
- Required for using Vertex AI in express mode.
- Ensure you have the necessary permissions.
- Example: `export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"` (Windows PowerShell:
`$env:GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"`).
- **`GOOGLE_CLOUD_PROJECT`**:
- Your Google Cloud Project ID.
- Required for using Code Assist or Vertex AI.
- If using Vertex AI, ensure you have the necessary permissions in this
project.
- **Cloud Shell note:** When running in a Cloud Shell environment, this
variable defaults to a special project allocated for Cloud Shell users. If
you have `GOOGLE_CLOUD_PROJECT` set in your global environment in Cloud
Shell, it will be overridden by this default. To use a different project in
Cloud Shell, you must define `GOOGLE_CLOUD_PROJECT` in a `.env` file.
- Example: `export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"` (Windows
PowerShell: `$env:GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"`).
- **`GOOGLE_APPLICATION_CREDENTIALS`** (string):
- **Description:** The path to your Google Application Credentials JSON file.
- **Example:**
`export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your/credentials.json"`
(Windows PowerShell:
`$env:GOOGLE_APPLICATION_CREDENTIALS="C:\path\to\your\credentials.json"`)
- **`GOOGLE_GENAI_API_VERSION`**:
- Specifies the API version to use for Gemini API requests.
- When set, overrides the default API version used by the SDK.
- Example: `export GOOGLE_GENAI_API_VERSION="v1"` (Windows PowerShell:
`$env:GOOGLE_GENAI_API_VERSION="v1"`)
- **`OTLP_GOOGLE_CLOUD_PROJECT`**:
- Your Google Cloud Project ID for Telemetry in Google Cloud
- Example: `export OTLP_GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"` (Windows
PowerShell: `$env:OTLP_GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"`).
- **`GEMINI_TELEMETRY_ENABLED`**:
- Set to `true` or `1` to enable telemetry. Any other value is treated as
disabling it.
- Overrides the `telemetry.enabled` setting.
- **`GEMINI_TELEMETRY_TARGET`**:
- Sets the telemetry target (`local` or `gcp`).
- Overrides the `telemetry.target` setting.
- **`GEMINI_TELEMETRY_OTLP_ENDPOINT`**:
- Sets the OTLP endpoint for telemetry.
- Overrides the `telemetry.otlpEndpoint` setting.
- **`GEMINI_TELEMETRY_OTLP_PROTOCOL`**:
- Sets the OTLP protocol (`grpc` or `http`).
- Overrides the `telemetry.otlpProtocol` setting.
- **`GEMINI_TELEMETRY_LOG_PROMPTS`**:
- Set to `true` or `1` to enable or disable logging of user prompts. Any other
value is treated as disabling it.
- Overrides the `telemetry.logPrompts` setting.
- **`GEMINI_TELEMETRY_OUTFILE`**:
- Sets the file path to write telemetry to when the target is `local`.
- Overrides the `telemetry.outfile` setting.
- **`GEMINI_TELEMETRY_USE_COLLECTOR`**:
- Set to `true` or `1` to enable or disable using an external OTLP collector.
Any other value is treated as disabling it.
- Overrides the `telemetry.useCollector` setting.
- **`GOOGLE_CLOUD_LOCATION`**:
- Your Google Cloud Project Location (e.g., us-central1).
- Required for using Vertex AI in non-express mode.
- Example: `export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"` (Windows
PowerShell: `$env:GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"`).
- **`GEMINI_SANDBOX`**:
- Alternative to the `sandbox` setting in `settings.json`.
- Accepts `true`, `false`, `docker`, `podman`, or a custom command string.
- **`GEMINI_SYSTEM_MD`**:
- Replaces the built‑in system prompt with content from a Markdown file.
- `true`/`1`: Use project default path `./.gemini/system.md`.
- Any other string: Treat as a path (relative/absolute supported, `~`
expands).
- `false`/`0` or unset: Use the built‑in prompt. See
[System Prompt Override](../cli/system-prompt.md).
- **`GEMINI_WRITE_SYSTEM_MD`**:
- Writes the current built‑in system prompt to a file for review.
- `true`/`1`: Write to `./.gemini/system.md`. Otherwise treat the value as a
path.
- Run the CLI once with this set to generate the file.
- **`SEATBELT_PROFILE`** (macOS specific):
- Switches the Seatbelt (`sandbox-exec`) profile on macOS.
- `permissive-open`: (Default) Restricts writes to the project folder (and a
few other folders, see
`packages/cli/src/utils/sandbox-macos-permissive-open.sb`) but allows other
operations.
- `restrictive-open`: Declines operations by default, allows network.
- `strict-open`: Restricts both reads and writes to the working directory,
allows network.
- `strict-proxied`: Same as `strict-open` but routes network through proxy.
- `<profile_name>`: Uses a custom profile. To define a custom profile, create
a file named `sandbox-macos-<profile_name>.sb` in your project's `.gemini/`
directory (e.g., `my-project/.gemini/sandbox-macos-custom.sb`).
- **`DEBUG` or `DEBUG_MODE`** (often used by underlying libraries or the CLI
itself):
- Set to `true` or `1` to enable verbose debug logging, which can be helpful
for troubleshooting.
- **Note:** These variables are automatically excluded from project `.env`
files by default to prevent interference with gemini-cli behavior. Use
`.gemini/.env` files if you need to set these for gemini-cli specifically.
- **`NO_COLOR`**:
- Set to any value to disable all color output in the CLI.
- **`CLI_TITLE`**:
- Set to a string to customize the title of the CLI.
- **`CODE_ASSIST_ENDPOINT`**:
- Specifies the endpoint for the code assist server.
- This is useful for development and testing.
### Environment variable redaction
To prevent accidental leakage of sensitive information, Gemini CLI automatically
redacts potential secrets from environment variables when executing tools (such
as shell commands). This "best effort" redaction applies to variables inherited
from the system or loaded from `.env` files.
**Default Redaction Rules:**
- **By Name:** Variables are redacted if their names contain sensitive terms
like `TOKEN`, `SECRET`, `PASSWORD`, `KEY`, `AUTH`, `CREDENTIAL`, `PRIVATE`, or
`CERT`.
- **By Value:** Variables are redacted if their values match known secret
patterns, such as:
- Private keys (RSA, OpenSSH, PGP, etc.)
- Certificates
- URLs containing credentials
- API keys and tokens (GitHub, Google, AWS, Stripe, Slack, etc.)
- **Specific Blocklist:** Certain variables like `CLIENT_ID`, `DB_URI`,
`DATABASE_URL`, and `CONNECTION_STRING` are always redacted by default.
**Allowlist (Never Redacted):**
- Common system variables (e.g., `PATH`, `HOME`, `USER`, `SHELL`, `TERM`,
`LANG`).
- Variables starting with `GEMINI_CLI_`.
- GitHub Action specific variables.
**Configuration:**
You can customize this behavior in your `settings.json` file:
- **`security.allowedEnvironmentVariables`**: A list of variable names to
_never_ redact, even if they match sensitive patterns.
- **`security.blockedEnvironmentVariables`**: A list of variable names to
_always_ redact, even if they don't match sensitive patterns.
```json
{
"security": {
"allowedEnvironmentVariables": ["MY_PUBLIC_KEY", "NOT_A_SECRET_TOKEN"],
"blockedEnvironmentVariables": ["INTERNAL_IP_ADDRESS"]
}
}
```
## Command-line arguments
Arguments passed directly when running the CLI can override other configurations
for that specific session.
- **`--model <model_name>`** (**`-m <model_name>`**):
- Specifies the Gemini model to use for this session.
- Example: `npm start -- --model gemini-3-pro-preview`
- **`--prompt <your_prompt>`** (**`-p <your_prompt>`**):
- **Deprecated:** Use positional arguments instead.
- Used to pass a prompt directly to the command. This invokes Gemini CLI in a
non-interactive mode.
- **`--prompt-interactive <your_prompt>`** (**`-i <your_prompt>`**):
- Starts an interactive session with the provided prompt as the initial input.
- The prompt is processed within the interactive session, not before it.
- Cannot be used when piping input from stdin.
- Example: `gemini -i "explain this code"`
- **`--output-format <format>`**:
- **Description:** Specifies the format of the CLI output for non-interactive
mode.
- **Values:**
- `text`: (Default) The standard human-readable output.
- `json`: A machine-readable JSON output.
- `stream-json`: A streaming JSON output that emits real-time events.
- **Note:** For structured output and scripting, use the
`--output-format json` or `--output-format stream-json` flag.
- **`--sandbox`** (**`-s`**):
- Enables sandbox mode for this session.
- **`--debug`** (**`-d`**):
- Enables debug mode for this session, providing more verbose output. Open the
debug console with F12 to see the additional logging.
- **`--help`** (or **`-h`**):
- Displays help information about command-line arguments.
- **`--yolo`**:
- Enables YOLO mode, which automatically approves all tool calls.
- **`--approval-mode <mode>`**:
- Sets the approval mode for tool calls. Available modes:
- `default`: Prompt for approval on each tool call (default behavior)
- `auto_edit`: Automatically approve edit tools (replace, write_file) while
prompting for others
- `yolo`: Automatically approve all tool calls (equivalent to `--yolo`)
- `plan`: Read-only mode for tool calls (requires experimental planning to
be enabled).
> **Note:** This mode is currently under development and not yet fully
> functional.
- Cannot be used together with `--yolo`. Use `--approval-mode=yolo` instead of
`--yolo` for the new unified approach.
- Example: `gemini --approval-mode auto_edit`
- **`--allowed-tools <tool1,tool2,...>`**:
- A comma-separated list of tool names that will bypass the confirmation
dialog.
- Example: `gemini --allowed-tools "ShellTool(git status)"`
- **`--extensions <extension_name>`** (**`-e <extension_name>`**):
- Specifies a list of extensions to use for the session. If not provided, all
available extensions are used.
- Use the special term `gemini -e none` to disable all extensions.
- Example: `gemini -e my-extension -e my-other-extension`
- **`--list-extensions`** (**`-l`**):
- Lists all available extensions and exits.
- **`--resume [session_id]`** (**`-r [session_id]`**):
- Resume a previous chat session. Use "latest" for the most recent session,
provide a session index number, or provide a full session UUID.
- If no session_id is provided, defaults to "latest".
- Example: `gemini --resume 5` or `gemini --resume latest` or
`gemini --resume a1b2c3d4-e5f6-7890-abcd-ef1234567890` or `gemini --resume`
- See [Session Management](../cli/session-management.md) for more details.
- **`--list-sessions`**:
- List all available chat sessions for the current project and exit.
- Shows session indices, dates, message counts, and preview of first user
message.
- Example: `gemini --list-sessions`
- **`--delete-session <session_id>`**:
- Delete a specific chat session by its index number or full session UUID.
- Use `--list-sessions` first to see available sessions, their indices, and
UUIDs.
- Example: `gemini --delete-session 3` or
`gemini --delete-session a1b2c3d4-e5f6-7890-abcd-ef1234567890`
- **`--include-directories <dir1,dir2,...>`**:
- Includes additional directories in the workspace for multi-directory
support.
- Can be specified multiple times or as comma-separated values.
- 5 directories can be added at maximum.
- Example: `--include-directories /path/to/project1,/path/to/project2` or
`--include-directories /path/to/project1 --include-directories /path/to/project2`
- **`--screen-reader`**:
- Enables screen reader mode, which adjusts the TUI for better compatibility
with screen readers.
- **`--version`**:
- Displays the version of the CLI.
- **`--experimental-acp`**:
- Starts the agent in ACP mode.
- **`--allowed-mcp-server-names`**:
- Allowed MCP server names.
- **`--fake-responses`**:
- Path to a file with fake model responses for testing.
- **`--record-responses`**:
- Path to a file to record model responses for testing.
## Context files (hierarchical instructional context)
While not strictly configuration for the CLI's _behavior_, context files
(defaulting to `GEMINI.md` but configurable via the `context.fileName` setting)
are crucial for configuring the _instructional context_ (also referred to as
"memory") provided to the Gemini model. This powerful feature allows you to give
project-specific instructions, coding style guides, or any relevant background
information to the AI, making its responses more tailored and accurate to your
needs. The CLI includes UI elements, such as an indicator in the footer showing
the number of loaded context files, to keep you informed about the active
context.
- **Purpose:** These Markdown files contain instructions, guidelines, or context
that you want the Gemini model to be aware of during your interactions. The
system is designed to manage this instructional context hierarchically.
### Example context file content (e.g., `GEMINI.md`)
Here's a conceptual example of what a context file at the root of a TypeScript
project might contain:
```markdown
# Project: My Awesome TypeScript Library
## General Instructions:
- When generating new TypeScript code, please follow the existing coding style.
- Ensure all new functions and classes have JSDoc comments.
- Prefer functional programming paradigms where appropriate.
- All code should be compatible with TypeScript 5.0 and Node.js 20+.
## Coding Style:
- Use 2 spaces for indentation.
- Interface names should be prefixed with `I` (e.g., `IUserService`).
- Private class members should be prefixed with an underscore (`_`).
- Always use strict equality (`===` and `!==`).
## Specific Component: `src/api/client.ts`
- This file handles all outbound API requests.
- When adding new API call functions, ensure they include robust error handling
and logging.
- Use the existing `fetchWithRetry` utility for all GET requests.
## Regarding Dependencies:
- Avoid introducing new external dependencies unless absolutely necessary.
- If a new dependency is required, please state the reason.
```
This example demonstrates how you can provide general project context, specific
coding conventions, and even notes about particular files or components. The
more relevant and precise your context files are, the better the AI can assist
you. Project-specific context files are highly encouraged to establish
conventions and context.
- **Hierarchical loading and precedence:** The CLI implements a sophisticated
hierarchical memory system by loading context files (e.g., `GEMINI.md`) from
several locations. Content from files lower in this list (more specific)
typically overrides or supplements content from files higher up (more
general). The exact concatenation order and final context can be inspected
using the `/memory show` command. The typical loading order is:
1. **Global context file:**
- Location: `~/.gemini/<context_file_name>` (e.g.,
`~/.gemini/GEMINI.md` in your user home directory).
- Scope: Provides default instructions for all your projects.
2. **Project root and ancestors context files:**
- Location: The CLI searches for the configured context file in the
current working directory and then in each parent directory up to either
the project root (identified by a `.git` folder) or your home directory.
- Scope: Provides context relevant to the entire project or a significant
portion of it.
3. **Sub-directory context files (contextual/local):**
- Location: The CLI also scans for the configured context file in
subdirectories _below_ the current working directory (respecting common
ignore patterns like `node_modules`, `.git`, etc.). The breadth of this
search is limited to 200 directories by default, but can be configured
with the `context.discoveryMaxDirs` setting in your `settings.json`
file.
- Scope: Allows for highly specific instructions relevant to a particular
component, module, or subsection of your project.
- **Concatenation and UI indication:** The contents of all found context files
are concatenated (with separators indicating their origin and path) and
provided as part of the system prompt to the Gemini model. The CLI footer
displays the count of loaded context files, giving you a quick visual cue
about the active instructional context.
- **Importing content:** You can modularize your context files by importing
other Markdown files using the `@path/to/file.md` syntax. For more details,
see the [Memory Import Processor documentation](./memport.md).
- **Commands for memory management:**
- Use `/memory refresh` to force a re-scan and reload of all context files
from all configured locations. This updates the AI's instructional context.
- Use `/memory show` to display the combined instructional context currently
loaded, allowing you to verify the hierarchy and content being used by the
AI.
- See the [Commands documentation](./commands.md#memory) for full details on
the `/memory` command and its sub-commands (`show` and `refresh`).
By understanding and utilizing these configuration layers and the hierarchical
nature of context files, you can effectively manage the AI's memory and tailor
the Gemini CLI's responses to your specific needs and projects.
## Sandboxing
The Gemini CLI can execute potentially unsafe operations (like shell commands
and file modifications) within a sandboxed environment to protect your system.
Sandboxing is disabled by default, but you can enable it in a few ways:
- Using `--sandbox` or `-s` flag.
- Setting `GEMINI_SANDBOX` environment variable.
- Sandbox is enabled when using `--yolo` or `--approval-mode=yolo` by default.
By default, it uses a pre-built `gemini-cli-sandbox` Docker image.
For project-specific sandboxing needs, you can create a custom Dockerfile at
`.gemini/sandbox.Dockerfile` in your project's root directory. This Dockerfile
can be based on the base sandbox image:
```dockerfile
FROM gemini-cli-sandbox
# Add your custom dependencies or configurations here
# For example:
# RUN apt-get update && apt-get install -y some-package
# COPY ./my-config /app/my-config
```
When `.gemini/sandbox.Dockerfile` exists, you can use `BUILD_SANDBOX`
environment variable when running Gemini CLI to automatically build the custom
sandbox image:
```bash
BUILD_SANDBOX=1 gemini -s
```
## Usage statistics
To help us improve the Gemini CLI, we collect anonymized usage statistics. This
data helps us understand how the CLI is used, identify common issues, and
prioritize new features.
**What we collect:**
- **Tool calls:** We log the names of the tools that are called, whether they
succeed or fail, and how long they take to execute. We do not collect the
arguments passed to the tools or any data returned by them.
- **API requests:** We log the Gemini model used for each request, the duration
of the request, and whether it was successful. We do not collect the content
of the prompts or responses.
- **Session information:** We collect information about the configuration of the
CLI, such as the enabled tools and the approval mode.
**What we DON'T collect:**
- **Personally identifiable information (PII):** We do not collect any personal
information, such as your name, email address, or API keys.
- **Prompt and response content:** We do not log the content of your prompts or
the responses from the Gemini model.
- **File content:** We do not log the content of any files that are read or
written by the CLI.
**How to opt out:**
You can opt out of usage statistics collection at any time by setting the
`usageStatisticsEnabled` property to `false` under the `privacy` category in
your `settings.json` file:
```json
{
"privacy": {
"usageStatisticsEnabled": false
}
}
```
================================================
FILE: docs/reference/keyboard-shortcuts.md
================================================
# Gemini CLI keyboard shortcuts
Gemini CLI ships with a set of default keyboard shortcuts for editing input,
navigating history, and controlling the UI. Use this reference to learn the
available combinations.
#### Basic Controls
| Command | Action | Keys |
| --------------- | --------------------------------------------------------------- | ------------------- |
| `basic.confirm` | Confirm the current selection or choice. | `Enter` |
| `basic.cancel` | Dismiss dialogs or cancel the current focus. | `Esc` `Ctrl+[` |
| `basic.quit` | Cancel the current request or quit the CLI when input is empty. | `Ctrl+C` |
| `basic.exit` | Exit the CLI when the input buffer is empty. | `Ctrl+D` |
#### Cursor Movement
| Command | Action | Keys |
| ------------------ | ------------------------------------------- | ------------------------------------------ |
| `cursor.home` | Move the cursor to the start of the line. | `Ctrl+A` `Home` |
| `cursor.end` | Move the cursor to the end of the line. | `Ctrl+E` `End` |
| `cursor.up` | Move the cursor up one line. | `Up` |
| `cursor.down` | Move the cursor down one line. | `Down` |
| `cursor.left` | Move the cursor one character to the left. | `Left` |
| `cursor.right` | Move the cursor one character to the right. | `Right` `Ctrl+F` |
| `cursor.wordLeft` | Move the cursor one word to the left. | `Ctrl+Left` `Alt+Left` `Alt+B` |
| `cursor.wordRight` | Move the cursor one word to the right. | `Ctrl+Right` `Alt+Right` `Alt+F` |
#### Editing
| Command | Action | Keys |
| ---------------------- | ------------------------------------------------ | -------------------------------------------------------- |
| `edit.deleteRightAll` | Delete from the cursor to the end of the line. | `Ctrl+K` |
| `edit.deleteLeftAll` | Delete from the cursor to the start of the line. | `Ctrl+U` |
| `edit.clear` | Clear all text in the input field. | `Ctrl+C` |
| `edit.deleteWordLeft` | Delete the previous word. | `Ctrl+Backspace` `Alt+Backspace` `Ctrl+W` |
| `edit.deleteWordRight` | Delete the next word. | `Ctrl+Delete` `Alt+Delete` `Alt+D` |
| `edit.deleteLeft` | Delete the character to the left. | `Backspace` `Ctrl+H` |
| `edit.deleteRight` | Delete the character to the right. | `Delete` `Ctrl+D` |
| `edit.undo` | Undo the most recent text edit. | `Cmd/Win+Z` `Alt+Z` |
| `edit.redo` | Redo the most recent undone text edit. | `Ctrl+Shift+Z` `Shift+Cmd/Win+Z` `Alt+Shift+Z` |
#### Scrolling
| Command | Action | Keys |
| ----------------- | ------------------------ | ----------------------------- |
| `scroll.up` | Scroll content up. | `Shift+Up` |
| `scroll.down` | Scroll content down. | `Shift+Down` |
| `scroll.home` | Scroll to the top. | `Ctrl+Home` `Shift+Home` |
| `scroll.end` | Scroll to the bottom. | `Ctrl+End` `Shift+End` |
| `scroll.pageUp` | Scroll up by one page. | `Page Up` |
| `scroll.pageDown` | Scroll down by one page. | `Page Down` |
#### History & Search
| Command | Action | Keys |
| ----------------------- | -------------------------------------------- | -------- |
| `history.previous` | Show the previous entry in history. | `Ctrl+P` |
| `history.next` | Show the next entry in history. | `Ctrl+N` |
| `history.search.start` | Start reverse search through history. | `Ctrl+R` |
| `history.search.submit` | Submit the selected reverse-search match. | `Enter` |
| `history.search.accept` | Accept a suggestion while reverse searching. | `Tab` |
#### Navigation
| Command | Action | Keys |
| --------------------- | -------------------------------------------------- | --------------- |
| `nav.up` | Move selection up in lists. | `Up` |
| `nav.down` | Move selection down in lists. | `Down` |
| `nav.dialog.up` | Move up within dialog options. | `Up` `K` |
| `nav.dialog.down` | Move down within dialog options. | `Down` `J` |
| `nav.dialog.next` | Move to the next item or question in a dialog. | `Tab` |
| `nav.dialog.previous` | Move to the previous item or question in a dialog. | `Shift+Tab` |
#### Suggestions & Completions
| Command | Action | Keys |
| ----------------------- | --------------------------------------- | -------------------- |
| `suggest.accept` | Accept the inline suggestion. | `Tab` `Enter` |
| `suggest.focusPrevious` | Move to the previous completion option. | `Up` `Ctrl+P` |
| `suggest.focusNext` | Move to the next completion option. | `Down` `Ctrl+N` |
| `suggest.expand` | Expand an inline suggestion. | `Right` |
| `suggest.collapse` | Collapse an inline suggestion. | `Left` |
#### Text Input
| Command | Action | Keys |
| -------------------------- | ---------------------------------------------------------- | ----------------------------------------------------------------------------------- |
| `input.submit` | Submit the current prompt. | `Enter` |
| `input.newline` | Insert a newline without submitting. | `Ctrl+Enter` `Cmd/Win+Enter` `Alt+Enter` `Shift+Enter` `Ctrl+J` |
| `input.openExternalEditor` | Open the current prompt or the plan in an external editor. | `Ctrl+X` |
| `input.paste` | Paste from the clipboard. | `Ctrl+V` `Cmd/Win+V` `Alt+V` |
#### App Controls
| Command | Action | Keys |
| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ |
| `app.showErrorDetails` | Toggle detailed error information. | `F12` |
| `app.showFullTodos` | Toggle the full TODO list. | `Ctrl+T` |
| `app.showIdeContextDetail` | Show IDE context details. | `Ctrl+G` |
| `app.toggleMarkdown` | Toggle Markdown rendering. | `Alt+M` |
| `app.toggleCopyMode` | Toggle copy mode when in alternate buffer mode. | `Ctrl+S` |
| `app.toggleYolo` | Toggle YOLO (auto-approval) mode for tool calls. | `Ctrl+Y` |
| `app.cycleApprovalMode` | Cycle through approval modes: default (prompt), auto_edit (auto-approve edits), and plan (read-only). Plan mode is skipped when the agent is busy. | `Shift+Tab` |
| `app.showMoreLines` | Expand and collapse blocks of content when not in alternate buffer mode. | `Ctrl+O` |
| `app.expandPaste` | Expand or collapse a paste placeholder when cursor is over placeholder. | `Ctrl+O` |
| `app.focusShellInput` | Move focus from Gemini to the active shell. | `Tab` |
| `app.unfocusShellInput` | Move focus from the shell back to Gemini. | `Shift+Tab` |
| `app.clearScreen` | Clear the terminal screen and redraw the UI. | `Ctrl+L` |
| `app.restart` | Restart the application. | `R` `Shift+R` |
| `app.suspend` | Suspend the CLI and move it to the background. | `Ctrl+Z` |
| `app.showShellUnfocusWarning` | Show warning when trying to move focus away from shell input. | `Tab` |
#### Background Shell Controls
| Command | Action | Keys |
| --------------------------- | ------------------------------------------------------------------ | ----------- |
| `background.escape` | Dismiss background shell list. | `Esc` |
| `background.select` | Confirm selection in background shell list. | `Enter` |
| `background.toggle` | Toggle current background shell visibility. | `Ctrl+B` |
| `background.toggleList` | Toggle background shell list. | `Ctrl+L` |
| `background.kill` | Kill the active background shell. | `Ctrl+K` |
| `background.unfocus` | Move focus from background shell to Gemini. | `Shift+Tab` |
| `background.unfocusList` | Move focus from background shell list to Gemini. | `Tab` |
| `background.unfocusWarning` | Show warning when trying to move focus away from background shell. | `Tab` |
## Customizing Keybindings
You can add alternative keybindings or remove default keybindings by creating a
`keybindings.json` file in your home gemini directory (typically
`~/.gemini/keybindings.json`).
### Configuration Format
The configuration uses a JSON array of objects, similar to VS Code's keybinding
schema. Each object must specify a `command` from the reference tables above and
a `key` combination.
```json
[
{
"command": "edit.clear",
"key": "cmd+l"
},
{
// prefix "-" to unbind a key
"command": "-app.toggleYolo",
"key": "ctrl+y"
},
{
"command": "input.submit",
"key": "ctrl+y"
},
{
// multiple modifiers
"command": "cursor.right",
"key": "shift+alt+a"
},
{
// Some mac keyboards send "Å" instead of "shift+option+a"
"command": "cursor.right",
"key": "Å"
},
{
// some base keys have special multi-char names
"command": "cursor.right",
"key": "shift+pageup"
}
]
```
- **Unbinding** To remove an existing or default keybinding, prefix a minus sign
(`-`) to the `command` name.
- **No Auto-unbinding** The same key can be bound to multiple commands in
different contexts at the same time. Therefore, creating a binding does not
automatically unbind the key from other commands.
- **Explicit Modifiers**: Key matching is explicit. For example, a binding for
`ctrl+f` will only trigger on exactly `ctrl+f`, not `ctrl+shift+f` or
`alt+ctrl+f`.
- **Literal Characters**: Terminals often translate complex key combinations
  (especially on macOS with the `Option` key) into special characters, losing
  modifier and keystroke information along the way. For example, `shift+5` might
  be sent as `%`. In these cases, you must bind to the literal character `%` as
  bindings to `shift+5` will never fire. To see precisely what is being sent,
  enable `Debug Keystroke Logging` and press `F12` to open the debug log console.
- **Key Modifiers**: The supported key modifiers are:
- `ctrl`
- `shift`
- `alt` (synonyms: `opt`, `option`)
- `cmd` (synonym: `meta`)
- **Base Key**: The base key can be any single unicode code point or any of the
following special keys:
- **Navigation**: `up`, `down`, `left`, `right`, `home`, `end`, `pageup`,
`pagedown`
- **Actions**: `enter`, `escape`, `tab`, `space`, `backspace`, `delete`,
`clear`, `insert`, `printscreen`
- **Toggles**: `capslock`, `numlock`, `scrolllock`, `pausebreak`
- **Function Keys**: `f1` through `f35`
- **Numpad**: `numpad0` through `numpad9`, `numpad_add`, `numpad_subtract`,
`numpad_multiply`, `numpad_divide`, `numpad_decimal`, `numpad_separator`
## Additional context-specific shortcuts
- `Option+B/F/M` (macOS only): are interpreted as `Cmd+B/F/M` even if your
  terminal isn't configured to send Meta with Option.
- `!` on an empty prompt: Enter or exit shell mode.
- `?` on an empty prompt: Toggle the shortcuts panel above the input. Press
`Esc`, `Backspace`, any printable key, or a registered app hotkey to close it.
The panel also auto-hides while the agent is running/streaming or when
action-required dialogs are shown. Press `?` again to close the panel and
insert a `?` into the prompt.
- `Tab` + `Tab` (while typing in the prompt): Toggle between minimal and full UI
details when no completion/search interaction is active. The selected mode is
remembered for future sessions. Full UI remains the default on first run, and
single `Tab` keeps its existing completion/focus behavior.
- `Shift + Tab` (while typing in the prompt): Cycle approval modes: default,
auto-edit, and plan (skipped when agent is busy).
- `\` (at end of a line) + `Enter`: Insert a newline without leaving single-line
mode.
- `Esc` pressed twice quickly: Clear the input prompt if it is not empty,
otherwise browse and rewind previous interactions.
- `Up Arrow` / `Down Arrow`: When the cursor is at the top or bottom of a
single-line input, navigate backward or forward through prompt history.
- `Number keys (1-9, multi-digit)` inside selection dialogs: Jump directly to
the numbered radio option and confirm when the full number is entered.
- `Ctrl + O`: Expand or collapse paste placeholders (`[Pasted Text: X lines]`)
inline when the cursor is over the placeholder.
- `Ctrl + X` (while a plan is presented): Open the plan in an external editor to
[collaboratively edit or comment](../cli/plan-mode.md#collaborative-plan-editing)
on the implementation strategy.
- `Double-click` on a paste placeholder (alternate buffer mode only): Expand to
view full content inline. Double-click again to collapse.
## Limitations
- On [Windows Terminal](https://en.wikipedia.org/wiki/Windows_Terminal):
- `shift+enter` is only supported in version 1.25 and higher.
- `shift+tab`
[is not supported](https://github.com/google-gemini/gemini-cli/issues/20314)
on Node 20 and earlier versions of Node 22.
- On macOS's [Terminal](https://en.wikipedia.org/wiki/Terminal_(macOS)):
- `shift+enter` is not supported.
================================================
FILE: docs/reference/memport.md
================================================
# Memory Import Processor
The Memory Import Processor is a feature that allows you to modularize your
GEMINI.md files by importing content from other files using the `@file.md`
syntax.
## Overview
This feature enables you to break down large GEMINI.md files into smaller, more
manageable components that can be reused across different contexts. The import
processor supports both relative and absolute paths, with built-in safety
features to prevent circular imports and ensure file access security.
## Syntax
Use the `@` symbol followed by the path to the file you want to import:
```markdown
# Main GEMINI.md file
This is the main content.
@./components/instructions.md
More content here.
@./shared/configuration.md
```
## Supported path formats
### Relative paths
- `@./file.md` - Import from the same directory
- `@../file.md` - Import from parent directory
- `@./components/file.md` - Import from subdirectory
### Absolute paths
- `@/absolute/path/to/file.md` - Import using absolute path
## Examples
### Basic import
```markdown
# My GEMINI.md
Welcome to my project!
@./get-started.md
## Features
@./features/overview.md
```
### Nested imports
The imported files can themselves contain imports, creating a nested structure:
```markdown
# main.md
@./header.md @./content.md @./footer.md
```
```markdown
# header.md
# Project Header
@./shared/title.md
```
## Safety features
### Circular import detection
The processor automatically detects and prevents circular imports:
```markdown
# file-a.md
@./file-b.md
```
```markdown
# file-b.md
@./file-a.md
```
### File access security
The `validateImportPath` function ensures that imports are only allowed from
specified directories, preventing access to sensitive files outside the allowed
scope.
### Maximum import depth
To prevent infinite recursion, there's a configurable maximum import depth
(default: 5 levels).
## Error handling
### Missing files
If a referenced file doesn't exist, the import will fail gracefully with an
error comment in the output.
### File access errors
Permission issues or other file system errors are handled gracefully with
appropriate error messages.
## Code region detection
The import processor uses the `marked` library to detect code blocks and inline
code spans, ensuring that `@` imports inside these regions are properly ignored.
This provides robust handling of nested code blocks and complex Markdown
structures.
## Import tree structure
The processor returns an import tree that shows the hierarchy of imported files,
similar to Claude's `/memory` feature. This helps users debug problems with
their GEMINI.md files by showing which files were read and their import
relationships.
Example tree structure:
```
Memory Files
L project: GEMINI.md
L a.md
L b.md
L c.md
L d.md
L e.md
L f.md
L included.md
```
The tree preserves the order that files were imported and shows the complete
import chain for debugging purposes.
## Comparison to Claude Code's `/memory` (`claude.md`) approach
Claude Code's `/memory` feature (as seen in `claude.md`) produces a flat, linear
document by concatenating all included files, always marking file boundaries
with clear comments and path names. It does not explicitly present the import
hierarchy, but the LLM receives all file contents and paths, which is sufficient
for reconstructing the hierarchy if needed.
> [!NOTE]
> The import tree is mainly for clarity during development and has limited
> relevance to LLM consumption.
## API reference
### `processImports(content, basePath, debugMode?, importState?)`
Processes import statements in GEMINI.md content.
**Parameters:**
- `content` (string): The content to process for imports
- `basePath` (string): The directory path where the current file is located
- `debugMode` (boolean, optional): Whether to enable debug logging (default:
false)
- `importState` (ImportState, optional): State tracking for circular import
prevention
**Returns:** `Promise<ProcessImportsResult>` - Object containing processed
content and import tree
### `ProcessImportsResult`
```typescript
interface ProcessImportsResult {
content: string; // The processed content with imports resolved
importTree: MemoryFile; // Tree structure showing the import hierarchy
}
```
### `MemoryFile`
```typescript
interface MemoryFile {
path: string; // The file path
imports?: MemoryFile[]; // Direct imports, in the order they were imported
}
```
### `validateImportPath(importPath, basePath, allowedDirectories)`
Validates import paths to ensure they are safe and within allowed directories.
**Parameters:**
- `importPath` (string): The import path to validate
- `basePath` (string): The base directory for resolving relative paths
- `allowedDirectories` (string[]): Array of allowed directory paths
**Returns:** boolean - Whether the import path is valid
### `findProjectRoot(startDir)`
Finds the project root by searching for a `.git` directory upwards from the
given start directory. Implemented as an **async** function using non-blocking
file system APIs to avoid blocking the Node.js event loop.
**Parameters:**
- `startDir` (string): The directory to start searching from
**Returns:** `Promise<string>` - The project root directory (or the start
directory if no `.git` is found)
## Best Practices
1. **Use descriptive file names** for imported components
2. **Keep imports shallow** - avoid deeply nested import chains
3. **Document your structure** - maintain a clear hierarchy of imported files
4. **Test your imports** - ensure all referenced files exist and are accessible
5. **Use relative paths** when possible for better portability
## Troubleshooting
### Common issues
1. **Import not working**: Check that the file exists and the path is correct
2. **Circular import warnings**: Review your import structure for circular
references
3. **Permission errors**: Ensure the files are readable and within allowed
directories
4. **Path resolution issues**: Use absolute paths if relative paths aren't
resolving correctly
### Debug mode
Enable debug mode to see detailed logging of the import process:
```typescript
const result = await processImports(content, basePath, true);
```
================================================
FILE: docs/reference/policy-engine.md
================================================
# Policy engine
The Gemini CLI includes a powerful policy engine that provides fine-grained
control over tool execution. It allows users and administrators to define rules
that determine whether a tool call should be allowed, denied, or require user
confirmation.
## Quick start
To create your first policy:
1. **Create the policy directory** if it doesn't exist:
**macOS/Linux**
```bash
mkdir -p ~/.gemini/policies
```
**Windows (PowerShell)**
```powershell
New-Item -ItemType Directory -Force -Path "$env:USERPROFILE\.gemini\policies"
```
2. **Create a new policy file** (e.g., `~/.gemini/policies/my-rules.toml`). You
can use any filename ending in `.toml`; all such files in this directory
will be loaded and combined:
```toml
[[rule]]
toolName = "run_shell_command"
commandPrefix = "git status"
decision = "allow"
priority = 100
```
3. **Run a command** that triggers the policy (e.g., ask Gemini CLI to
`git status`). The tool will now execute automatically without prompting for
confirmation.
## Core concepts
The policy engine operates on a set of rules. Each rule is a combination of
conditions and a resulting decision. When a large language model wants to
execute a tool, the policy engine evaluates all rules to find the
highest-priority rule that matches the tool call.
A rule consists of the following main components:
- **Conditions**: Criteria that a tool call must meet for the rule to apply.
This can include the tool's name, the arguments provided to it, or the current
approval mode.
- **Decision**: The action to take if the rule matches (`allow`, `deny`, or
`ask_user`).
- **Priority**: A number that determines the rule's precedence. Higher numbers
win.
For example, this rule will ask for user confirmation before executing any `git`
command.
```toml
[[rule]]
toolName = "run_shell_command"
commandPrefix = "git"
decision = "ask_user"
priority = 100
```
### Conditions
Conditions are the criteria that a tool call must meet for a rule to apply. The
primary conditions are the tool's name and its arguments.
#### Tool Name
The `toolName` in the rule must match the name of the tool being called.
- **Wildcards**: You can use wildcards to match multiple tools.
- `*`: Matches **any tool** (built-in or MCP).
- `mcp_server_*`: Matches any tool from a specific MCP server.
- `mcp_*_toolName`: Matches a specific tool name across **all** MCP servers.
- `mcp_*`: Matches **any tool from any MCP server**.
> **Recommendation:** While FQN wildcards are supported, the recommended
> approach for MCP tools is to use the `mcpName` field in your TOML rules. See
> [Special syntax for MCP tools](#special-syntax-for-mcp-tools).
#### Arguments pattern
If `argsPattern` is specified, the tool's arguments are converted to a stable
JSON string, which is then tested against the provided regular expression. If
the arguments don't match the pattern, the rule does not apply.
#### Execution environment
If `interactive` is specified, the rule will only apply if the CLI's execution
environment matches the specified boolean value:
- `true`: The rule applies only in interactive mode.
- `false`: The rule applies only in non-interactive (headless) mode.
If omitted, the rule applies to both interactive and non-interactive
environments.
### Decisions
There are three possible decisions a rule can enforce:
- `allow`: The tool call is executed automatically without user interaction.
- `deny`: The tool call is blocked and is not executed. For global rules (those
without an `argsPattern`), tools that are denied are **completely excluded
from the model's memory**. This means the model will not even see the tool as
an option, which is more secure and saves context window space.
- `ask_user`: The user is prompted to approve or deny the tool call. (In
non-interactive mode, this is treated as `deny`.)
> [!NOTE]
> The `deny` decision is the recommended way to exclude tools. The
> legacy `tools.exclude` setting in `settings.json` is deprecated in favor of
> policy rules with a `deny` decision.
### Priority system and tiers
The policy engine uses a sophisticated priority system to resolve conflicts when
multiple rules match a single tool call. The core principle is simple: **the
rule with the highest priority wins**.
To provide a clear hierarchy, policies are organized into five tiers. Each tier
has a designated number that forms the base of the final priority calculation.
| Tier | Base | Description |
| :-------- | :--- | :------------------------------------------------------------------------- |
| Default | 1 | Built-in policies that ship with the Gemini CLI. |
| Extension | 2 | Policies defined in extensions. |
| Workspace | 3 | Policies defined in the current workspace's configuration directory. |
| User | 4 | Custom policies defined by the user. |
| Admin | 5 | Policies managed by an administrator (e.g., in an enterprise environment). |
Within a TOML policy file, you assign a priority value from **0 to 999**. The
engine transforms this into a final priority using the following formula:
`final_priority = tier_base + (toml_priority / 1000)`
This system guarantees that:
- Admin policies always override User, Workspace, and Default policies.
- User policies override Workspace and Default policies.
- Workspace policies override Default policies.
- You can still order rules within a single tier with fine-grained control.
For example:
- A `priority: 50` rule in a Default policy file becomes `1.050`.
- A `priority: 10` rule in a Workspace policy file becomes `3.010`.
- A `priority: 100` rule in a User policy file becomes `4.100`.
- A `priority: 20` rule in an Admin policy file becomes `5.020`.
### Approval modes
Approval modes allow the policy engine to apply different sets of rules based on
the CLI's operational mode. A rule can be associated with one or more modes
(e.g., `yolo`, `autoEdit`, `plan`). The rule will only be active if the CLI is
running in one of its specified modes. If a rule has no modes specified, it is
always active.
- `default`: The standard interactive mode where most write tools require
confirmation.
- `autoEdit`: Optimized for automated code editing; some write tools may be
auto-approved.
- `plan`: A strict, read-only mode for research and design. See
[Customizing Plan Mode Policies](../cli/plan-mode.md#customizing-policies).
- `yolo`: A mode where all tools are auto-approved (use with extreme caution).
## Rule matching
When a tool call is made, the engine checks it against all active rules,
starting from the highest priority. The first rule that matches determines the
outcome.
A rule matches a tool call if all of its conditions are met:
1. **Tool name**: The `toolName` in the rule must match the name of the tool
being called.
- **Wildcards**: You can use wildcards like `*`, `mcp_server_*`, or
`mcp_*_toolName` to match multiple tools. See [Tool Name](#tool-name) for
details.
2. **Arguments pattern**: If `argsPattern` is specified, the tool's arguments
are converted to a stable JSON string, which is then tested against the
provided regular expression. If the arguments don't match the pattern, the
rule does not apply.
## Configuration
Policies are defined in `.toml` files. The CLI loads these files from Default,
User, and (if configured) Admin directories.
### Policy locations
| Tier | Type | Location |
| :------------ | :----- | :---------------------------------------- |
| **User** | Custom | `~/.gemini/policies/*.toml` |
| **Workspace** | Custom | `$WORKSPACE_ROOT/.gemini/policies/*.toml` |
| **Admin** | System | _See below (OS specific)_ |
#### System-wide policies (Admin)
Administrators can enforce system-wide policies (Tier 5) that override all user
and default settings. These policies can be loaded from standard system
locations or supplemental paths.
##### Standard Locations
These are the default paths the CLI searches for admin policies:
| OS | Policy Directory Path |
| :---------- | :------------------------------------------------ |
| **Linux** | `/etc/gemini-cli/policies` |
| **macOS** | `/Library/Application Support/GeminiCli/policies` |
| **Windows** | `C:\ProgramData\gemini-cli\policies` |
##### Supplemental Admin Policies
Administrators can also specify supplemental policy paths using:
- The `--admin-policy` command-line flag.
- The `adminPolicyPaths` setting in a system settings file.
These supplemental policies are assigned the same **Admin** tier (Base 5) as
policies in standard locations.
**Security Guard**: Supplemental admin policies are **ignored** if any `.toml`
policy files are found in the standard system location. This prevents flag-based
overrides when a central system policy has already been established.
#### Security Requirements
To prevent privilege escalation, the CLI enforces strict security checks on the
**standard system policy directory**. If checks fail, the policies in that
directory are **ignored**.
- **Linux / macOS:** Must be owned by `root` (UID 0) and NOT writable by group
or others (e.g., `chmod 755`).
- **Windows:** Must be in `C:\ProgramData`. Standard users (`Users`, `Everyone`)
must NOT have `Write`, `Modify`, or `Full Control` permissions. If you see a
security warning, use the folder properties to remove write permissions for
non-admin groups. You may need to "Disable inheritance" in Advanced Security
Settings.
> [!NOTE]
> Supplemental admin policies (provided via `--admin-policy` or
> `adminPolicyPaths` settings) are **NOT** subject to these strict ownership
> checks, as they are explicitly provided by the user or administrator in their
> current execution context.
### TOML rule schema
Here is a breakdown of the fields available in a TOML policy rule:
```toml
[[rule]]
# A unique name for the tool, or an array of names.
toolName = "run_shell_command"
# (Optional) The name of a subagent. If provided, the rule only applies to tool calls
# made by this specific subagent.
subagent = "generalist"
# (Optional) The name of an MCP server. Can be combined with toolName
# to form a composite FQN internally like "mcp_mcpName_toolName".
mcpName = "my-custom-server"
# (Optional) Metadata hints provided by the tool. A rule matches if all
# key-value pairs provided here are present in the tool's annotations.
toolAnnotations = { readOnlyHint = true }
# (Optional) A regex to match against the tool's arguments.
argsPattern = '"command":"(git|npm)'
# (Optional) A string or array of strings that a shell command must start with.
# This is syntactic sugar for `toolName = "run_shell_command"` and an `argsPattern`.
commandPrefix = "git"
# (Optional) A regex to match against the entire shell command.
# This is also syntactic sugar for `toolName = "run_shell_command"`.
# Note: This pattern is tested against the JSON representation of the arguments (e.g., `{"command":""}`).
# Because it prepends `"command":"`, it effectively matches from the start of the command.
# Anchors like `^` or `$` apply to the full JSON string, so `^` should usually be avoided here.
# You cannot use commandPrefix and commandRegex in the same rule.
commandRegex = "git (commit|push)"
# The decision to take. Must be "allow", "deny", or "ask_user".
decision = "ask_user"
# The priority of the rule, from 0 to 999.
priority = 10
# (Optional) A custom message to display when a tool call is denied by this rule.
# This message is returned to the model and user, useful for explaining *why* it was denied.
deny_message = "Deletion is permanent"
# (Optional) An array of approval modes where this rule is active.
modes = ["autoEdit"]
# (Optional) A boolean to restrict the rule to interactive (true) or non-interactive (false) environments.
# If omitted, the rule applies to both.
interactive = true
```
### Using arrays (lists)
To apply the same rule to multiple tools or command prefixes, you can provide an
array of strings for the `toolName` and `commandPrefix` fields.
**Example:**
This single rule will apply to both the `write_file` and `replace` tools.
```toml
[[rule]]
toolName = ["write_file", "replace"]
decision = "ask_user"
priority = 10
```
### Special syntax for `run_shell_command`
To simplify writing policies for `run_shell_command`, you can use
`commandPrefix` or `commandRegex` instead of the more complex `argsPattern`.
- `commandPrefix`: Matches if the `command` argument starts with the given
string.
- `commandRegex`: Matches if the `command` argument matches the given regular
expression.
**Example:**
This rule will ask for user confirmation before executing any `git` command.
```toml
[[rule]]
toolName = "run_shell_command"
commandPrefix = "git"
decision = "ask_user"
priority = 100
```
### Special syntax for MCP tools
You can create rules that target tools from Model Context Protocol (MCP) servers
using the `mcpName` field. **This is the recommended approach** for defining MCP
policies, as it is much more robust than manually writing Fully Qualified Names
(FQNs) or string wildcards.
> [!WARNING]
> Do not use underscores (`_`) in your MCP server names (e.g., use
> `my-server` rather than `my_server`). The policy parser splits Fully Qualified
> Names (`mcp_server_tool`) on the _first_ underscore following the `mcp_`
> prefix. If your server name contains an underscore, the parser will
> misinterpret the server identity, which can cause wildcard rules and security
> policies to fail silently.
**1. Targeting a specific tool on a server**
Combine `mcpName` and `toolName` to target a single operation. When using
`mcpName`, the `toolName` field should strictly be the simple name of the tool
(e.g., `search`), **not** the Fully Qualified Name (e.g., `mcp_server_search`).
```toml
# Allows the `search` tool on the `my-jira-server` MCP
[[rule]]
mcpName = "my-jira-server"
toolName = "search"
decision = "allow"
priority = 200
```
**2. Targeting all tools on a specific server**
Specify only the `mcpName` to apply a rule to every tool provided by that
server.
**Note:** This applies to all decision types (`allow`, `deny`, `ask_user`).
```toml
# Denies all tools from the `untrusted-server` MCP
[[rule]]
mcpName = "untrusted-server"
decision = "deny"
priority = 500
deny_message = "This server is not trusted by the admin."
```
**3. Targeting all MCP servers**
Use `mcpName = "*"` to create a rule that applies to **all** tools from **any**
registered MCP server. This is useful for setting category-wide defaults.
```toml
# Ask user for any tool call from any MCP server
[[rule]]
mcpName = "*"
decision = "ask_user"
priority = 10
```
**4. Targeting a tool name across all servers**
Use `mcpName = "*"` with a specific `toolName` to target that operation
regardless of which server provides it.
```toml
# Allow the `search` tool across all connected MCP servers
[[rule]]
mcpName = "*"
toolName = "search"
decision = "allow"
priority = 50
```
## Default policies
The Gemini CLI ships with a set of default policies to provide a safe
out-of-the-box experience.
- **Read-only tools** (like `read_file`, `glob`) are generally **allowed**.
- **Agent delegation** defaults to **`ask_user`** to ensure remote agents can
prompt for confirmation, but local sub-agent actions are executed silently and
checked individually.
- **Write tools** (like `write_file`, `run_shell_command`) default to
**`ask_user`**.
- In **`yolo`** mode, a high-priority rule allows all tools.
- In **`autoEdit`** mode, rules allow certain write operations to happen without
prompting.
================================================
FILE: docs/reference/tools.md
================================================
# Tools reference
Gemini CLI uses tools to interact with your local environment, access
information, and perform actions on your behalf. These tools extend the model's
capabilities beyond text generation, letting it read files, execute commands,
and search the web.
## How to use Gemini CLI's tools
Tools are generally invoked automatically by Gemini CLI when it needs to perform
an action. However, you can also trigger specific tools manually using shorthand
syntax.
### Automatic execution and security
When the model wants to use a tool, Gemini CLI evaluates the request against its
security policies.
- **User confirmation:** You must manually approve tools that modify files or
execute shell commands (mutators). The CLI shows you a diff or the exact
command before you confirm.
- **Sandboxing:** You can run tool executions in secure, containerized
environments to isolate changes from your host system. For more details, see
the [Sandboxing](../cli/sandbox.md) guide.
- **Trusted folders:** You can configure which directories allow the model to
use system tools. For more details, see the
[Trusted folders](../cli/trusted-folders.md) guide.
Review confirmation prompts carefully before allowing a tool to execute.
### How to use manually-triggered tools
You can directly trigger key tools using special syntax in your prompt:
- **[File access](../tools/file-system.md#read_many_files) (`@`):** Use the `@`
symbol followed by a file or directory path to include its content in your
prompt. This triggers the `read_many_files` tool.
- **[Shell commands](../tools/shell.md) (`!`):** Use the `!` symbol followed by
a system command to execute it directly. This triggers the `run_shell_command`
tool.
## How to manage tools
Using built-in commands, you can inspect available tools and configure how they
behave.
### Tool discovery
Use the `/tools` command to see what tools are currently active in your session.
- **`/tools`**: Lists all registered tools with their display names.
- **`/tools desc`**: Lists all tools with their full descriptions.
This is especially useful for verifying that
[MCP servers](../tools/mcp-server.md) or custom tools are loaded correctly.
### Tool configuration
You can enable, disable, or configure specific tools in your settings. For
example, you can set a specific pager for shell commands or configure the
browser used for web searches. See the [Settings](../cli/settings.md) guide for
details.
## Available tools
The following table lists all available tools, categorized by their primary
function.
| Category | Tool | Kind | Description |
| :---------- | :----------------------------------------------- | :------------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| Execution | [`run_shell_command`](../tools/shell.md) | `Execute` | Executes arbitrary shell commands. Supports interactive sessions and background processes. Requires manual confirmation.
**Parameters:** `command`, `description`, `dir_path`, `is_background` |
| File System | [`glob`](../tools/file-system.md) | `Search` | Finds files matching specific glob patterns across the workspace.
**Parameters:** `pattern`, `dir_path`, `case_sensitive`, `respect_git_ignore`, `respect_gemini_ignore` |
| File System | [`grep_search`](../tools/file-system.md) | `Search` | Searches for a regular expression pattern within file contents. Legacy alias: `search_file_content`.
**Parameters:** `pattern`, `dir_path`, `include`, `exclude_pattern`, `names_only`, `max_matches_per_file`, `total_max_matches` |
| File System | [`list_directory`](../tools/file-system.md) | `Read` | Lists the names of files and subdirectories within a specified path.
**Parameters:** `dir_path`, `ignore`, `file_filtering_options` |
| File System | [`read_file`](../tools/file-system.md) | `Read` | Reads the content of a specific file. Supports text, images, audio, and PDF.
**Parameters:** `file_path`, `start_line`, `end_line` |
| File System | [`read_many_files`](../tools/file-system.md) | `Read` | Reads and concatenates content from multiple files. Often triggered by the `@` symbol in your prompt.
**Parameters:** `include`, `exclude`, `recursive`, `useDefaultExcludes`, `file_filtering_options` |
| File System | [`replace`](../tools/file-system.md) | `Edit` | Performs precise text replacement within a file. Requires manual confirmation.
**Parameters:** `file_path`, `instruction`, `old_string`, `new_string`, `allow_multiple` |
| File System | [`write_file`](../tools/file-system.md) | `Edit` | Creates or overwrites a file with new content. Requires manual confirmation.
**Parameters:** `file_path`, `content` |
| Interaction | [`ask_user`](../tools/ask-user.md) | `Communicate` | Requests clarification or missing information via an interactive dialog.
**Parameters:** `questions` |
| Interaction | [`write_todos`](../tools/todos.md) | `Other` | Maintains an internal list of subtasks. The model uses this to track its own progress and display it to you.
**Parameters:** `todos` |
| Memory | [`activate_skill`](../tools/activate-skill.md) | `Other` | Loads specialized procedural expertise for specific tasks from the `.gemini/skills` directory.
**Parameters:** `name` |
| Memory | [`get_internal_docs`](../tools/internal-docs.md) | `Think` | Accesses Gemini CLI's own documentation to provide more accurate answers about its capabilities.
**Parameters:** `path` |
| Memory | [`save_memory`](../tools/memory.md) | `Think` | Persists specific facts and project details to your `GEMINI.md` file to retain context.
**Parameters:** `fact` |
| Planning | [`enter_plan_mode`](../tools/planning.md) | `Plan` | Switches the CLI to a safe, read-only "Plan Mode" for researching complex changes.
**Parameters:** `reason` |
| Planning | [`exit_plan_mode`](../tools/planning.md) | `Plan` | Finalizes a plan, presents it for review, and requests approval to start implementation.
**Parameters:** `plan` |
| System | `complete_task` | `Other` | Finalizes a subagent's mission and returns the result to the parent agent. This tool is not available to the user.
**Parameters:** `result` |
| Web | [`google_web_search`](../tools/web-search.md) | `Search` | Performs a Google Search to find up-to-date information.
**Parameters:** `query` |
| Web | [`web_fetch`](../tools/web-fetch.md) | `Fetch` | Retrieves and processes content from specific URLs. **Warning:** This tool can access local and private network addresses (e.g., localhost), which may pose a security risk if used with untrusted prompts.
**Parameters:** `prompt` |
## Under the hood
For developers, the tool system is designed to be extensible and robust. The
`ToolRegistry` class manages all available tools.
You can extend Gemini CLI with custom tools by configuring
`tools.discoveryCommand` in your settings or by connecting to MCP servers.
> [!NOTE]
> For a deep dive into the internal Tool API and how to implement your
> own tools in the codebase, see the `packages/core/src/tools/` directory in
> GitHub.
## Next steps
- Learn how to [Set up an MCP server](../tools/mcp-server.md).
- Explore [Agent Skills](../cli/skills.md) for specialized expertise.
- See the [Command reference](./commands.md) for slash commands.
================================================
FILE: docs/release-confidence.md
================================================
# Release confidence strategy
This document outlines the strategy for gaining confidence in every release of
the Gemini CLI. It serves as a checklist and quality gate for release managers
to ensure we are shipping a high-quality product.
## The goal
To answer the question, "Is this release _truly_ ready for our users?" with a
high degree of confidence, based on a holistic evaluation of automated signals,
manual verification, and data.
## Level 1: Automated gates (must pass)
These are the baseline requirements. If any of these fail, the release is a
no-go.
### 1. CI/CD health
All workflows in `.github/workflows/ci.yml` must pass on the `main` branch (for
nightly) or the release branch (for preview/stable).
- **Platforms:** Tests must pass on **Linux and macOS**.
> [!NOTE]
> Windows tests currently run with `continue-on-error: true`. While a
> failure here doesn't block the release technically, it should be
> investigated.
- **Checks:**
- **Linting:** No linting errors (ESLint, Prettier, etc.).
- **Typechecking:** No TypeScript errors.
- **Unit Tests:** All unit tests in `packages/core` and `packages/cli` must
pass.
- **Build:** The project must build and bundle successfully.
### 2. End-to-end (E2E) tests
All workflows in `.github/workflows/chained_e2e.yml` must pass.
- **Platforms:** **Linux, macOS and Windows**.
- **Sandboxing:** Tests must pass with both `sandbox:none` and `sandbox:docker`
on Linux.
### 3. Post-deployment smoke tests
After a release is published to npm, the `smoke-test.yml` workflow runs. This
must pass to confirm the package is installable and the binary is executable.
- **Command:** `npx -y @google/gemini-cli@<version> --version` must return the
  correct version without error.
- **Platform:** Currently runs on `ubuntu-latest`.
## Level 2: Manual verification and dogfooding
Automated tests cannot catch everything, especially UX issues.
### 1. Dogfooding via `preview` tag
The weekly release cadence promotes code from `main` -> `nightly` -> `preview`
-> `stable`.
- **Requirement:** The `preview` release must be used by maintainers for at
least **one week** before being promoted to `stable`.
- **Action:** Maintainers should install the preview version locally:
```bash
npm install -g @google/gemini-cli@preview
```
- **Goal:** To catch regressions and UX issues in day-to-day usage before they
reach the broad user base.
### 2. Critical user journey (CUJ) checklist
Before promoting a `preview` release to `stable`, a release manager must
manually run through this checklist.
- **Setup:**
- [ ] Uninstall any existing global version:
`npm uninstall -g @google/gemini-cli`
- [ ] Clear npx cache (optional but recommended): `npm cache clean --force`
- [ ] Install the preview version: `npm install -g @google/gemini-cli@preview`
- [ ] Verify version: `gemini --version`
- **Authentication:**
- [ ] In interactive mode run `/auth` and verify all sign in flows work:
- [ ] Sign in with Google
- [ ] API Key
- [ ] Vertex AI
- **Basic prompting:**
- [ ] Run `gemini "Tell me a joke"` and verify a sensible response.
- [ ] Run in interactive mode: `gemini`. Ask a follow-up question to test
context.
- **Piped input:**
- [ ] Run `echo "Summarize this" | gemini` and verify it processes stdin.
- **Context management:**
- [ ] In interactive mode, use `@file` to add a local file to context. Ask a
question about it.
- **Settings:**
  - [ ] In interactive mode run `/settings` and make modifications
  - [ ] Validate that the setting is changed
- **Function calling:**
- [ ] In interactive mode, ask gemini to "create a file named hello.md with
the content 'hello world'" and verify the file is created correctly.
If any of these CUJs fail, the release is a no-go until a patch is applied to
the `preview` channel.
### 3. Pre-Launch bug bash (tier 1 and 2 launches)
For high-impact releases, an organized bug bash is required to ensure a higher
level of quality and to catch issues across a wider range of environments and
use cases.
**Definition of tiers:**
- **Tier 1:** Industry-Moving News 🚀
- **Tier 2:** Important News for Our Users 📣
- **Tier 3:** Relevant, but Not Life-Changing 💡
- **Tier 4:** Bug Fixes ⚒️
**Requirement:**
A bug bash must be scheduled at least **72 hours in advance** of any Tier 1 or
Tier 2 launch.
**Rule of thumb:**
A bug bash should be considered for any release that involves:
- A blog post
- Coordinated social media announcements
- Media relations or press outreach
- A "Turbo" launch event
## Level 3: Telemetry and data review
### Dashboard health
- [ ] Go to `go/gemini-cli-dash`.
- [ ] Navigate to the "Tool Call" tab.
- [ ] Validate that there are no spikes in errors for the release you would like
to promote.
### Model evaluation
- [ ] Navigate to `go/gemini-cli-offline-evals-dash`.
- [ ] Make sure that the recurring eval run for the release you want to promote
      is within the range of average eval runs.
## The "go/no-go" decision
Before triggering the `Release: Promote` workflow to move `preview` to `stable`:
1. [ ] **Level 1:** CI and E2E workflows are green for the commit corresponding
to the current `preview` tag.
2. [ ] **Level 2:** The `preview` version has been out for one week, and the
CUJ checklist has been completed successfully by a release manager. No
blocking issues have been reported.
3. [ ] **Level 3:** Dashboard Health and Model Evaluation checks have been
completed and show no regressions.
If all checks pass, proceed with the promotion.
================================================
FILE: docs/releases.md
================================================
# Gemini CLI releases
## `dev` vs `prod` environment
Our release flows support both `dev` and `prod` environments.
The `dev` environment pushes to a private Github-hosted NPM repository, with the
package names beginning with `@google-gemini/**` instead of `@google/**`.
The `prod` environment pushes to the public global NPM registry via Wombat
Dressing Room, which is Google's system for managing NPM packages in the
`@google/**` namespace. The packages are all named `@google/**`.
More information can be found about these systems in the
[NPM Package Overview](npm.md)
### Package scopes
| Package | `prod` (Wombat Dressing Room) | `dev` (Github Private NPM Repo) |
| ---------- | ----------------------------- | ----------------------------------------- |
| CLI | @google/gemini-cli | @google-gemini/gemini-cli |
| Core | @google/gemini-cli-core | @google-gemini/gemini-cli-core |
| A2A Server | @google/gemini-cli-a2a-server | @google-gemini/gemini-cli-a2a-server |
## Release cadence and tags
We will follow https://semver.org/ as closely as possible but will call out when
or if we have to deviate from it. Our weekly releases will be minor version
increments and any bug or hotfixes between releases will go out as patch
versions on the most recent release.
Each Tuesday ~20:00 UTC new Stable and Preview releases will be cut. The
promotion flow is:
- Code is committed to main and pushed each night to nightly
- After no more than 1 week on main, code is promoted to the `preview` channel
- After 1 week the most recent `preview` channel is promoted to `stable` channel
- Patch fixes will be produced against both `preview` and `stable` as needed,
with the final 'patch' version number incrementing each time.
### Preview
These releases will not have been fully vetted and may contain regressions or
other outstanding issues. Please help us test by installing with the `preview`
tag.
```bash
npm install -g @google/gemini-cli@preview
```
### Stable
This will be the full promotion of last week's release + any bug fixes and
validations. Use `latest` tag.
```bash
npm install -g @google/gemini-cli@latest
```
### Nightly
- New releases will be published each day at UTC 00:00. This will be all changes
from the main branch as represented at time of release. It should be assumed
there are pending validations and issues. Use `nightly` tag.
```bash
npm install -g @google/gemini-cli@nightly
```
## Weekly release promotion
Each Tuesday, the on-call engineer will trigger the "Promote Release" workflow.
This single action automates the entire weekly release process:
1. **Promotes preview to stable:** The workflow identifies the latest `preview`
release and promotes it to `stable`. This becomes the new `latest` version
on npm.
2. **Promotes nightly to preview:** The latest `nightly` release is then
promoted to become the new `preview` version.
3. **Prepares for next nightly:** A pull request is automatically created and
merged to bump the version in `main` in preparation for the next nightly
release.
This process ensures a consistent and reliable release cadence with minimal
manual intervention.
### Source of truth for versioning
To ensure the highest reliability, the release promotion process uses the **NPM
registry as the single source of truth** for determining the current version of
each release channel (`stable`, `preview`, and `nightly`).
1. **Fetch from NPM:** The workflow begins by querying NPM's `dist-tags`
(`latest`, `preview`, `nightly`) to get the exact version strings for the
packages currently available to users.
2. **Cross-check for integrity:** For each version retrieved from NPM, the
workflow performs a critical integrity check:
- It verifies that a corresponding **git tag** exists in the repository.
- It verifies that a corresponding **GitHub release** has been created.
3. **Halt on discrepancy:** If either the git tag or the GitHub Release is
missing for a version listed on NPM, the workflow will immediately fail.
This strict check prevents promotions from a broken or incomplete previous
release and alerts the on-call engineer to a release state inconsistency
that must be manually resolved.
4. **Calculate next version:** Only after these checks pass does the workflow
proceed to calculate the next semantic version based on the trusted version
numbers retrieved from NPM.
This NPM-first approach, backed by integrity checks, makes the release process
highly robust and prevents the kinds of versioning discrepancies that can arise
from relying solely on git history or API outputs.
## Manual releases
For situations requiring a release outside of the regular nightly and weekly
promotion schedule, and NOT already covered by patching process, you can use the
`Release: Manual` workflow. This workflow provides a direct way to publish a
specific version from any branch, tag, or commit SHA.
### How to create a manual release
1. Navigate to the **Actions** tab of the repository.
2. Select the **Release: Manual** workflow from the list.
3. Click the **Run workflow** dropdown button.
4. Fill in the required inputs:
- **Version**: The exact version to release (e.g., `v0.6.1`). This must be a
valid semantic version with a `v` prefix.
- **Ref**: The branch, tag, or full commit SHA to release from.
- **NPM Channel**: The npm channel to publish to. The options are `preview`,
`nightly`, `latest` (for stable releases), and `dev`. The default is
`dev`.
- **Dry Run**: Leave as `true` to run all steps without publishing, or set
to `false` to perform a live release.
- **Force Skip Tests**: Set to `true` to skip the test suite. This is not
recommended for production releases.
- **Skip GitHub Release**: Set to `true` to skip creating a GitHub release
and create an npm release only.
- **Environment**: Select the appropriate environment. The `dev` environment
is intended for testing. The `prod` environment is intended for production
releases. `prod` is the default and will require authorization from a
release administrator.
5. Click **Run workflow**.
The workflow will then proceed to test (if not skipped), build, and publish the
release. If the workflow fails during a non-dry run, it will automatically
create a GitHub issue with the failure details.
## Rollback/rollforward
In the event that a release has a critical regression, you can quickly roll back
to a previous stable version or roll forward to a new patch by changing the npm
`dist-tag`. The `Release: Change Tags` workflow provides a safe and controlled
way to do this.
This is the preferred method for both rollbacks and rollforwards, as it does not
require a full release cycle.
### How to change a release tag
1. Navigate to the **Actions** tab of the repository.
2. Select the **Release: Change Tags** workflow from the list.
3. Click the **Run workflow** dropdown button.
4. Fill in the required inputs:
- **Version**: The existing package version that you want to point the tag
to (e.g., `0.5.0-preview-2`). This version **must** already be published
to the npm registry.
- **Channel**: The npm `dist-tag` to apply (e.g., `preview`, `stable`).
- **Dry Run**: Leave as `true` to log the action without making changes, or
set to `false` to perform the live tag change.
- **Environment**: Select the appropriate environment. The `dev` environment
is intended for testing. The `prod` environment is intended for production
releases. `prod` is the default and will require authorization from a
release administrator.
5. Click **Run workflow**.
The workflow will then run `npm dist-tag add` for the appropriate `gemini-cli`,
`gemini-cli-core` and `gemini-cli-a2a-server` packages, pointing the specified
channel to the specified version.
## Patching
If a critical bug that is already fixed on `main` needs to be patched on a
`stable` or `preview` release, the process is now highly automated.
### How to patch
#### 1. Create the patch pull request
There are two ways to create a patch pull request:
**Option A: From a GitHub comment (recommended)**
After a pull request containing the fix has been merged, a maintainer can add a
comment on that same PR with the following format:
`/patch [channel]`
- **channel** (optional):
- _no channel_ - patches both stable and preview channels (default,
recommended for most fixes)
- `both` - patches both stable and preview channels (same as default)
- `stable` - patches only the stable channel
- `preview` - patches only the preview channel
Examples:
- `/patch` (patches both stable and preview - default)
- `/patch both` (patches both stable and preview - explicit)
- `/patch stable` (patches only stable)
- `/patch preview` (patches only preview)
The `Release: Patch from Comment` workflow will automatically find the merge
commit SHA and trigger the `Release: Patch (1) Create PR` workflow. If the PR is
not yet merged, it will post a comment indicating the failure.
**Option B: Manually triggering the workflow**
Navigate to the **Actions** tab and run the **Release: Patch (1) Create PR**
workflow.
- **Commit**: The full SHA of the commit on `main` that you want to cherry-pick.
- **Channel**: The channel you want to patch (`stable` or `preview`).
This workflow will automatically:
1. Find the latest release tag for the channel.
2. Create a release branch from that tag if one doesn't exist (e.g.,
`release/v0.5.1-pr-12345`).
3. Create a new hotfix branch from the release branch.
4. Cherry-pick your specified commit into the hotfix branch.
5. Create a pull request from the hotfix branch back to the release branch.
#### 2. Review and merge
Review the automatically created pull request(s) to ensure the cherry-pick was
successful and the changes are correct. Once approved, merge the pull request.
> [!WARNING]
> The `release/*` branches are protected by branch protection
> rules. A pull request to one of these branches requires at least one review from
> a code owner before it can be merged. This ensures that no unauthorized code is
> released.
#### 2.5. Adding multiple commits to a hotfix (advanced)
If you need to include multiple fixes in a single patch release, you can add
additional commits to the hotfix branch after the initial patch PR has been
created:
1. **Start with the primary fix**: Use `/patch` (or `/patch both`) on the most
important PR to create the initial hotfix branch and PR.
2. **Checkout the hotfix branch locally**:
```bash
git fetch origin
git checkout hotfix/v0.5.1/stable/cherry-pick-abc1234 # Use the actual branch name from the PR
```
3. **Cherry-pick additional commits**:
```bash
git cherry-pick <commit-sha-1>
git cherry-pick <commit-sha-2>
# Add as many commits as needed
```
4. **Push the updated branch**:
```bash
git push origin hotfix/v0.5.1/stable/cherry-pick-abc1234
```
5. **Test and review**: The existing patch PR will automatically update with
your additional commits. Test thoroughly since you're now releasing multiple
changes together.
6. **Update the PR description**: Consider updating the PR title and description
to reflect that it includes multiple fixes.
This approach allows you to group related fixes into a single patch release
while maintaining full control over what gets included and how conflicts are
resolved.
#### 3. Automatic release
Upon merging the pull request, the `Release: Patch (2) Trigger` workflow is
automatically triggered. It will then start the `Release: Patch (3) Release`
workflow, which will:
1. Build and test the patched code.
2. Publish the new patch version to npm.
3. Create a new GitHub release with the patch notes.
This fully automated process ensures that patches are created and released
consistently and reliably.
#### Troubleshooting: Older branch workflows
**Issue**: If the patch trigger workflow fails with errors like "Resource not
accessible by integration" or references to non-existent workflow files (e.g.,
`patch-release.yml`), this indicates the hotfix branch contains an outdated
version of the workflow files.
**Root cause**: When a PR is merged, GitHub Actions runs the workflow definition
from the **source branch** (the hotfix branch), not from the target branch (the
release branch). If the hotfix branch was created from an older release branch
that predates workflow improvements, it will use the old workflow logic.
**Solutions**:
**Option 1: Manual trigger (quick fix)** Manually trigger the updated workflow
from the branch with the latest workflow code:
```bash
# For a preview channel patch with tests skipped
gh workflow run release-patch-2-trigger.yml --ref <branch-with-updated-workflows> \
  --field ref="hotfix/v0.6.0-preview.2/preview/cherry-pick-abc1234" \
  --field workflow_ref=<branch-with-updated-workflows> \
  --field dry_run=false \
  --field force_skip_tests=true
# For a stable channel patch
gh workflow run release-patch-2-trigger.yml --ref <branch-with-updated-workflows> \
  --field ref="hotfix/v0.5.1/stable/cherry-pick-abc1234" \
  --field workflow_ref=<branch-with-updated-workflows> \
  --field dry_run=false \
  --field force_skip_tests=false
# Example using main branch (most common case)
gh workflow run release-patch-2-trigger.yml --ref main \
--field ref="hotfix/v0.6.0-preview.2/preview/cherry-pick-abc1234" \
--field workflow_ref=main \
--field dry_run=false \
--field force_skip_tests=true
```
**Note**: Replace `<branch-with-updated-workflows>` with the branch containing
the latest workflow improvements (usually `main`, but could be a feature branch
if testing updates).
**Option 2: Update the hotfix branch** Merge the latest main branch into your
hotfix branch to get the updated workflows:
```bash
git checkout hotfix/v0.6.0-preview.2/preview/cherry-pick-abc1234
git merge main
git push
```
Then close and reopen the PR to retrigger the workflow with the updated version.
**Option 3: Direct release trigger** Skip the trigger workflow entirely and
directly run the release workflow:
```bash
# Replace channel and release_ref with appropriate values
gh workflow run release-patch-3-release.yml --ref main \
--field type="preview" \
--field dry_run=false \
--field force_skip_tests=true \
--field release_ref="release/v0.6.0-preview.2"
```
### Docker
We also run a Google cloud build called
[release-docker.yml](../.gcp/release-docker.yml). Which publishes the sandbox
docker to match your release. This will also be moved to GH and combined with
the main release file once service account permissions are sorted out.
## Release validation
After pushing a new release smoke testing should be performed to ensure that the
packages are working as expected. This can be done by installing the packages
locally and running a set of tests to ensure that they are functioning
correctly.
- `npx -y @google/gemini-cli@latest --version` to validate the push worked as
  expected if you were not doing an rc or dev tag
- `npx -y @google/gemini-cli@<release-tag> --version` to validate the tag pushed
  appropriately
- _This is destructive locally_
  `npm uninstall @google/gemini-cli && npm uninstall -g @google/gemini-cli && npm cache clean --force && npm install @google/gemini-cli@<version>`
- Smoke testing a basic run through of exercising a few llm commands and tools
is recommended to ensure that the packages are working as expected. We'll
codify this more in the future.
## Local testing and validation: Changes to the packaging and publishing process
If you need to test the release process without actually publishing to NPM or
creating a public GitHub release, you can trigger the workflow manually from the
GitHub UI.
1. Go to the
[Actions tab](https://github.com/google-gemini/gemini-cli/actions/workflows/release-manual.yml)
of the repository.
2. Click on the "Run workflow" dropdown.
3. Leave the `dry_run` option checked (`true`).
4. Click the "Run workflow" button.
This will run the entire release process but will skip the `npm publish` and
`gh release create` steps. You can inspect the workflow logs to ensure
everything is working as expected.
It is crucial to test any changes to the packaging and publishing process
locally before committing them. This ensures that the packages will be published
correctly and that they will work as expected when installed by a user.
To validate your changes, you can perform a dry run of the publishing process.
This will simulate the publishing process without actually publishing the
packages to the npm registry.
```bash
npm_package_version=9.9.9 SANDBOX_IMAGE_REGISTRY="registry" SANDBOX_IMAGE_NAME="thename" npm run publish:npm --dry-run
```
This command will do the following:
1. Build all the packages.
2. Run all the prepublish scripts.
3. Create the package tarballs that would be published to npm.
4. Print a summary of the packages that would be published.
You can then inspect the generated tarballs to ensure that they contain the
correct files and that the `package.json` files have been updated correctly. The
tarballs will be created in the root of each package's directory (e.g.,
`packages/cli/google-gemini-cli-0.1.6.tgz`).
By performing a dry run, you can be confident that your changes to the packaging
process are correct and that the packages will be published successfully.
## Release deep dive
The release process creates two distinct types of artifacts for different
distribution channels: standard packages for the NPM registry and a single,
self-contained executable for GitHub Releases.
Here are the key stages:
**Stage 1: Pre-release sanity checks and versioning**
- **What happens:** Before any files are moved, the process ensures the project
is in a good state. This involves running tests, linting, and type-checking
(`npm run preflight`). The version number in the root `package.json` and
`packages/cli/package.json` is updated to the new release version.
**Stage 2: Building the source code for NPM**
- **What happens:** The TypeScript source code in `packages/core/src` and
`packages/cli/src` is compiled into standard JavaScript.
- **File movement:**
- `packages/core/src/**/*.ts` -> compiled to -> `packages/core/dist/`
- `packages/cli/src/**/*.ts` -> compiled to -> `packages/cli/dist/`
- **Why:** The TypeScript code written during development needs to be converted
into plain JavaScript that can be run by Node.js. The `core` package is built
first as the `cli` package depends on it.
**Stage 3: Publishing standard packages to NPM**
- **What happens:** The `npm publish` command is run for the
`@google/gemini-cli-core` and `@google/gemini-cli` packages.
- **Why:** This publishes them as standard Node.js packages. Users installing
via `npm install -g @google/gemini-cli` will download these packages, and
`npm` will handle installing the `@google/gemini-cli-core` dependency
automatically. The code in these packages is not bundled into a single file.
**Stage 4: Assembling and creating the GitHub release asset**
This stage happens _after_ the NPM publish and creates the single-file
executable that enables `npx` usage directly from the GitHub repository.
1. **The JavaScript bundle is created:**
- **What happens:** The built JavaScript from both `packages/core/dist` and
`packages/cli/dist`, along with all third-party JavaScript dependencies,
are bundled by `esbuild` into a single, executable JavaScript file (e.g.,
`gemini.js`). The `node-pty` library is excluded from this bundle as it
contains native binaries.
- **Why:** This creates a single, optimized file that contains all the
necessary application code. It simplifies execution for users who want to
run the CLI without a full `npm install`, as all dependencies (including
the `core` package) are included directly.
2. **The `bundle` directory is assembled:**
- **What happens:** A temporary `bundle` folder is created at the project
root. The single `gemini.js` executable is placed inside it, along with
other essential files.
- **File movement:**
- `gemini.js` (from esbuild) -> `bundle/gemini.js`
- `README.md` -> `bundle/README.md`
- `LICENSE` -> `bundle/LICENSE`
- `packages/cli/src/utils/*.sb` (sandbox profiles) -> `bundle/`
- **Why:** This creates a clean, self-contained directory with everything
needed to run the CLI and understand its license and usage.
3. **The GitHub release is created:**
- **What happens:** The contents of the `bundle` directory, including the
`gemini.js` executable, are attached as assets to a new GitHub Release.
- **Why:** This makes the single-file version of the CLI available for
direct download and enables the
`npx https://github.com/google-gemini/gemini-cli` command, which downloads
and runs this specific bundled asset.
**Summary of artifacts**
- **NPM:** Publishes standard, un-bundled Node.js packages. The primary artifact
is the code in `packages/cli/dist`, which depends on
`@google/gemini-cli-core`.
- **GitHub release:** Publishes a single, bundled `gemini.js` file that contains
all dependencies, for easy execution via `npx`.
This dual-artifact process ensures that both traditional `npm` users and those
who prefer the convenience of `npx` have an optimized experience.
## Notifications
Failing release workflows will automatically create an issue with the label
`release-failure`.
A notification will be posted to the maintainer's chat channel when issues with
this type are created.
### Modifying chat notifications
Notifications use
[GitHub for Google Chat](https://workspace.google.com/marketplace/app/github_for_google_chat/536184076190).
To modify the notifications, use `/github-settings` within the chat space.
> [!WARNING]
> The following instructions describe a fragile workaround that depends on the
> internal structure of the chat application's UI. It is likely to break with
> future updates.
The list of available labels is not currently populated correctly. If you want
to add a label that does not appear alphabetically in the first 30 labels in the
repo, you must use your browser's developer tools to manually modify the UI:
1. Open your browser's developer tools (e.g., Chrome DevTools).
2. In the `/github-settings` dialog, inspect the list of labels.
3. Locate one of the HTML elements representing a label.
4. In the HTML, modify the `data-option-value` attribute of that element to the
   desired label name (e.g., `release-failure`).
5. Click on your modified label in the UI to select it, then save your settings.
================================================
FILE: docs/resources/faq.md
================================================
# Frequently asked questions (FAQ)
This page provides answers to common questions and solutions to frequent
problems encountered while using Gemini CLI.
## General issues
This section addresses common questions about Gemini CLI usage, security, and
troubleshooting general errors.
### Why can't I use third-party software (e.g. Claude Code, OpenClaw, OpenCode) with Gemini CLI?
Using third-party software, tools, or services to harvest or piggyback on Gemini
CLI's OAuth authentication to access our backend services is a direct violation
of our [applicable terms and policies](tos-privacy.md). Doing so bypasses our
intended authentication and security structures, and such actions may be grounds
for immediate suspension or termination of your account. If you would like to
use a third-party coding agent with Gemini, the supported and secure method is
to use a Vertex AI or Google AI Studio API key.
### Why am I getting an `API error: 429 - Resource exhausted`?
This error indicates that you have exceeded your API request limit. The Gemini
API has rate limits to prevent abuse and ensure fair usage.
To resolve this, you can:
- **Check your usage:** Review your API usage in the Google AI Studio or your
Google Cloud project dashboard.
- **Optimize your prompts:** If you are making many requests in a short period,
try to batch your prompts or introduce delays between requests.
- **Request a quota increase:** If you consistently need a higher limit, you can
request a quota increase from Google.
### Why am I getting an `ERR_REQUIRE_ESM` error when running `npm run start`?
This error typically occurs in Node.js projects when there is a mismatch between
CommonJS and ES Modules.
This is often due to a misconfiguration in your `package.json` or
`tsconfig.json`. Ensure that:
1. Your `package.json` has `"type": "module"`.
2. Your `tsconfig.json` has `"module": "NodeNext"` or a compatible setting in
the `compilerOptions`.
If the problem persists, try deleting your `node_modules` directory and
`package-lock.json` file, and then run `npm install` again.
### Why don't I see cached token counts in my stats output?
Cached token information is only displayed when cached tokens are being used.
This feature is available for API key users (Gemini API key or Google Cloud
Vertex AI) but not for OAuth users (such as personal Google accounts using
Gmail, or enterprise accounts using Google Workspace). This is because the Gemini
Code Assist API does not support cached content creation. You can still view
your total token usage using the `/stats` command in Gemini CLI.
## Installation and updates
### How do I check which version of Gemini CLI I'm currently running?
You can check your current Gemini CLI version using one of these methods:
- Run `gemini --version` or `gemini -v` from your terminal
- Check the globally installed version using your package manager:
- npm: `npm list -g @google/gemini-cli`
- pnpm: `pnpm list -g @google/gemini-cli`
- yarn: `yarn global list @google/gemini-cli`
- bun: `bun pm ls -g @google/gemini-cli`
- homebrew: `brew list --versions gemini-cli`
- Inside an active Gemini CLI session, use the `/about` command
### How do I update Gemini CLI to the latest version?
If you installed it globally via `npm`, update it using the command
`npm install -g @google/gemini-cli@latest`. If you compiled it from source, pull
the latest changes from the repository, and then rebuild using the command
`npm run build`.
## Platform-specific issues
### Why does the CLI crash on Windows when I run a command like `chmod +x`?
Commands like `chmod` are specific to Unix-like operating systems (Linux,
macOS). They are not available on Windows by default.
To resolve this, you can:
- **Use Windows-equivalent commands:** Instead of `chmod`, you can use `icacls`
to modify file permissions on Windows.
- **Use a compatibility layer:** Tools like Git Bash or Windows Subsystem for
Linux (WSL) provide a Unix-like environment on Windows where these commands
will work.
## Configuration
### How do I configure my `GOOGLE_CLOUD_PROJECT`?
You can configure your Google Cloud Project ID using an environment variable.
Set the `GOOGLE_CLOUD_PROJECT` environment variable in your shell:
**macOS/Linux**
```bash
export GOOGLE_CLOUD_PROJECT="your-project-id"
```
**Windows (PowerShell)**
```powershell
$env:GOOGLE_CLOUD_PROJECT="your-project-id"
```
To make this setting permanent, add this line to your shell's startup file
(e.g., `~/.bashrc`, `~/.zshrc`).
### What is the best way to store my API keys securely?
Exposing API keys in scripts or checking them into source control is a security
risk.
To store your API keys securely, you can:
- **Use a `.env` file:** Create a `.env` file in your project's `.gemini`
directory (`.gemini/.env`) and store your keys there. Gemini CLI will
automatically load these variables.
- **Use your system's keyring:** For the most secure storage, use your operating
system's secret management tool (like macOS Keychain, Windows Credential
Manager, or a secret manager on Linux). You can then have your scripts or
environment load the key from the secure storage at runtime.
### Where are the Gemini CLI configuration and settings files stored?
The Gemini CLI configuration is stored in two `settings.json` files:
1. In your home directory: `~/.gemini/settings.json`.
2. In your project's root directory: `./.gemini/settings.json`.
Refer to [Gemini CLI Configuration](../reference/configuration.md) for more
details.
## Google AI Pro/Ultra and subscription FAQs
### Where can I learn more about my Google AI Pro or Google AI Ultra subscription?
To learn more about your Google AI Pro or Google AI Ultra subscription, visit
**Manage subscription** in your [subscription settings](https://one.google.com).
### How do I know if I have higher limits for Google AI Pro or Ultra?
If you're subscribed to Google AI Pro or Ultra, you automatically have higher
limits to Gemini Code Assist and Gemini CLI. These are shared across Gemini CLI
and agent mode in the IDE. You can confirm you have higher limits by checking if
you are still subscribed to Google AI Pro or Ultra in your
[subscription settings](https://one.google.com).
### What is the privacy policy for using Gemini Code Assist or Gemini CLI if I've subscribed to Google AI Pro or Ultra?
To learn more about your privacy policy and terms of service governed by your
subscription, visit
[Gemini Code Assist: Terms of Service and Privacy Policies](https://developers.google.com/gemini-code-assist/resources/privacy-notices).
### I've upgraded to Google AI Pro or Ultra but it still says I am hitting quota limits. Is this a bug?
The higher limits in your Google AI Pro or Ultra subscription are for Gemini 2.5
across both Gemini 2.5 Pro and Flash. They are shared quota across Gemini CLI
and agent mode in Gemini Code Assist IDE extensions. You can learn more about
quota limits for Gemini CLI, Gemini Code Assist and agent mode in Gemini Code
Assist at
[Quotas and limits](https://developers.google.com/gemini-code-assist/resources/quotas).
### If I upgrade to higher limits for Gemini CLI and Gemini Code Assist by purchasing a Google AI Pro or Ultra subscription, will Gemini start using my data to improve its machine learning models?
Google does not use your data to improve Google's machine learning models if you
purchase a paid plan. Note: If you decide to remain on the free version of
Gemini Code Assist, Gemini Code Assist for individuals, you can also opt out of
using your data to improve Google's machine learning models. See the
[Gemini Code Assist for individuals privacy notice](https://developers.google.com/gemini-code-assist/resources/privacy-notice-gemini-code-assist-individuals)
for more information.
## Not seeing your question?
Search the
[Gemini CLI Q&A discussions on GitHub](https://github.com/google-gemini/gemini-cli/discussions/categories/q-a)
or
[start a new discussion on GitHub](https://github.com/google-gemini/gemini-cli/discussions/new?category=q-a).
================================================
FILE: docs/resources/quota-and-pricing.md
================================================
# Gemini CLI: Quotas and pricing
Gemini CLI offers a generous free tier that covers many individual developers'
use cases. For enterprise or professional usage, or if you need increased quota,
several options are available depending on your authentication account type.
For a high-level comparison of available subscriptions and to select the right
quota for your needs, see the [Plans page](https://geminicli.com/plans/).
## Overview
This article outlines the specific quotas and pricing applicable to Gemini CLI
when using different authentication methods.
Generally, there are three categories to choose from:
- Free Usage: Ideal for experimentation and light use.
- Paid Tier (fixed price): For individual developers or enterprises who need
more generous daily quotas and predictable costs.
- Pay-As-You-Go: The most flexible option for professional use, long-running
tasks, or when you need full control over your usage.
## Free usage
Access to Gemini CLI begins with a generous free tier, perfect for
experimentation and light use.
Your free usage is governed by the following limits, which depend on your
authorization type.
### Log in with Google (Gemini Code Assist for individuals)
For users who authenticate by using their Google account to access Gemini Code
Assist for individuals. This includes:
- 1000 model requests / user / day
- 60 model requests / user / minute
- Model requests will be made across the Gemini model family as determined by
Gemini CLI.
Learn more at
[Gemini Code Assist for Individuals Limits](https://developers.google.com/gemini-code-assist/resources/quotas#quotas-for-agent-mode-gemini-cli).
### Log in with Gemini API Key (unpaid)
If you are using a Gemini API key, you can also benefit from a free tier. This
includes:
- 250 model requests / user / day
- 10 model requests / user / minute
- Model requests to Flash model only.
Learn more at
[Gemini API Rate Limits](https://ai.google.dev/gemini-api/docs/rate-limits).
### Log in with Vertex AI (Express Mode)
Vertex AI offers an Express Mode without the need to enable billing. This
includes:
- 90 days before you need to enable billing.
- Quotas and models are variable and specific to your account.
Learn more at
[Vertex AI Express Mode Limits](https://cloud.google.com/vertex-ai/generative-ai/docs/start/express-mode/overview#quotas).
## Paid tier: Higher limits for a fixed cost
If you use up your initial number of requests, you can continue to benefit from
Gemini CLI by upgrading to one of the following subscriptions:
### Individuals
These tiers apply when you sign in with a personal account. To verify whether
you're on a personal account, visit
[Google One](https://one.google.com/about/plans?hl=en-US&g1_landing_page=0):
- If you are on a personal account, you will see your personal dashboard.
- If you are not on a personal account, you will see: "You're currently signed
in to your Google Workspace Account."
**Supported tiers:** _Tiers not listed below, including Google AI Plus, are not
supported._
- [Google AI Pro and AI Ultra](https://gemini.google/subscriptions/). This is
recommended for individual developers. Quotas and pricing are based on a fixed
price subscription.
For predictable costs, you can log in with Google.
Learn more at
[Gemini Code Assist Quotas and Limits](https://developers.google.com/gemini-code-assist/resources/quotas)
### Through your organization
These tiers are applicable when you are signing in with a Google Workspace
account.
- To verify your account type, visit
[the Google One page](https://one.google.com/about/plans?hl=en-US&g1_landing_page=0).
- You are on a workspace account if you see the message "You're currently signed
in to your Google Workspace Account".
**Supported tiers:** _Tiers not listed below, including Workspace AI
Standard/Plus and AI Expanded, are not supported._
- [Workspace AI Ultra Access](https://workspace.google.com/products/ai-ultra/).
- [Purchase a Gemini Code Assist Subscription through Google Cloud](https://cloud.google.com/gemini/docs/codeassist/overview).
Quotas and pricing are based on a fixed price subscription with assigned
license seats. For predictable costs, you can sign in with Google.
This includes the following request limits:
- Gemini Code Assist Standard edition:
- 1500 model requests / user / day
- 120 model requests / user / minute
- Gemini Code Assist Enterprise edition:
- 2000 model requests / user / day
- 120 model requests / user / minute
- Model requests will be made across the Gemini model family as determined by
Gemini CLI.
[Learn more about Gemini Code Assist license limits](https://developers.google.com/gemini-code-assist/resources/quotas#quotas-for-agent-mode-gemini-cli).
## Pay as you go
If you hit your daily request limits or exhaust your Gemini Pro quota even after
upgrading, the most flexible solution is to switch to a pay-as-you-go model,
where you pay for the specific amount of processing you use. This is the
recommended path for uninterrupted access.
To do this, log in using a Gemini API key or Vertex AI.
### Vertex AI (regular mode)
An enterprise-grade platform for building, deploying, and managing AI models,
including Gemini. It offers enhanced security, data governance, and integration
with other Google Cloud services.
- Quota: Governed by a dynamic shared quota system or pre-purchased provisioned
throughput.
- Cost: Based on model and token usage.
Learn more at
[Vertex AI Dynamic Shared Quota](https://cloud.google.com/vertex-ai/generative-ai/docs/resources/dynamic-shared-quota)
and [Vertex AI Pricing](https://cloud.google.com/vertex-ai/pricing).
### Gemini API key
Ideal for developers who want to quickly build applications with the Gemini
models. This is the most direct way to use the models.
- Quota: Varies by pricing tier.
- Cost: Varies by pricing tier and model/token usage.
Learn more at
[Gemini API Rate Limits](https://ai.google.dev/gemini-api/docs/rate-limits) and
[Gemini API Pricing](https://ai.google.dev/gemini-api/docs/pricing).
It’s important to highlight that when using an API key, you pay per token/call.
This can be more expensive for many small calls with few tokens, but it's the
only way to ensure your workflow isn't interrupted by reaching a limit on your
quota.
## Gemini for workspace plans
These plans currently apply only to Google's web-based Gemini experiences (for
example, the Gemini web app or the Flow video editor). These plans do not apply
to the API usage that powers Gemini CLI. Support for these plans is under
active consideration.
## Check usage and limits
You can check your current token usage and applicable limits using the
`/stats model` command. This command provides a snapshot of your current
session's token usage, as well as information about the limits associated with
your current quota.
For more information on the `/stats` command and its subcommands, see the
[Command Reference](../reference/commands.md#stats).
A summary of model usage is also presented on exit at the end of a session.
## Tips to avoid high costs
When using a pay-as-you-go plan, be mindful of your usage to avoid unexpected
costs.
- **Be selective with suggestions**: Before accepting a suggestion, especially
for a computationally intensive task like refactoring a large codebase,
consider if it's the most cost-effective approach.
- **Use precise prompts**: You are paying per call, so think about the most
efficient way to get your desired result. A well-crafted prompt can often get
you the answer you need in a single call, rather than multiple back-and-forth
interactions.
- **Monitor your usage**: Use the `/stats model` command to track your token
usage during a session. This can help you stay aware of your spending in real
time.
================================================
FILE: docs/resources/tos-privacy.md
================================================
# Gemini CLI: License, Terms of Service, and Privacy Notices
Gemini CLI is an open-source tool that lets you interact with Google's powerful
AI services directly from your command-line interface. The Gemini CLI software
is licensed under the
[Apache 2.0 license](https://github.com/google-gemini/gemini-cli/blob/main/LICENSE).
When you use Gemini CLI to access or use Google’s services, the Terms of Service
and Privacy Notices applicable to those services apply to such access and use.
Directly accessing the services powering Gemini CLI (e.g., the Gemini Code
Assist service) using third-party software, tools, or services (for example,
using OpenClaw with Gemini CLI OAuth) is a violation of applicable terms and
policies. Such actions may be grounds for suspension or termination of your
account.
Your Gemini CLI Usage Statistics are handled in accordance with Google's Privacy
Policy.
> [!NOTE]
> See [quotas and pricing](quota-and-pricing.md) for the quota and
> pricing details that apply to your usage of the Gemini CLI.
## Supported authentication methods
Your authentication method refers to the method you use to log into and access
Google’s services with Gemini CLI. Supported authentication methods include:
- Logging in with your Google account to Gemini Code Assist.
- Using an API key with Gemini Developer API.
- Using an API key with Vertex AI GenAI API.
The Terms of Service and Privacy Notices applicable to the aforementioned Google
services are set forth in the table below.
If you log in with your Google account and you do not already have a Gemini Code
Assist account associated with your Google account, you will be directed to the
sign up flow for Gemini Code Assist for individuals. If your Google account is
managed by your organization, your administrator may not permit access to Gemini
Code Assist for individuals. Please see the
[Gemini Code Assist for individuals FAQs](https://developers.google.com/gemini-code-assist/resources/faqs)
for further information.
| Authentication Method | Service(s) | Terms of Service | Privacy Notice |
| :----------------------- | :--------------------------- | :------------------------------------------------------------------------------------------------------ | :-------------------------------------------------------------------------------------------- |
| Google Account | Gemini Code Assist services | [Terms of Service](https://developers.google.com/gemini-code-assist/resources/privacy-notices) | [Privacy Notices](https://developers.google.com/gemini-code-assist/resources/privacy-notices) |
| Gemini Developer API Key | Gemini API - Unpaid Services | [Gemini API Terms of Service - Unpaid Services](https://ai.google.dev/gemini-api/terms#unpaid-services) | [Google Privacy Policy](https://policies.google.com/privacy) |
| Gemini Developer API Key | Gemini API - Paid Services | [Gemini API Terms of Service - Paid Services](https://ai.google.dev/gemini-api/terms#paid-services) | [Google Privacy Policy](https://policies.google.com/privacy) |
| Vertex AI GenAI API Key | Vertex AI GenAI API | [Google Cloud Platform Terms of Service](https://cloud.google.com/terms/service-terms/) | [Google Cloud Privacy Notice](https://cloud.google.com/terms/cloud-privacy-notice) |
## 1. If you have signed in with your Google account to Gemini Code Assist
For users who use their Google account to access
[Gemini Code Assist](https://codeassist.google), these Terms of Service and
Privacy Notice documents apply:
- Gemini Code Assist for individuals:
[Google Terms of Service](https://policies.google.com/terms) and
[Gemini Code Assist for individuals Privacy Notice](https://developers.google.com/gemini-code-assist/resources/privacy-notice-gemini-code-assist-individuals).
- Gemini Code Assist with Google AI Pro or Ultra subscription:
[Google Terms of Service](https://policies.google.com/terms),
[Google One Additional Terms of Service](https://one.google.com/terms-of-service)
and [Google Privacy Policy\*](https://policies.google.com/privacy).
- Gemini Code Assist Standard and Enterprise editions:
[Google Cloud Platform Terms of Service](https://cloud.google.com/terms) and
[Google Cloud Privacy Notice](https://cloud.google.com/terms/cloud-privacy-notice).
_\* If your account is also associated with an active subscription to Gemini
Code Assist Standard or Enterprise edition, the terms and privacy policy of
Gemini Code Assist Standard or Enterprise edition will apply to all your use of
Gemini Code Assist._
## 2. If you have signed in with a Gemini API key to the Gemini Developer API
If you are using a Gemini API key for authentication with the
[Gemini Developer API](https://ai.google.dev/gemini-api/docs), these Terms of
Service and Privacy Notice documents apply:
- Terms of Service: Your use of the Gemini CLI is governed by the
[Gemini API Terms of Service](https://ai.google.dev/gemini-api/terms). These
terms may differ depending on whether you are using an unpaid or paid service:
- For unpaid services, refer to the
[Gemini API Terms of Service - Unpaid Services](https://ai.google.dev/gemini-api/terms#unpaid-services).
- For paid services, refer to the
[Gemini API Terms of Service - Paid Services](https://ai.google.dev/gemini-api/terms#paid-services).
- Privacy Notice: The collection and use of your data is described in the
[Google Privacy Policy](https://policies.google.com/privacy).
## 3. If you have signed in with a Gemini API key to the Vertex AI GenAI API
If you are using a Gemini API key for authentication with a
[Vertex AI GenAI API](https://cloud.google.com/vertex-ai/generative-ai/docs/reference/rest)
backend, these Terms of Service and Privacy Notice documents apply:
- Terms of Service: Your use of the Gemini CLI is governed by the
[Google Cloud Platform Service Terms](https://cloud.google.com/terms/service-terms/).
- Privacy Notice: The collection and use of your data is described in the
[Google Cloud Privacy Notice](https://cloud.google.com/terms/cloud-privacy-notice).
## Usage statistics opt-out
You may opt-out from sending Gemini CLI Usage Statistics to Google by following
the instructions available here:
[Usage Statistics Configuration](https://github.com/google-gemini/gemini-cli/blob/main/docs/reference/configuration.md#usage-statistics).
================================================
FILE: docs/resources/troubleshooting.md
================================================
# Troubleshooting guide
This guide provides solutions to common issues and debugging tips, including
topics on:
- Authentication or login errors
- Frequently asked questions (FAQs)
- Debugging tips
- Existing GitHub Issues similar to yours or creating new Issues
## Authentication or login errors
- **Error:
`You must be a named user on your organization's Gemini Code Assist Standard edition subscription to use this service. Please contact your administrator to request an entitlement to Gemini Code Assist Standard edition.`**
- **Cause:** This error might occur if Gemini CLI detects the
`GOOGLE_CLOUD_PROJECT` or `GOOGLE_CLOUD_PROJECT_ID` environment variable is
defined. Setting these variables forces an organization subscription check.
This might be an issue if you are using an individual Google account not
linked to an organizational subscription.
- **Solution:**
- **Individual Users:** Unset the `GOOGLE_CLOUD_PROJECT` and
`GOOGLE_CLOUD_PROJECT_ID` environment variables. Check and remove these
variables from your shell configuration files (for example, `.bashrc`,
`.zshrc`) and any `.env` files. If this doesn't resolve the issue, try
using a different Google account.
- **Organizational Users:** Contact your Google Cloud administrator to be
added to your organization's Gemini Code Assist subscription.
- **Error:
`Failed to sign in. Message: Your current account is not eligible... because it is not currently available in your location.`**
- **Cause:** Gemini CLI does not currently support your location. For a full
list of supported locations, see the following pages:
- Gemini Code Assist for individuals:
[Available locations](https://developers.google.com/gemini-code-assist/resources/available-locations#americas)
- **Error: `Failed to sign in. Message: Request contains an invalid argument`**
- **Cause:** Users with Google Workspace accounts or Google Cloud accounts
associated with their Gmail accounts may not be able to activate the free
tier of the Google Code Assist plan.
- **Solution:** For Google Cloud accounts, you can work around this by setting
`GOOGLE_CLOUD_PROJECT` to your project ID. Alternatively, you can obtain the
Gemini API key from
[Google AI Studio](http://aistudio.google.com/app/apikey), which also
includes a separate free tier.
- **Error: `UNABLE_TO_GET_ISSUER_CERT_LOCALLY` or
`unable to get local issuer certificate`**
- **Cause:** You may be on a corporate network with a firewall that intercepts
and inspects SSL/TLS traffic. This often requires a custom root CA
certificate to be trusted by Node.js.
- **Solution:** First try setting `NODE_USE_SYSTEM_CA`; if that does not
resolve the issue, set `NODE_EXTRA_CA_CERTS`.
- Set the `NODE_USE_SYSTEM_CA=1` environment variable to tell Node.js to use
the operating system's native certificate store (where corporate
certificates are typically already installed).
- Example: `export NODE_USE_SYSTEM_CA=1` (Windows PowerShell:
`$env:NODE_USE_SYSTEM_CA=1`)
- Set the `NODE_EXTRA_CA_CERTS` environment variable to the absolute path of
your corporate root CA certificate file.
- Example: `export NODE_EXTRA_CA_CERTS=/path/to/your/corporate-ca.crt`
(Windows PowerShell:
`$env:NODE_EXTRA_CA_CERTS="C:\path\to\your\corporate-ca.crt"`)
## Common error messages and solutions
- **Error: `EADDRINUSE` (Address already in use) when starting an MCP server.**
- **Cause:** Another process is already using the port that the MCP server is
trying to bind to.
- **Solution:** Either stop the other process that is using the port or
configure the MCP server to use a different port.
- **Error: Command not found (when attempting to run Gemini CLI with
`gemini`).**
- **Cause:** Gemini CLI is not correctly installed or it is not in your
system's `PATH`.
- **Solution:** The update depends on how you installed Gemini CLI:
- If you installed `gemini` globally, check that your `npm` global binary
directory is in your `PATH`. You can update Gemini CLI using the command
`npm install -g @google/gemini-cli@latest`.
- If you are running `gemini` from source, ensure you are using the correct
command to invoke it (e.g., `node packages/cli/dist/index.js ...`). To
update Gemini CLI, pull the latest changes from the repository, and then
rebuild using the command `npm run build`.
- **Error: `MODULE_NOT_FOUND` or import errors.**
- **Cause:** Dependencies are not installed correctly, or the project hasn't
been built.
- **Solution:**
1. Run `npm install` to ensure all dependencies are present.
2. Run `npm run build` to compile the project.
3. Verify that the build completed successfully with `npm run start`.
- **Error: "Operation not permitted", "Permission denied", or similar.**
- **Cause:** When sandboxing is enabled, Gemini CLI may attempt operations
that are restricted by your sandbox configuration, such as writing outside
the project directory or system temp directory.
- **Solution:** Refer to the [Configuration: Sandboxing](../cli/sandbox.md)
documentation for more information, including how to customize your sandbox
configuration.
- **Gemini CLI is not running in interactive mode in "CI" environments**
- **Issue:** The Gemini CLI does not enter interactive mode (no prompt
appears) if an environment variable starting with `CI_` (e.g., `CI_TOKEN`)
is set. This is because the `is-in-ci` package, used by the underlying UI
framework, detects these variables and assumes a non-interactive CI
environment.
- **Cause:** The `is-in-ci` package checks for the presence of `CI`,
`CONTINUOUS_INTEGRATION`, or any environment variable with a `CI_` prefix.
When any of these are found, it signals that the environment is
non-interactive, which prevents the Gemini CLI from starting in its
interactive mode.
- **Solution:** If the `CI_` prefixed variable is not needed for the CLI to
function, you can temporarily unset it for the command. e.g.,
`env -u CI_TOKEN gemini`
- **DEBUG mode not working from project .env file**
- **Issue:** Setting `DEBUG=true` in a project's `.env` file doesn't enable
debug mode for gemini-cli.
- **Cause:** The `DEBUG` and `DEBUG_MODE` variables are automatically excluded
from project `.env` files to prevent interference with gemini-cli behavior.
- **Solution:** Use a `.gemini/.env` file instead, or configure the
`advanced.excludedEnvVars` setting in your `settings.json` to exclude fewer
variables.
- **Warning: `npm WARN deprecated node-domexception@1.0.0` or
`npm WARN deprecated glob` during install/update**
- **Issue:** When installing or updating the Gemini CLI globally via
`npm install -g @google/gemini-cli` or `npm update -g @google/gemini-cli`,
you might see deprecation warnings regarding `node-domexception` or old
versions of `glob`.
- **Cause:** These warnings occur because some dependencies (or their
sub-dependencies, like `google-auth-library`) rely on older package
versions. Since Gemini CLI requires Node.js 20 or higher, the platform's
native features (like the native `DOMException`) are used, making these
warnings purely informational.
- **Solution:** These warnings are harmless and can be safely ignored. Your
installation or update will complete successfully and function properly
without any action required.
## Exit codes
The Gemini CLI uses specific exit codes to indicate the reason for termination.
This is especially useful for scripting and automation.
| Exit Code | Error Type | Description |
| --------- | -------------------------- | --------------------------------------------------------------------------------------------------- |
| 41 | `FatalAuthenticationError` | An error occurred during the authentication process. |
| 42 | `FatalInputError` | Invalid or missing input was provided to the CLI. (non-interactive mode only) |
| 44 | `FatalSandboxError` | An error occurred with the sandboxing environment (e.g., Docker, Podman, or Seatbelt). |
| 52 | `FatalConfigError` | A configuration file (`settings.json`) is invalid or contains errors. |
| 53 | `FatalTurnLimitedError` | The maximum number of conversational turns for the session was reached. (non-interactive mode only) |
## Debugging tips
- **CLI debugging:**
- Use the `--debug` flag for more detailed output. In interactive mode, press
F12 to view the debug console.
- Check the CLI logs, often found in a user-specific configuration or cache
directory.
- **Core debugging:**
- Check the server console output for error messages or stack traces.
- Increase log verbosity if configurable. For example, set the `DEBUG_MODE`
environment variable to `true` or `1`.
- Use Node.js debugging tools (e.g., `node --inspect`) if you need to step
through server-side code.
- **Tool issues:**
- If a specific tool is failing, try to isolate the issue by running the
simplest possible version of the command or operation the tool performs.
- For `run_shell_command`, check that the command works directly in your shell
first.
- For _file system tools_, verify that paths are correct and check the
permissions.
- **Pre-flight checks:**
- Always run `npm run preflight` before committing code. This can catch many
common issues related to formatting, linting, and type errors.
## Existing GitHub issues similar to yours or creating new issues
If you encounter an issue that was not covered here in this _Troubleshooting
guide_, consider searching the Gemini CLI
[Issue tracker on GitHub](https://github.com/google-gemini/gemini-cli/issues).
If you can't find an issue similar to yours, consider creating a new GitHub
Issue with a detailed description. Pull requests are also welcome!
> [!NOTE]
> Issues tagged as "🔒Maintainers only" are reserved for project
> maintainers. We will not accept pull requests related to these issues.
================================================
FILE: docs/resources/uninstall.md
================================================
# Uninstalling the CLI
Your uninstall method depends on how you ran the CLI. Follow the instructions
for either npx or a global npm installation.
## Method 1: Using npx
npx runs packages from a temporary cache without a permanent installation. To
"uninstall" the CLI, you must clear this cache, which will remove gemini-cli and
any other packages previously executed with npx.
The npx cache is a directory named `_npx` inside your main npm cache folder. You
can find your npm cache path by running `npm config get cache`.
**For macOS / Linux**
```bash
# The path is typically ~/.npm/_npx
rm -rf "$(npm config get cache)/_npx"
```
**For Windows (PowerShell)**
```powershell
# The path is typically $env:LocalAppData\npm-cache\_npx
Remove-Item -Path (Join-Path $env:LocalAppData "npm-cache\_npx") -Recurse -Force
```
## Method 2: Using npm (global install)
If you installed the CLI globally (e.g., `npm install -g @google/gemini-cli`),
use the `npm uninstall` command with the `-g` flag to remove it.
```bash
npm uninstall -g @google/gemini-cli
```
This command completely removes the package from your system.
## Method 3: Homebrew
If you installed the CLI globally using Homebrew (e.g.,
`brew install gemini-cli`), use the `brew uninstall` command to remove it.
```bash
brew uninstall gemini-cli
```
## Method 4: MacPorts
If you installed the CLI globally using MacPorts (e.g.,
`sudo port install gemini-cli`), use the `port uninstall` command to remove it.
```bash
sudo port uninstall gemini-cli
```
================================================
FILE: docs/sidebar.json
================================================
[
{
"label": "docs_tab",
"items": [
{
"label": "Get started",
"items": [
{ "label": "Overview", "slug": "docs" },
{ "label": "Quickstart", "slug": "docs/get-started" },
{ "label": "Installation", "slug": "docs/get-started/installation" },
{
"label": "Authentication",
"slug": "docs/get-started/authentication"
},
{ "label": "Examples", "slug": "docs/get-started/examples" },
{ "label": "CLI cheatsheet", "slug": "docs/cli/cli-reference" },
{
"label": "Gemini 3 on Gemini CLI",
"slug": "docs/get-started/gemini-3"
}
]
},
{
"label": "Use Gemini CLI",
"items": [
{
"label": "File management",
"slug": "docs/cli/tutorials/file-management"
},
{
"label": "Get started with Agent skills",
"slug": "docs/cli/tutorials/skills-getting-started"
},
{
"label": "Manage context and memory",
"slug": "docs/cli/tutorials/memory-management"
},
{
"label": "Execute shell commands",
"slug": "docs/cli/tutorials/shell-commands"
},
{
"label": "Manage sessions and history",
"slug": "docs/cli/tutorials/session-management"
},
{
"label": "Plan tasks with todos",
"slug": "docs/cli/tutorials/task-planning"
},
{
"label": "Use Plan Mode with model steering",
"badge": "🔬",
"slug": "docs/cli/tutorials/plan-mode-steering"
},
{
"label": "Web search and fetch",
"slug": "docs/cli/tutorials/web-tools"
},
{
"label": "Set up an MCP server",
"slug": "docs/cli/tutorials/mcp-setup"
},
{ "label": "Automate tasks", "slug": "docs/cli/tutorials/automation" }
]
},
{
"label": "Features",
"items": [
{
"label": "Extensions",
"collapsed": true,
"items": [
{
"label": "Overview",
"slug": "docs/extensions"
},
{
"label": "User guide: Install and manage",
"link": "/docs/extensions/#manage-extensions"
},
{
"label": "Developer guide: Build extensions",
"slug": "docs/extensions/writing-extensions"
},
{
"label": "Developer guide: Best practices",
"slug": "docs/extensions/best-practices"
},
{
"label": "Developer guide: Releasing",
"slug": "docs/extensions/releasing"
},
{
"label": "Developer guide: Reference",
"slug": "docs/extensions/reference"
}
]
},
{ "label": "Agent Skills", "slug": "docs/cli/skills" },
{ "label": "Checkpointing", "slug": "docs/cli/checkpointing" },
{ "label": "Headless mode", "slug": "docs/cli/headless" },
{
"label": "Hooks",
"collapsed": true,
"items": [
{ "label": "Overview", "slug": "docs/hooks" },
{ "label": "Reference", "slug": "docs/hooks/reference" }
]
},
{ "label": "IDE integration", "slug": "docs/ide-integration" },
{ "label": "MCP servers", "slug": "docs/tools/mcp-server" },
{ "label": "Model routing", "slug": "docs/cli/model-routing" },
{ "label": "Model selection", "slug": "docs/cli/model" },
{
"label": "Model steering",
"badge": "🔬",
"slug": "docs/cli/model-steering"
},
{
"label": "Notifications",
"badge": "🔬",
"slug": "docs/cli/notifications"
},
{ "label": "Plan mode", "slug": "docs/cli/plan-mode" },
{
"label": "Subagents",
"badge": "🔬",
"slug": "docs/core/subagents"
},
{
"label": "Remote subagents",
"badge": "🔬",
"slug": "docs/core/remote-agents"
},
{ "label": "Rewind", "slug": "docs/cli/rewind" },
{ "label": "Sandboxing", "slug": "docs/cli/sandbox" },
{ "label": "Settings", "slug": "docs/cli/settings" },
{ "label": "Telemetry", "slug": "docs/cli/telemetry" },
{ "label": "Token caching", "slug": "docs/cli/token-caching" }
]
},
{
"label": "Configuration",
"items": [
{ "label": "Custom commands", "slug": "docs/cli/custom-commands" },
{
"label": "Enterprise configuration",
"slug": "docs/cli/enterprise"
},
{
"label": "Ignore files (.geminiignore)",
"slug": "docs/cli/gemini-ignore"
},
{
"label": "Model configuration",
"slug": "docs/cli/generation-settings"
},
{
"label": "Project context (GEMINI.md)",
"slug": "docs/cli/gemini-md"
},
{ "label": "Settings", "slug": "docs/cli/settings" },
{
"label": "System prompt override",
"slug": "docs/cli/system-prompt"
},
{ "label": "Themes", "slug": "docs/cli/themes" },
{ "label": "Trusted folders", "slug": "docs/cli/trusted-folders" }
]
},
{
"label": "Development",
"items": [
{ "label": "Contribution guide", "slug": "docs/contributing" },
{ "label": "Integration testing", "slug": "docs/integration-tests" },
{
"label": "Issue and PR automation",
"slug": "docs/issue-and-pr-automation"
},
{ "label": "Local development", "slug": "docs/local-development" },
{ "label": "NPM package structure", "slug": "docs/npm" }
]
}
]
},
{
"label": "reference_tab",
"items": [
{
"label": "Reference",
"items": [
{ "label": "Command reference", "slug": "docs/reference/commands" },
{
"label": "Configuration reference",
"slug": "docs/reference/configuration"
},
{
"label": "Keyboard shortcuts",
"slug": "docs/reference/keyboard-shortcuts"
},
{
"label": "Memory import processor",
"slug": "docs/reference/memport"
},
{ "label": "Policy engine", "slug": "docs/reference/policy-engine" },
{ "label": "Tools reference", "slug": "docs/reference/tools" }
]
}
]
},
{
"label": "resources_tab",
"items": [
{
"label": "Resources",
"items": [
{ "label": "FAQ", "slug": "docs/resources/faq" },
{
"label": "Quota and pricing",
"slug": "docs/resources/quota-and-pricing"
},
{
"label": "Terms and privacy",
"slug": "docs/resources/tos-privacy"
},
{
"label": "Troubleshooting",
"slug": "docs/resources/troubleshooting"
},
{ "label": "Uninstall", "slug": "docs/resources/uninstall" }
]
}
]
},
{
"label": "releases_tab",
"items": [
{
"label": "Releases",
"items": [
{ "label": "Release notes", "slug": "docs/changelogs/" },
{ "label": "Stable release", "slug": "docs/changelogs/latest" },
{ "label": "Preview release", "slug": "docs/changelogs/preview" }
]
}
]
}
]
================================================
FILE: docs/tools/activate-skill.md
================================================
# Activate skill tool (`activate_skill`)
The `activate_skill` tool lets Gemini CLI load specialized procedural expertise
and resources when they are relevant to your request.
## Description
Skills are packages of instructions and tools designed for specific engineering
tasks, such as reviewing code or creating pull requests. Gemini CLI uses this
tool to "activate" a skill, which provides it with detailed guidelines and
specialized tools tailored to that task.
### Arguments
`activate_skill` takes one argument:
- `name` (enum, required): The name of the skill to activate (for example,
`code-reviewer`, `pr-creator`, or `docs-writer`).
## Usage
The `activate_skill` tool is used exclusively by the Gemini agent. You cannot
invoke this tool manually.
When the agent identifies that a task matches a discovered skill, it requests to
activate that skill. Once activated, the agent's behavior is guided by the
skill's specific instructions until the task is complete.
## Behavior
The agent uses this tool to provide professional-grade assistance:
- **Specialized logic:** Skills contain expert-level procedures for complex
workflows.
- **Dynamic capability:** Activating a skill can grant the agent access to new,
task-specific tools.
- **Contextual awareness:** Skills help the agent focus on the most relevant
standards and conventions for a particular task.
## Next steps
- Learn how to [Use Agent Skills](../cli/skills.md).
- See the [Creating Agent Skills](../cli/creating-skills.md) guide.
================================================
FILE: docs/tools/ask-user.md
================================================
# Ask User Tool
The `ask_user` tool lets Gemini CLI ask you one or more questions to gather
preferences, clarify requirements, or make decisions. It supports multiple
question types including multiple-choice, free-form text, and Yes/No
confirmation.
## `ask_user` (Ask User)
- **Tool name:** `ask_user`
- **Display name:** Ask User
- **File:** `ask-user.ts`
- **Parameters:**
- `questions` (array of objects, required): A list of 1 to 4 questions to ask.
Each question object has the following properties:
- `question` (string, required): The complete question text.
- `header` (string, required): A short label (max 16 chars) displayed as a
chip/tag (e.g., "Auth", "Database").
- `type` (string, optional): The type of question. Defaults to `'choice'`.
- `'choice'`: Multiple-choice with options (supports multi-select).
- `'text'`: Free-form text input.
- `'yesno'`: Yes/No confirmation.
- `options` (array of objects, optional): Required for `'choice'` type. 2-4
selectable options.
- `label` (string, required): Display text (1-5 words).
- `description` (string, required): Brief explanation.
- `multiSelect` (boolean, optional): For `'choice'` type, allows selecting
multiple options. Automatically adds an "All the above" option if there
are multiple standard options.
- `placeholder` (string, optional): Hint text for input fields.
- **Behavior:**
- Presents an interactive dialog to the user with the specified questions.
- Pauses execution until the user provides answers or dismisses the dialog.
- Returns the user's answers to the model.
- **Output (`llmContent`):** A JSON string containing the user's answers,
indexed by question position (e.g.,
`{"answers":{"0": "Option A", "1": "Some text"}}`).
- **Confirmation:** Yes. The tool inherently involves user interaction.
## Usage Examples
### Multiple Choice Question
```json
{
"questions": [
{
"header": "Database",
"question": "Which database would you like to use?",
"type": "choice",
"options": [
{
"label": "PostgreSQL",
"description": "Powerful, open source object-relational database system."
},
{
"label": "SQLite",
"description": "C-library that implements a SQL database engine."
}
]
}
]
}
```
### Text Input Question
```json
{
"questions": [
{
"header": "Project Name",
"question": "What is the name of your new project?",
"type": "text",
"placeholder": "e.g., my-awesome-app"
}
]
}
```
### Yes/No Question
```json
{
"questions": [
{
"header": "Deploy",
"question": "Do you want to deploy the application now?",
"type": "yesno"
}
]
}
```
================================================
FILE: docs/tools/file-system.md
================================================
# File system tools reference
The Gemini CLI core provides a suite of tools for interacting with the local
file system. These tools allow the model to explore and modify your codebase.
## Technical reference
All file system tools operate within a `rootDirectory` (the current working
directory or workspace root) for security.
### `list_directory` (ReadFolder)
Lists the names of files and subdirectories directly within a specified path.
- **Tool name:** `list_directory`
- **Arguments:**
- `dir_path` (string, required): Absolute or relative path to the directory.
- `ignore` (array, optional): Glob patterns to exclude.
- `file_filtering_options` (object, optional): Configuration for `.gitignore`
and `.geminiignore` compliance.
### `read_file` (ReadFile)
Reads and returns the content of a specific file. Supports text, images, audio,
and PDF.
- **Tool name:** `read_file`
- **Arguments:**
- `file_path` (string, required): Path to the file.
- `offset` (number, optional): Start line for text files (0-based).
- `limit` (number, optional): Maximum lines to read.
### `write_file` (WriteFile)
Writes content to a specified file, overwriting it if it exists or creating it
if not.
- **Tool name:** `write_file`
- **Arguments:**
- `file_path` (string, required): Path to the file.
- `content` (string, required): Data to write.
- **Confirmation:** Requires manual user approval.
### `glob` (FindFiles)
Finds files matching specific glob patterns across the workspace.
- **Tool name:** `glob`
- **Display name:** FindFiles
- **File:** `glob.ts`
- **Parameters:**
- `pattern` (string, required): The glob pattern to match against (e.g.,
`"*.py"`, `"src/**/*.js"`).
- `path` (string, optional): The absolute path to the directory to search
within. If omitted, searches the tool's root directory.
- `case_sensitive` (boolean, optional): Whether the search should be
case-sensitive. Defaults to `false`.
- `respect_git_ignore` (boolean, optional): Whether to respect .gitignore
patterns when finding files. Defaults to `true`.
- **Behavior:**
- Searches for files matching the glob pattern within the specified directory.
- Returns a list of absolute paths, sorted with the most recently modified
files first.
- Ignores common nuisance directories like `node_modules` and `.git` by
default.
- **Output (`llmContent`):** A message like:
`Found 5 file(s) matching "*.ts" within src, sorted by modification time (newest first):\nsrc/file1.ts\nsrc/subdir/file2.ts...`
- **Confirmation:** No.
### `grep_search` (SearchText)
`grep_search` searches for a regular expression pattern within the content of
files in a specified directory. Can filter files by a glob pattern. Returns the
lines containing matches, along with their file paths and line numbers.
- **Tool name:** `grep_search`
- **Display name:** SearchText
- **File:** `grep.ts`
- **Parameters:**
- `pattern` (string, required): The regular expression (regex) to search for
(e.g., `"function\s+myFunction"`).
- `path` (string, optional): The absolute path to the directory to search
within. Defaults to the current working directory.
- `include` (string, optional): A glob pattern to filter which files are
searched (e.g., `"*.js"`, `"src/**/*.{ts,tsx}"`). If omitted, searches most
files (respecting common ignores).
- **Behavior:**
- Uses `git grep` if available in a Git repository for speed; otherwise, falls
back to system `grep` or a JavaScript-based search.
- Returns a list of matching lines, each prefixed with its file path (relative
to the search directory) and line number.
- **Output (`llmContent`):** A formatted string of matches, e.g.:
```
Found 3 matches for pattern "myFunction" in path "." (filter: "*.ts"):
---
File: src/utils.ts
L15: export function myFunction() {
L22: myFunction.call();
---
File: src/index.ts
L5: import { myFunction } from './utils';
---
```
- **Confirmation:** No.
### `replace` (Edit)
`replace` replaces text within a file. By default, the tool expects to find and
replace exactly ONE occurrence of `old_string`. If you want to replace multiple
occurrences of the exact same string, set `allow_multiple` to `true`. This tool
is designed for precise, targeted changes and requires significant context
around the `old_string` to ensure it modifies the correct location.
- **Tool name:** `replace`
- **Arguments:**
- `file_path` (string, required): Path to the file.
- `instruction` (string, required): Semantic description of the change.
- `old_string` (string, required): Exact literal text to find.
- `new_string` (string, required): Exact literal text to replace with.
- `allow_multiple` (boolean, optional): If `true`, replaces all occurrences.
If `false` (default), only succeeds if exactly one occurrence is found.
- **Confirmation:** Requires manual user approval.
## Next steps
- Follow the [File management tutorial](../cli/tutorials/file-management.md) for
practical examples.
- Learn about [Trusted folders](../cli/trusted-folders.md) to manage access
permissions.
================================================
FILE: docs/tools/internal-docs.md
================================================
# Internal documentation tool (`get_internal_docs`)
The `get_internal_docs` tool lets Gemini CLI access its own technical
documentation to provide more accurate answers about its capabilities and usage.
## Description
This tool is used when Gemini CLI needs to verify specific details about Gemini
CLI's internal features, built-in commands, or configuration options. It
provides direct access to the Markdown files in the `docs/` directory.
### Arguments
`get_internal_docs` takes one optional argument:
- `path` (string, optional): The relative path to a specific documentation file
(for example, `reference/commands.md`). If omitted, the tool returns a list of
all available documentation paths.
## Usage
The `get_internal_docs` tool is used exclusively by Gemini CLI. You cannot
invoke this tool manually.
When Gemini CLI uses this tool, it retrieves the content of the requested
documentation file and processes it to answer your question. This ensures that
the information provided by the AI is grounded in the latest project
documentation.
## Behavior
Gemini CLI uses this tool to ensure technical accuracy:
- **Capability discovery:** If Gemini CLI is unsure how a feature works, it can
  look up the corresponding documentation.
- **Reference lookup:** Gemini CLI can verify slash command sub-commands or
specific setting names.
- **Self-correction:** Gemini CLI can use the documentation to correct its
understanding of Gemini CLI's system logic.
## Next steps
- Explore the [Command reference](../reference/commands.md) for a detailed guide
to slash commands.
- See the [Configuration guide](../reference/configuration.md) for settings
reference.
================================================
FILE: docs/tools/mcp-server.md
================================================
# MCP servers with the Gemini CLI
This document provides a guide to configuring and using Model Context Protocol
(MCP) servers with the Gemini CLI.
## What is an MCP server?
An MCP server is an application that exposes tools and resources to the Gemini
CLI through the Model Context Protocol, allowing it to interact with external
systems and data sources. MCP servers act as a bridge between the Gemini model
and your local environment or other services like APIs.
An MCP server enables the Gemini CLI to:
- **Discover tools:** List available tools, their descriptions, and parameters
through standardized schema definitions.
- **Execute tools:** Call specific tools with defined arguments and receive
structured responses.
- **Access resources:** Read data from specific resources that the server
exposes (files, API payloads, reports, etc.).
With an MCP server, you can extend the Gemini CLI's capabilities to perform
actions beyond its built-in features, such as interacting with databases, APIs,
custom scripts, or specialized workflows.
## Core integration architecture
The Gemini CLI integrates with MCP servers through a sophisticated discovery and
execution system built into the core package (`packages/core/src/tools/`):
### Discovery Layer (`mcp-client.ts`)
The discovery process is orchestrated by `discoverMcpTools()`, which:
1. **Iterates through configured servers** from your `settings.json`
`mcpServers` configuration
2. **Establishes connections** using appropriate transport mechanisms (Stdio,
SSE, or Streamable HTTP)
3. **Fetches tool definitions** from each server using the MCP protocol
4. **Sanitizes and validates** tool schemas for compatibility with the Gemini
API
5. **Registers tools** in the global tool registry with conflict resolution
6. **Fetches and registers resources** if the server exposes any
### Execution layer (`mcp-tool.ts`)
Each discovered MCP tool is wrapped in a `DiscoveredMCPTool` instance that:
- **Handles confirmation logic** based on server trust settings and user
preferences
- **Manages tool execution** by calling the MCP server with proper parameters
- **Processes responses** for both the LLM context and user display
- **Maintains connection state** and handles timeouts
### Transport mechanisms
The Gemini CLI supports three MCP transport types:
- **Stdio Transport:** Spawns a subprocess and communicates via stdin/stdout
- **SSE Transport:** Connects to Server-Sent Events endpoints
- **Streamable HTTP Transport:** Uses HTTP streaming for communication
## Working with MCP resources
Some MCP servers expose contextual “resources” in addition to tools and
prompts. Gemini CLI discovers these automatically and lets you reference them
directly in the chat.
### Discovery and listing
- When discovery runs, the CLI fetches each server’s `resources/list` results.
- The `/mcp` command displays a Resources section alongside Tools and Prompts
for every connected server.
The Resources section shows a concise, plain-text list of resource URIs plus
their metadata.
### Referencing resources in a conversation
You can use the same `@` syntax already known for referencing local files:
```
@server://resource/path
```
Resource URIs appear in the completion menu together with filesystem paths. When
you submit the message, the CLI calls `resources/read` and injects the content
in the conversation.
## How to set up your MCP server
The Gemini CLI uses the `mcpServers` configuration in your `settings.json` file
to locate and connect to MCP servers. This configuration supports multiple
servers with different transport mechanisms.
### Configure the MCP server in settings.json
You can configure MCP servers in your `settings.json` file in two main ways:
through the top-level `mcpServers` object for specific server definitions, and
through the `mcp` object for global settings that control server discovery and
execution.
#### Global MCP settings (`mcp`)
The `mcp` object in your `settings.json` lets you define global rules for all
MCP servers.
- **`mcp.serverCommand`** (string): A global command to start an MCP server.
- **`mcp.allowed`** (array of strings): A list of MCP server names to allow. If
this is set, only servers from this list (matching the keys in the
`mcpServers` object) will be connected to.
- **`mcp.excluded`** (array of strings): A list of MCP server names to exclude.
Servers in this list will not be connected to.
**Example:**
```json
{
"mcp": {
"allowed": ["my-trusted-server"],
"excluded": ["experimental-server"]
}
}
```
#### Server-specific configuration (`mcpServers`)
The `mcpServers` object is where you define each individual MCP server you want
the CLI to connect to.
### Configuration structure
Add an `mcpServers` object to your `settings.json` file:
```json
{ ...file contains other config objects
"mcpServers": {
"serverName": {
"command": "path/to/server",
"args": ["--arg1", "value1"],
"env": {
"API_KEY": "$MY_API_TOKEN"
},
"cwd": "./server-directory",
"timeout": 30000,
"trust": false
}
}
}
```
### Configuration properties
Each server configuration supports the following properties:
#### Required (one of the following)
- **`command`** (string): Path to the executable for Stdio transport
- **`url`** (string): SSE endpoint URL (e.g., `"http://localhost:8080/sse"`)
- **`httpUrl`** (string): HTTP streaming endpoint URL
#### Optional
- **`args`** (string[]): Command-line arguments for Stdio transport
- **`headers`** (object): Custom HTTP headers when using `url` or `httpUrl`
- **`env`** (object): Environment variables for the server process. Values can
reference environment variables using `$VAR_NAME` or `${VAR_NAME}` syntax (all
platforms), or `%VAR_NAME%` (Windows only).
- **`cwd`** (string): Working directory for Stdio transport
- **`timeout`** (number): Request timeout in milliseconds (default: 600,000ms =
10 minutes)
- **`trust`** (boolean): When `true`, bypasses all tool call confirmations for
this server (default: `false`)
- **`includeTools`** (string[]): List of tool names to include from this MCP
server. When specified, only the tools listed here will be available from this
server (allowlist behavior). If not specified, all tools from the server are
enabled by default.
- **`excludeTools`** (string[]): List of tool names to exclude from this MCP
server. Tools listed here will not be available to the model, even if they are
exposed by the server. `excludeTools` takes precedence over `includeTools`. If
a tool is in both lists, it will be excluded.
- **`targetAudience`** (string): The OAuth Client ID allowlisted on the
IAP-protected application you are trying to access. Used with
`authProviderType: 'service_account_impersonation'`.
- **`targetServiceAccount`** (string): The email address of the Google Cloud
Service Account to impersonate. Used with
`authProviderType: 'service_account_impersonation'`.
### Environment variable expansion
Gemini CLI automatically expands environment variables in the `env` block of
your MCP server configuration. This allows you to securely reference variables
defined in your shell or environment without hardcoding sensitive information
directly in your `settings.json` file.
The expansion utility supports:
- **POSIX/Bash syntax:** `$VARIABLE_NAME` or `${VARIABLE_NAME}` (supported on
all platforms)
- **Windows syntax:** `%VARIABLE_NAME%` (supported only when running on Windows)
If a variable is not defined in the current environment, it resolves to an empty
string.
**Example:**
```json
"env": {
"API_KEY": "$MY_EXTERNAL_TOKEN",
"LOG_LEVEL": "$LOG_LEVEL",
"TEMP_DIR": "%TEMP%"
}
```
### Security and environment sanitization
To protect your credentials, Gemini CLI performs environment sanitization when
spawning MCP server processes.
#### Automatic redaction
By default, the CLI redacts sensitive environment variables from the base
environment (inherited from the host process) to prevent unintended exposure to
third-party MCP servers. This includes:
- Core project keys: `GEMINI_API_KEY`, `GOOGLE_API_KEY`, etc.
- Variables matching sensitive patterns: `*TOKEN*`, `*SECRET*`, `*PASSWORD*`,
`*KEY*`, `*AUTH*`, `*CREDENTIAL*`.
- Certificates and private key patterns.
#### Explicit overrides
If an environment variable must be passed to an MCP server, you must explicitly
state it in the `env` property of the server configuration in `settings.json`.
Explicitly defined variables (including those from extensions) are trusted and
are **not** subjected to the automatic redaction process.
This follows the security principle that if a variable is explicitly configured
by the user for a specific server, it constitutes informed consent to share that
specific data with that server.
> [!NOTE]
> Even when explicitly defined, you should avoid hardcoding secrets.
> Instead, use environment variable expansion (e.g., `"MY_KEY": "$MY_KEY"`) to
> securely pull the value from your host environment at runtime.
### OAuth support for remote MCP servers
The Gemini CLI supports OAuth 2.0 authentication for remote MCP servers using
SSE or HTTP transports. This enables secure access to MCP servers that require
authentication.
#### Automatic OAuth discovery
For servers that support OAuth discovery, you can omit the OAuth configuration
and let the CLI discover it automatically:
```json
{
"mcpServers": {
"discoveredServer": {
"url": "https://api.example.com/sse"
}
}
}
```
The CLI will automatically:
- Detect when a server requires OAuth authentication (401 responses)
- Discover OAuth endpoints from server metadata
- Perform dynamic client registration if supported
- Handle the OAuth flow and token management
#### Authentication flow
When connecting to an OAuth-enabled server:
1. **Initial connection attempt** fails with 401 Unauthorized
2. **OAuth discovery** finds authorization and token endpoints
3. **Browser opens** for user authentication (requires local browser access)
4. **Authorization code** is exchanged for access tokens
5. **Tokens are stored** securely for future use
6. **Connection retry** succeeds with valid tokens
#### Browser redirect requirements
> [!IMPORTANT]
> OAuth authentication requires that your local machine can:
>
> - Open a web browser for authentication
> - Receive redirects on `http://localhost:7777/oauth/callback`
This feature will not work in:
- Headless environments without browser access
- Remote SSH sessions without X11 forwarding
- Containerized environments without browser support
#### Managing OAuth authentication
Use the `/mcp auth` command to manage OAuth authentication:
```bash
# List servers requiring authentication
/mcp auth
# Authenticate with a specific server
/mcp auth serverName
# Re-authenticate if tokens expire
/mcp auth serverName
```
#### OAuth configuration properties
- **`enabled`** (boolean): Enable OAuth for this server
- **`clientId`** (string): OAuth client identifier (optional with dynamic
registration)
- **`clientSecret`** (string): OAuth client secret (optional for public clients)
- **`authorizationUrl`** (string): OAuth authorization endpoint (auto-discovered
if omitted)
- **`tokenUrl`** (string): OAuth token endpoint (auto-discovered if omitted)
- **`scopes`** (string[]): Required OAuth scopes
- **`redirectUri`** (string): Custom redirect URI (defaults to
`http://localhost:7777/oauth/callback`)
- **`tokenParamName`** (string): Query parameter name for tokens in SSE URLs
- **`audiences`** (string[]): Audiences the token is valid for
#### Token management
OAuth tokens are automatically:
- **Stored securely** in `~/.gemini/mcp-oauth-tokens.json`
- **Refreshed** when expired (if refresh tokens are available)
- **Validated** before each connection attempt
- **Cleaned up** when invalid or expired
#### Authentication provider type
You can specify the authentication provider type using the `authProviderType`
property:
- **`authProviderType`** (string): Specifies the authentication provider. Can be
one of the following:
- **`dynamic_discovery`** (default): The CLI will automatically discover the
OAuth configuration from the server.
- **`google_credentials`**: The CLI will use the Google Application Default
Credentials (ADC) to authenticate with the server. When using this provider,
you must specify the required scopes.
- **`service_account_impersonation`**: The CLI will impersonate a Google Cloud
Service Account to authenticate with the server. This is useful for
accessing IAP-protected services (this was specifically designed for Cloud
Run services).
#### Google credentials
```json
{
"mcpServers": {
"googleCloudServer": {
"httpUrl": "https://my-gcp-service.run.app/mcp",
"authProviderType": "google_credentials",
"oauth": {
"scopes": ["https://www.googleapis.com/auth/userinfo.email"]
}
}
}
}
```
#### Service account impersonation
To authenticate with a server using Service Account Impersonation, you must set
the `authProviderType` to `service_account_impersonation` and provide the
following properties:
- **`targetAudience`** (string): The OAuth Client ID allowlisted on the
IAP-protected application you are trying to access.
- **`targetServiceAccount`** (string): The email address of the Google Cloud
Service Account to impersonate.
The CLI will use your local Application Default Credentials (ADC) to generate an
OIDC ID token for the specified service account and audience. This token will
then be used to authenticate with the MCP server.
#### Setup instructions
1. **[Create](https://cloud.google.com/iap/docs/oauth-client-creation) or use an
existing OAuth 2.0 client ID.** To use an existing OAuth 2.0 client ID,
follow the steps in
[How to share OAuth Clients](https://cloud.google.com/iap/docs/sharing-oauth-clients).
2. **Add the OAuth ID to the allowlist for
[programmatic access](https://cloud.google.com/iap/docs/sharing-oauth-clients#programmatic_access)
for the application.** Since Cloud Run is not yet a supported resource type
in gcloud iap, you must allowlist the Client ID on the project.
3. **Create a service account.**
[Documentation](https://cloud.google.com/iam/docs/service-accounts-create#creating),
[Cloud Console Link](https://console.cloud.google.com/iam-admin/serviceaccounts)
4. **Add both the service account and users to the IAP Policy** in the
"Security" tab of the Cloud Run service itself or via gcloud.
5. **Grant all users and groups** who will access the MCP Server the necessary
permissions to
[impersonate the service account](https://cloud.google.com/docs/authentication/use-service-account-impersonation)
(i.e., `roles/iam.serviceAccountTokenCreator`).
6. **[Enable](https://console.cloud.google.com/apis/library/iamcredentials.googleapis.com)
the IAM Credentials API** for your project.
### Example configurations
#### Python MCP server (stdio)
```json
{
"mcpServers": {
"pythonTools": {
"command": "python",
"args": ["-m", "my_mcp_server", "--port", "8080"],
"cwd": "./mcp-servers/python",
"env": {
"DATABASE_URL": "$DB_CONNECTION_STRING",
"API_KEY": "${EXTERNAL_API_KEY}"
},
"timeout": 15000
}
}
}
```
#### Node.js MCP server (stdio)
```json
{
"mcpServers": {
"nodeServer": {
"command": "node",
"args": ["dist/server.js", "--verbose"],
"cwd": "./mcp-servers/node",
"trust": true
}
}
}
```
#### Docker-based MCP server
```json
{
"mcpServers": {
"dockerizedServer": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"-e",
"API_KEY",
"-v",
"${PWD}:/workspace",
"my-mcp-server:latest"
],
"env": {
"API_KEY": "$EXTERNAL_SERVICE_TOKEN"
}
}
}
}
```
#### HTTP-based MCP server
```json
{
"mcpServers": {
"httpServer": {
"httpUrl": "http://localhost:3000/mcp",
"timeout": 5000
}
}
}
```
#### HTTP-based MCP Server with custom headers
```json
{
"mcpServers": {
"httpServerWithAuth": {
"httpUrl": "http://localhost:3000/mcp",
"headers": {
"Authorization": "Bearer your-api-token",
"X-Custom-Header": "custom-value",
"Content-Type": "application/json"
},
"timeout": 5000
}
}
}
```
#### MCP server with tool filtering
```json
{
"mcpServers": {
"filteredServer": {
"command": "python",
"args": ["-m", "my_mcp_server"],
"includeTools": ["safe_tool", "file_reader", "data_processor"],
// "excludeTools": ["dangerous_tool", "file_deleter"],
"timeout": 30000
}
}
}
```
#### SSE MCP server with SA impersonation
```json
{
"mcpServers": {
"myIapProtectedServer": {
"url": "https://my-iap-service.run.app/sse",
"authProviderType": "service_account_impersonation",
"targetAudience": "YOUR_IAP_CLIENT_ID.apps.googleusercontent.com",
"targetServiceAccount": "your-sa@your-project.iam.gserviceaccount.com"
}
}
}
```
## Discovery process deep dive
When the Gemini CLI starts, it performs MCP server discovery through the
following detailed process:
### 1. Server iteration and connection
For each configured server in `mcpServers`:
1. **Status tracking begins:** Server status is set to `CONNECTING`
2. **Transport selection:** Based on configuration properties:
- `httpUrl` → `StreamableHTTPClientTransport`
- `url` → `SSEClientTransport`
- `command` → `StdioClientTransport`
3. **Connection establishment:** The MCP client attempts to connect with the
configured timeout
4. **Error handling:** Connection failures are logged and the server status is
set to `DISCONNECTED`
### 2. Tool discovery
Upon successful connection:
1. **Tool listing:** The client calls the MCP server's tool listing endpoint
2. **Schema validation:** Each tool's function declaration is validated
3. **Tool filtering:** Tools are filtered based on `includeTools` and
`excludeTools` configuration
4. **Name sanitization:** Tool names are cleaned to meet Gemini API
requirements:
- Characters other than letters, numbers, underscore (`_`), hyphen (`-`), dot
(`.`), and colon (`:`) are replaced with underscores
- Names longer than 63 characters are truncated with middle replacement
(`...`)
### 3. Tool naming and namespaces
To prevent collisions across multiple servers or conflicting built-in tools,
every discovered MCP tool is assigned a strict namespace.
1. **Automatic FQN:** All MCP tools are unconditionally assigned a fully
qualified name (FQN) using the format `mcp_{serverName}_{toolName}`.
2. **Registry tracking:** The tool registry maintains metadata mappings between
these FQNs and their original server identities.
3. **Overwrites:** If two servers share the exact same alias in your
configuration and provide tools with the exact same name, the last registered
tool overwrites the previous one.
4. **Policies:** To configure permissions (like auto-approval or denial) for MCP
tools, see
[Special syntax for MCP tools](../reference/policy-engine.md#special-syntax-for-mcp-tools)
in the Policy Engine documentation.
> [!WARNING]
> Do not use underscores (`_`) in your MCP server names (e.g., use
> `my-server` rather than `my_server`). The policy parser splits Fully Qualified
> Names (`mcp_server_tool`) on the _first_ underscore following the `mcp_`
> prefix. If your server name contains an underscore, the parser will
> misinterpret the server identity, which can cause wildcard rules and security
> policies to fail silently.
### 4. Schema processing
Tool parameter schemas undergo sanitization for Gemini API compatibility:
- **`$schema` properties** are removed
- **`additionalProperties`** are stripped
- **`anyOf` with `default`** have their default values removed (Vertex AI
compatibility)
- **Recursive processing** applies to nested schemas
### 5. Connection management
After discovery:
- **Persistent connections:** Servers that successfully register tools maintain
their connections
- **Cleanup:** Servers that provide no usable tools have their connections
closed
- **Status updates:** Final server statuses are set to `CONNECTED` or
`DISCONNECTED`
## Tool execution flow
When the Gemini model decides to use an MCP tool, the following execution flow
occurs:
### 1. Tool invocation
The model generates a `FunctionCall` with:
- **Tool name:** The registered name (potentially prefixed)
- **Arguments:** JSON object matching the tool's parameter schema
### 2. Confirmation process
Each `DiscoveredMCPTool` implements sophisticated confirmation logic:
#### Trust-based bypass
```typescript
if (this.trust) {
return false; // No confirmation needed
}
```
#### Dynamic allow-listing
The system maintains internal allow-lists for:
- **Server-level:** `serverName` → All tools from this server are trusted
- **Tool-level:** `serverName.toolName` → This specific tool is trusted
#### User choice handling
When confirmation is required, users can choose:
- **Proceed once:** Execute this time only
- **Always allow this tool:** Add to tool-level allow-list
- **Always allow this server:** Add to server-level allow-list
- **Cancel:** Abort execution
### 3. Execution
Upon confirmation (or trust bypass):
1. **Parameter preparation:** Arguments are validated against the tool's schema
2. **MCP call:** The underlying `CallableTool` invokes the server with:
```typescript
const functionCalls = [
{
name: this.serverToolName, // Original server tool name
args: params,
},
];
```
3. **Response processing:** Results are formatted for both LLM context and user
display
### 4. Response handling
The execution result contains:
- **`llmContent`:** Raw response parts for the language model's context
- **`returnDisplay`:** Formatted output for user display (often JSON in markdown
code blocks)
## How to interact with your MCP server
### Using the `/mcp` command
The `/mcp` command provides comprehensive information about your MCP server
setup:
```bash
/mcp
```
This displays:
- **Server list:** All configured MCP servers
- **Connection status:** `CONNECTED`, `CONNECTING`, or `DISCONNECTED`
- **Server details:** Configuration summary (excluding sensitive data)
- **Available tools:** List of tools from each server with descriptions
- **Discovery state:** Overall discovery process status
### Example `/mcp` output
```
MCP Servers Status:
📡 pythonTools (CONNECTED)
Command: python -m my_mcp_server --port 8080
Working Directory: ./mcp-servers/python
Timeout: 15000ms
Tools: calculate_sum, file_analyzer, data_processor
🔌 nodeServer (DISCONNECTED)
Command: node dist/server.js --verbose
Error: Connection refused
🐳 dockerizedServer (CONNECTED)
Command: docker run -i --rm -e API_KEY my-mcp-server:latest
Tools: mcp_dockerizedServer_docker_deploy, mcp_dockerizedServer_docker_status
Discovery State: COMPLETED
```
### Tool usage
Once discovered, MCP tools are available to the Gemini model like built-in
tools. The model will automatically:
1. **Select appropriate tools** based on your requests
2. **Present confirmation dialogs** (unless the server is trusted)
3. **Execute tools** with proper parameters
4. **Display results** in a user-friendly format
## Status monitoring and troubleshooting
### Connection states
The MCP integration tracks several states:
#### Overriding extension configurations
If an MCP server is provided by an extension (for example, the
`google-workspace` extension), you can still override its settings in your local
`settings.json`. Gemini CLI merges your local configuration with the extension's
defaults:
- **Tool lists:** Tool lists are merged securely to ensure the most restrictive
policy wins:
- **Exclusions (`excludeTools`):** Arrays are combined (unioned). If either
source blocks a tool, it remains disabled.
- **Inclusions (`includeTools`):** Arrays are intersected. If both sources
provide an allowlist, only tools present in **both** lists are enabled. If
only one source provides an allowlist, that list is respected.
- **Precedence:** `excludeTools` always takes precedence over `includeTools`.
This ensures you always have veto power over tools provided by an extension
and that an extension cannot re-enable tools you have omitted from your
personal allowlist.
- **Environment variables:** The `env` objects are merged. If the same variable
is defined in both places, your local value takes precedence.
- **Scalar properties:** Properties like `command`, `url`, and `timeout` are
replaced by your local values if provided.
**Example override:**
```json
{
"mcpServers": {
"google-workspace": {
"excludeTools": ["gmail.send"]
}
}
}
```
#### Server status (`MCPServerStatus`)
- **`DISCONNECTED`:** Server is not connected or has errors
- **`CONNECTING`:** Connection attempt in progress
- **`CONNECTED`:** Server is connected and ready
#### Discovery state (`MCPDiscoveryState`)
- **`NOT_STARTED`:** Discovery hasn't begun
- **`IN_PROGRESS`:** Currently discovering servers
- **`COMPLETED`:** Discovery finished (with or without errors)
### Common issues and solutions
#### Server won't connect
**Symptoms:** Server shows `DISCONNECTED` status
**Troubleshooting:**
1. **Check configuration:** Verify `command`, `args`, and `cwd` are correct
2. **Test manually:** Run the server command directly to ensure it works
3. **Check dependencies:** Ensure all required packages are installed
4. **Review logs:** Look for error messages in the CLI output
5. **Verify permissions:** Ensure the CLI can execute the server command
#### No tools discovered
**Symptoms:** Server connects but no tools are available
**Troubleshooting:**
1. **Verify tool registration:** Ensure your server actually registers tools
2. **Check MCP protocol:** Confirm your server implements the MCP tool listing
correctly
3. **Review server logs:** Check stderr output for server-side errors
4. **Test tool listing:** Manually test your server's tool discovery endpoint
#### Tools not executing
**Symptoms:** Tools are discovered but fail during execution
**Troubleshooting:**
1. **Parameter validation:** Ensure your tool accepts the expected parameters
2. **Schema compatibility:** Verify your input schemas are valid JSON Schema
3. **Error handling:** Check if your tool is throwing unhandled exceptions
4. **Timeout issues:** Consider increasing the `timeout` setting
#### Sandbox compatibility
**Symptoms:** MCP servers fail when sandboxing is enabled
**Solutions:**
1. **Docker-based servers:** Use Docker containers that include all dependencies
2. **Path accessibility:** Ensure server executables are available in the
sandbox
3. **Network access:** Configure sandbox to allow necessary network connections
4. **Environment variables:** Verify required environment variables are passed
through
### Debugging tips
1. **Enable debug mode:** Run the CLI with `--debug` for verbose output (use F12
to open debug console in interactive mode)
2. **Check stderr:** MCP server stderr is captured and logged (INFO messages
filtered)
3. **Test isolation:** Test your MCP server independently before integrating
4. **Incremental setup:** Start with simple tools before adding complex
functionality
5. **Use `/mcp` frequently:** Monitor server status during development
## Important notes
### Security considerations
- **Trust settings:** The `trust` option bypasses all confirmation dialogs. Use
cautiously and only for servers you completely control
- **Access tokens:** Be security-aware when configuring environment variables
containing API keys or tokens. See
[Security and environment sanitization](#security-and-environment-sanitization)
for details on how Gemini CLI protects your credentials.
- **Sandbox compatibility:** When using sandboxing, ensure MCP servers are
available within the sandbox environment
- **Private data:** Using broadly scoped personal access tokens can lead to
information leakage between repositories.
### Performance and resource management
- **Connection persistence:** The CLI maintains persistent connections to
servers that successfully register tools
- **Automatic cleanup:** Connections to servers providing no tools are
automatically closed
- **Timeout management:** Configure appropriate timeouts based on your server's
response characteristics
- **Resource monitoring:** MCP servers run as separate processes and consume
system resources
### Schema compatibility
- **Property stripping:** The system automatically removes certain schema
properties (`$schema`, `additionalProperties`) for Gemini API compatibility
- **Name sanitization:** Tool names are automatically sanitized to meet API
requirements
- **Conflict resolution:** Tool name conflicts between servers are resolved
through automatic prefixing
This comprehensive integration makes MCP servers a powerful way to extend the
Gemini CLI's capabilities while maintaining security, reliability, and ease of
use.
## Returning rich content from tools
MCP tools are not limited to returning simple text. You can return rich,
multi-part content, including text, images, audio, and other binary data in a
single tool response. This allows you to build powerful tools that can provide
diverse information to the model in a single turn.
All data returned from the tool is processed and sent to the model as context
for its next generation, enabling it to reason about or summarize the provided
information.
### How it works
To return rich content, your tool's response must adhere to the MCP
specification for a
[`CallToolResult`](https://modelcontextprotocol.io/specification/2025-06-18/server/tools#tool-result).
The `content` field of the result should be an array of `ContentBlock` objects.
The Gemini CLI will correctly process this array, separating text from binary
data and packaging it for the model.
You can mix and match different content block types in the `content` array. The
supported block types include:
- `text`
- `image`
- `audio`
- `resource` (embedded content)
- `resource_link`
### Example: Returning text and an image
Here is an example of a valid JSON response from an MCP tool that returns both a
text description and an image:
```json
{
"content": [
{
"type": "text",
"text": "Here is the logo you requested."
},
{
"type": "image",
"data": "BASE64_ENCODED_IMAGE_DATA_HERE",
"mimeType": "image/png"
},
{
"type": "text",
"text": "The logo was created in 2025."
}
]
}
```
When the Gemini CLI receives this response, it will:
1. Extract all the text and combine it into a single `functionResponse` part
for the model.
2. Present the image data as a separate `inlineData` part.
3. Provide a clean, user-friendly summary in the CLI, indicating that both text
and an image were received.
This enables you to build sophisticated tools that can provide rich, multi-modal
context to the Gemini model.
## MCP prompts as slash commands
In addition to tools, MCP servers can expose predefined prompts that can be
executed as slash commands within the Gemini CLI. This allows you to create
shortcuts for common or complex queries that can be easily invoked by name.
### Defining prompts on the server
Here's a small example of a stdio MCP server that defines prompts:
```ts
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { z } from 'zod';
const server = new McpServer({
name: 'prompt-server',
version: '1.0.0',
});
server.registerPrompt(
'poem-writer',
{
title: 'Poem Writer',
description: 'Write a nice haiku',
argsSchema: { title: z.string(), mood: z.string().optional() },
},
({ title, mood }) => ({
messages: [
{
role: 'user',
content: {
type: 'text',
text: `Write a haiku${mood ? ` with the mood ${mood}` : ''} called ${title}. Note that a haiku is 5 syllables followed by 7 syllables followed by 5 syllables `,
},
},
],
}),
);
const transport = new StdioServerTransport();
await server.connect(transport);
```
This can be included in `settings.json` under `mcpServers` with:
```json
{
"mcpServers": {
"nodeServer": {
"command": "node",
"args": ["filename.ts"]
}
}
}
```
### Invoking prompts
Once a prompt is discovered, you can invoke it using its name as a slash
command. The CLI will automatically handle parsing arguments.
```bash
/poem-writer --title="Gemini CLI" --mood="reverent"
```
or, using positional arguments:
```bash
/poem-writer "Gemini CLI" reverent
```
When you run this command, the Gemini CLI executes the `prompts/get` method on
the MCP server with the provided arguments. The server is responsible for
substituting the arguments into the prompt template and returning the final
prompt text. The CLI then sends this prompt to the model for execution. This
provides a convenient way to automate and share common workflows.
## Managing MCP servers with `gemini mcp`
While you can always configure MCP servers by manually editing your
`settings.json` file, the Gemini CLI provides a convenient set of commands to
manage your server configurations programmatically. These commands streamline
the process of adding, listing, and removing MCP servers without needing to
directly edit JSON files.
### Adding a server (`gemini mcp add`)
The `add` command configures a new MCP server in your `settings.json`. Based on
the scope (`-s, --scope`), it will be added to either the user config
`~/.gemini/settings.json` or the project config `.gemini/settings.json` file.
**Command:**
```bash
gemini mcp add [options] <name> <commandOrUrl> [args...]
```
- `<name>`: A unique name for the server.
- `<commandOrUrl>`: The command to execute (for `stdio`) or the URL (for
  `http`/`sse`).
- `[args...]`: Optional arguments for a `stdio` command.
**Options (flags):**
- `-s, --scope`: Configuration scope (user or project). [default: "project"]
- `-t, --transport`: Transport type (stdio, sse, http). [default: "stdio"]
- `-e, --env`: Set environment variables (e.g. -e KEY=value).
- `-H, --header`: Set HTTP headers for SSE and HTTP transports (e.g. -H
"X-Api-Key: abc123" -H "Authorization: Bearer abc123").
- `--timeout`: Set connection timeout in milliseconds.
- `--trust`: Trust the server (bypass all tool call confirmation prompts).
- `--description`: Set the description for the server.
- `--include-tools`: A comma-separated list of tools to include.
- `--exclude-tools`: A comma-separated list of tools to exclude.
#### Adding an stdio server
This is the default transport for running local servers.
```bash
# Basic syntax
gemini mcp add <name> <command> [args...]
# Example: Adding a local server
gemini mcp add -e API_KEY=123 -e DEBUG=true my-stdio-server /path/to/server arg1 arg2 arg3
# Example: Adding a local python server
gemini mcp add python-server python server.py -- --server-arg my-value
```
#### Adding an HTTP server
This transport is for servers that use the streamable HTTP transport.
```bash
# Basic syntax
gemini mcp add --transport http <name> <url>
# Example: Adding an HTTP server
gemini mcp add --transport http http-server https://api.example.com/mcp/
# Example: Adding an HTTP server with an authentication header
gemini mcp add --transport http --header "Authorization: Bearer abc123" secure-http https://api.example.com/mcp/
```
#### Adding an SSE server
This transport is for servers that use Server-Sent Events (SSE).
```bash
# Basic syntax
gemini mcp add --transport sse <name> <url>
# Example: Adding an SSE server
gemini mcp add --transport sse sse-server https://api.example.com/sse/
# Example: Adding an SSE server with an authentication header
gemini mcp add --transport sse --header "Authorization: Bearer abc123" secure-sse https://api.example.com/sse/
```
### Listing servers (`gemini mcp list`)
To view all MCP servers currently configured, use the `list` command. It
displays each server's name, configuration details, and connection status. This
command has no flags.
**Command:**
```bash
gemini mcp list
```
> [!NOTE]
> For security, `stdio` MCP servers (those using the
> `command` property) are only tested and displayed as "Connected" if the
> current folder is trusted. If the folder is untrusted, they will show as
> "Disconnected". Use `gemini trust` to trust the current folder.
**Example output:**
```sh
✓ stdio-server: command: python3 server.py (stdio) - Connected
✓ http-server: https://api.example.com/mcp (http) - Connected
✗ sse-server: https://api.example.com/sse (sse) - Disconnected
```
## Troubleshooting and Diagnostics
To minimize noise during startup, MCP connection errors for background servers
are "silent by default." If issues are detected during startup, a single
informational hint will be shown: _"MCP issues detected. Run /mcp list for
status."_
Detailed, actionable diagnostics for a specific server are automatically
re-enabled when:
1. You run an interactive command like `/mcp list`, `/mcp auth`, etc.
2. The model attempts to execute a tool from that server.
3. You invoke an MCP prompt from that server.
You can also use `gemini mcp list` from your shell to see connection errors for
all configured servers.
### Removing a server (`gemini mcp remove`)
To delete a server from your configuration, use the `remove` command with the
server's name.
**Command:**
```bash
gemini mcp remove <name>
```
**Options (flags):**
- `-s, --scope`: Configuration scope (user or project). [default: "project"]
**Example:**
```bash
gemini mcp remove my-server
```
This will find and delete the "my-server" entry from the `mcpServers` object in
the appropriate `settings.json` file based on the scope (`-s, --scope`).
### Enabling/disabling a server (`gemini mcp enable`, `gemini mcp disable`)
Temporarily disable an MCP server without removing its configuration, or
re-enable a previously disabled server.
**Commands:**
```bash
gemini mcp enable <server-name> [--session]
gemini mcp disable <server-name> [--session]
```
**Options (flags):**
- `--session`: Apply change only for this session (not persisted to file).
Disabled servers appear in `/mcp` status as "Disabled" but won't connect or
provide tools. Enablement state is stored in
`~/.gemini/mcp-server-enablement.json`.
The same commands are available as slash commands during an active session:
`/mcp enable <server-name>` and `/mcp disable <server-name>`.
## Instructions
Gemini CLI supports
[MCP server instructions](https://modelcontextprotocol.io/specification/2025-06-18/schema#initializeresult),
which will be appended to the system instructions.
================================================
FILE: docs/tools/memory.md
================================================
# Memory tool (`save_memory`)
The `save_memory` tool allows the Gemini agent to persist specific facts, user
preferences, and project details across sessions.
## Technical reference
This tool appends information to the `## Gemini Added Memories` section of your
global `GEMINI.md` file (typically located at `~/.gemini/GEMINI.md`).
### Arguments
- `fact` (string, required): A clear, self-contained statement in natural
language.
## Technical behavior
- **Storage:** Appends to the global context file in the user's home directory.
- **Loading:** The stored facts are automatically included in the hierarchical
context system for all future sessions.
- **Format:** Saves data as a bulleted list item within a dedicated Markdown
section.
## Use cases
- Persisting user preferences (for example, "I prefer functional programming").
- Saving project-wide architectural decisions.
- Storing frequently used aliases or system configurations.
## Next steps
- Follow the [Memory management guide](../cli/tutorials/memory-management.md)
for practical examples.
- Learn how the [Project context (GEMINI.md)](../cli/gemini-md.md) system loads
this information.
================================================
FILE: docs/tools/planning.md
================================================
# Gemini CLI planning tools
Planning tools let Gemini CLI switch into a safe, read-only "Plan Mode" for
researching and planning complex changes, and to signal the finalization of a
plan to the user.
## 1. `enter_plan_mode` (EnterPlanMode)
`enter_plan_mode` switches the CLI to Plan Mode. This tool is typically called
by the agent when you ask it to "start a plan" using natural language. In this
mode, the agent is restricted to read-only tools to allow for safe exploration
and planning.
> [!NOTE]
> This tool is not available when the CLI is in YOLO mode.
- **Tool name:** `enter_plan_mode`
- **Display name:** Enter Plan Mode
- **File:** `enter-plan-mode.ts`
- **Parameters:**
- `reason` (string, optional): A short reason explaining why the agent is
entering plan mode (for example, "Starting a complex feature
implementation").
- **Behavior:**
- Switches the CLI's approval mode to `PLAN`.
- Notifies the user that the agent has entered Plan Mode.
- **Output (`llmContent`):** A message indicating the switch, for example,
`Switching to Plan mode.`
- **Confirmation:** Yes. The user is prompted to confirm entering Plan Mode.
## 2. `exit_plan_mode` (ExitPlanMode)
`exit_plan_mode` signals that the planning phase is complete. It presents the
finalized plan to the user and requests approval to start the implementation.
- **Tool name:** `exit_plan_mode`
- **Display name:** Exit Plan Mode
- **File:** `exit-plan-mode.ts`
- **Parameters:**
- `plan_path` (string, required): The path to the finalized Markdown plan
file. This file MUST be located within the project's temporary plans
    directory (for example, `~/.gemini/tmp/<project_hash>/plans/`).
- **Behavior:**
- Validates that the `plan_path` is within the allowed directory and that the
file exists and has content.
- Presents the plan to the user for review.
- If the user approves the plan:
- Switches the CLI's approval mode to the user's chosen approval mode (
`DEFAULT` or `AUTO_EDIT`).
- Marks the plan as approved for implementation.
- If the user rejects the plan:
- Stays in Plan Mode.
- Returns user feedback to the model to refine the plan.
- **Output (`llmContent`):**
- On approval: A message indicating the plan was approved and the new approval
mode.
- On rejection: A message containing the user's feedback.
- **Confirmation:** Yes. Shows the finalized plan and asks for user approval to
proceed with implementation.
================================================
FILE: docs/tools/shell.md
================================================
# Shell tool (`run_shell_command`)
The `run_shell_command` tool allows the Gemini model to execute commands
directly on your system's shell. It is the primary mechanism for the agent to
interact with your environment beyond simple file edits.
## Technical reference
On Windows, commands execute with `powershell.exe -NoProfile -Command`. On other
platforms, they execute with `bash -c`.
### Arguments
- `command` (string, required): The exact shell command to execute.
- `description` (string, optional): A brief description shown to the user for
confirmation.
- `dir_path` (string, optional): The absolute path or relative path from
workspace root where the command runs.
- `is_background` (boolean, optional): Whether to move the process to the
background immediately after starting.
### Return values
The tool returns a JSON object containing:
- `Command`: The executed string.
- `Directory`: The execution path.
- `Stdout` / `Stderr`: The output streams.
- `Exit Code`: The process return code.
- `Background PIDs`: PIDs of any started background processes.
## Configuration
You can configure the behavior of the `run_shell_command` tool by modifying your
`settings.json` file or by using the `/settings` command in the Gemini CLI.
### Enabling interactive commands
To enable interactive commands, you need to set the
`tools.shell.enableInteractiveShell` setting to `true`. This will use `node-pty`
for shell command execution, which allows for interactive sessions. If
`node-pty` is not available, it will fall back to the `child_process`
implementation, which does not support interactive commands.
**Example `settings.json`:**
```json
{
"tools": {
"shell": {
"enableInteractiveShell": true
}
}
}
```
### Showing color in output
To show color in the shell output, you need to set the `tools.shell.showColor`
setting to `true`. This setting only applies when
`tools.shell.enableInteractiveShell` is enabled.
**Example `settings.json`:**
```json
{
"tools": {
"shell": {
"showColor": true
}
}
}
```
### Setting the pager
You can set a custom pager for the shell output by setting the
`tools.shell.pager` setting. The default pager is `cat`. This setting only
applies when `tools.shell.enableInteractiveShell` is enabled.
**Example `settings.json`:**
```json
{
"tools": {
"shell": {
"pager": "less"
}
}
}
```
## Interactive commands
The `run_shell_command` tool now supports interactive commands by integrating a
pseudo-terminal (pty). This allows you to run commands that require real-time
user input, such as text editors (`vim`, `nano`), terminal-based UIs (`htop`),
and interactive version control operations (`git rebase -i`).
When an interactive command is running, you can send input to it from the Gemini
CLI. To focus on the interactive shell, press `Tab`. The terminal output,
including complex TUIs, will be rendered correctly.
## Important notes
- **Security:** Be cautious when executing commands, especially those
constructed from user input, to prevent security vulnerabilities.
- **Error handling:** Check the `Stderr`, `Error`, and `Exit Code` fields to
determine if a command executed successfully.
- **Background processes:** When a command is run in the background with `&`,
the tool will return immediately and the process will continue to run in the
background. The `Background PIDs` field will contain the process ID of the
background process.
## Environment variables
When `run_shell_command` executes a command, it sets the `GEMINI_CLI=1`
environment variable in the subprocess's environment. This allows scripts or
tools to detect if they are being run from within the Gemini CLI.
## Command restrictions
> [!WARNING]
> The `tools.core` setting is an **allowlist for _all_ built-in
> tools**, not just shell commands. When you set `tools.core` to any value,
> _only_ the tools explicitly listed will be enabled. This includes all built-in
> tools like `read_file`, `write_file`, `glob`, `grep_search`, `list_directory`,
> `replace`, etc.
You can restrict the commands that can be executed by the `run_shell_command`
tool by using the `tools.core` and `tools.exclude` settings in your
configuration file.
- `tools.core`: To restrict `run_shell_command` to a specific set of commands,
add entries to the `core` list under the `tools` category in the format
  `run_shell_command(<command>)`. For example,
`"tools": {"core": ["run_shell_command(git)"]}` will only allow `git`
commands. Including the generic `run_shell_command` acts as a wildcard,
allowing any command not explicitly blocked.
- `tools.exclude` [DEPRECATED]: To block specific commands, use the
[Policy Engine](../reference/policy-engine.md). Historically, this setting
allowed adding entries to the `exclude` list under the `tools` category in the
  format `run_shell_command(<command>)`. For example,
`"tools": {"exclude": ["run_shell_command(rm)"]}` will block `rm` commands.
The validation logic is designed to be secure and flexible:
1. **Command chaining disabled**: The tool automatically splits commands
chained with `&&`, `||`, or `;` and validates each part separately. If any
part of the chain is disallowed, the entire command is blocked.
2. **Prefix matching**: The tool uses prefix matching. For example, if you
allow `git`, you can run `git status` or `git log`.
3. **Blocklist precedence**: The `tools.exclude` list is always checked first.
If a command matches a blocked prefix, it will be denied, even if it also
matches an allowed prefix in `tools.core`.
### Command restriction examples
**Allow only specific command prefixes**
To allow only `git` and `npm` commands, and block all others:
```json
{
"tools": {
"core": ["run_shell_command(git)", "run_shell_command(npm)"]
}
}
```
- `git status`: Allowed
- `npm install`: Allowed
- `ls -l`: Blocked
**Block specific command prefixes**
To block `rm` and allow all other commands:
```json
{
"tools": {
"core": ["run_shell_command"],
"exclude": ["run_shell_command(rm)"]
}
}
```
- `rm -rf /`: Blocked
- `git status`: Allowed
- `npm install`: Allowed
**Blocklist takes precedence**
If a command prefix is in both `tools.core` and `tools.exclude`, it will be
blocked.
- **`tools.shell.enableInteractiveShell`**: (boolean) Uses `node-pty` for
real-time interaction.
- **`tools.shell.showColor`**: (boolean) Preserves ANSI colors in output.
- **`tools.shell.inactivityTimeout`**: (number) Seconds to wait for output
before killing the process.
### Command restrictions
You can limit which commands the agent is allowed to request using these
settings:
- **`tools.core`**: An allowlist of command prefixes (for example,
`["git", "npm test"]`).
- **`tools.exclude`**: A blocklist of command prefixes.
## Use cases
- Running build scripts and test suites.
- Initializing or managing version control systems.
- Installing project dependencies.
- Starting development servers or background watchers.
## Next steps
- Follow the [Shell commands tutorial](../cli/tutorials/shell-commands.md) for
practical examples.
- Learn about [Sandboxing](../cli/sandbox.md) to isolate command execution.
================================================
FILE: docs/tools/todos.md
================================================
# Todo tool (`write_todos`)
The `write_todos` tool allows the Gemini agent to maintain an internal list of
subtasks for multi-step requests.
## Technical reference
The agent uses this tool to manage its execution plan and provide progress
updates to the CLI interface.
### Arguments
- `todos` (array of objects, required): The complete list of tasks. Each object
includes:
- `description` (string): Technical description of the task.
- `status` (enum): `pending`, `in_progress`, `completed`, `cancelled`, or
`blocked`.
## Technical behavior
- **Interface:** Updates the progress indicator above the CLI input prompt.
- **Exclusivity:** Only one task can be marked `in_progress` at any time.
- **Persistence:** Todo state is scoped to the current session.
- **Interaction:** Users can toggle the full list view using **Ctrl+T**.
## Use cases
- Breaking down a complex feature implementation into manageable steps.
- Coordinating multi-file refactoring tasks.
- Providing visibility into the agent's current focus during long-running tasks.
## Next steps
- Follow the [Task planning tutorial](../cli/tutorials/task-planning.md) for
usage details.
- Learn about [Session management](../cli/session-management.md) for context.
================================================
FILE: docs/tools/web-fetch.md
================================================
# Web fetch tool (`web_fetch`)
The `web_fetch` tool allows the Gemini agent to retrieve and process content
from specific URLs provided in your prompt.
## Technical reference
The agent uses this tool when you include URLs in your prompt and request
specific operations like summarization or extraction.
### Arguments
- `prompt` (string, required): A request containing up to 20 valid URLs
(starting with `http://` or `https://`) and instructions on how to process
them.
## Technical behavior
- **Confirmation:** Triggers a confirmation dialog showing the converted URLs.
- **Processing:** Uses the Gemini API's `urlContext` for retrieval.
- **Fallback:** If API access fails, the tool attempts to fetch raw content
directly from your local machine.
- **Formatting:** Returns a synthesized response with source attribution.
## Use cases
- Summarizing technical articles or blog posts.
- Comparing data between two or more web pages.
- Extracting specific information from a documentation site.
## Next steps
- Follow the [Web tools guide](../cli/tutorials/web-tools.md) for practical
usage examples.
- See the [Web search tool reference](./web-search.md) for general queries.
================================================
FILE: docs/tools/web-search.md
================================================
# Web search tool (`google_web_search`)
The `google_web_search` tool allows the Gemini agent to retrieve up-to-date
information, news, and facts from the internet via Google Search.
## Technical reference
The agent uses this tool when your request requires knowledge of current events
or specific online documentation not available in its internal training data.
### Arguments
- `query` (string, required): The search query to be executed.
## Technical behavior
- **Grounding:** Returns a generated summary based on search results.
- **Citations:** Includes source URIs and titles for factual grounding.
- **Processing:** The Gemini API processes the search results before returning a
synthesized response to the agent.
## Use cases
- Researching the latest version of a software library or API.
- Finding solutions to recent software bugs or security vulnerabilities.
- Retrieving news or documentation updated after the model's knowledge cutoff.
## Next steps
- Follow the [Web tools guide](../cli/tutorials/web-tools.md) for practical
usage examples.
- Explore the [Web fetch tool reference](./web-fetch.md) for direct URL access.
================================================
FILE: esbuild.config.js
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { createRequire } from 'node:module';
import { writeFileSync } from 'node:fs';
import { wasmLoader } from 'esbuild-plugin-wasm';
// esbuild is loaded dynamically so we can fail with a clear, actionable
// message instead of a raw import error when it is not installed.
let esbuild;
try {
  esbuild = (await import('esbuild')).default;
} catch (_error) {
  console.error('esbuild not available - cannot build bundle');
  process.exit(1);
}
// ESM equivalents of CommonJS __filename/__dirname, plus a require() shim
// used below to read package.json and to resolve bare module specifiers.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const require = createRequire(import.meta.url);
// Root package.json: supplies the CLI version (and sandbox image URI) that
// get baked into the bundles via the `define` options below.
const pkg = require(path.resolve(__dirname, 'package.json'));
/**
 * Builds the esbuild plugin list used to embed WebAssembly modules into the
 * bundle.
 *
 * The first plugin intercepts imports of the form `foo.wasm?binary`, strips
 * the `?binary` suffix, resolves the remaining specifier to an absolute path,
 * and hands it off (via the `wasm-embedded` namespace) to the wasm loader
 * plugin, which inlines the binary in `embedded` mode.
 *
 * @returns {Array} Plugins to pass to an esbuild `build()` call.
 */
function createWasmPlugins() {
  // Strip the `?binary` suffix and turn whatever remains into an absolute
  // filesystem path.
  const resolveWasmPath = (rawPath, resolveDir) => {
    const specifier = rawPath.replace(/\?binary$/, '');
    if (path.isAbsolute(specifier)) {
      return specifier;
    }
    if (specifier.startsWith('./') || specifier.startsWith('../')) {
      return path.join(resolveDir, specifier);
    }
    // Bare specifier (a package name): resolve through Node's module lookup,
    // preferring the importer's directory when one is known.
    return require.resolve(specifier, {
      paths: resolveDir ? [resolveDir, __dirname] : [__dirname],
    });
  };
  const binaryPlugin = {
    name: 'wasm-binary',
    setup(build) {
      build.onResolve({ filter: /\.wasm\?binary$/ }, (args) => ({
        path: resolveWasmPath(args.path, args.resolveDir || ''),
        namespace: 'wasm-embedded',
      }));
    },
  };
  return [binaryPlugin, wasmLoader({ mode: 'embedded' })];
}
// Modules that must stay external to the bundle: native addons (node-pty
// variants, keytar) and the devtools package, which are resolved at runtime.
const external = [
  '@lydell/node-pty',
  'node-pty',
  '@lydell/node-pty-darwin-arm64',
  '@lydell/node-pty-darwin-x64',
  '@lydell/node-pty-linux-x64',
  '@lydell/node-pty-win32-arm64',
  '@lydell/node-pty-win32-x64',
  'keytar',
  '@google/gemini-cli-devtools',
];
// Options shared by every bundle produced from this config.
const baseConfig = {
  bundle: true,
  platform: 'node',
  format: 'esm',
  external,
  loader: { '.node': 'file' },
  write: true,
};
const commonAliases = {
  // Map the bare `punycode` specifier to the userland `punycode/` package
  // rather than Node's deprecated built-in.
  punycode: 'punycode/',
};
// Main CLI bundle: code-split ESM written to bundle/.
const cliConfig = {
  ...baseConfig,
  banner: {
    // Recreate CommonJS-style require/__filename/__dirname at the top of each
    // emitted ESM chunk; the `define` entries below rewrite references to
    // __filename/__dirname to these chunk-scoped names.
    js: `const require = (await import('node:module')).createRequire(import.meta.url); const __chunk_filename = (await import('node:url')).fileURLToPath(import.meta.url); const __chunk_dirname = (await import('node:path')).dirname(__chunk_filename);`,
  },
  entryPoints: { gemini: 'packages/cli/index.ts' },
  outdir: 'bundle',
  splitting: true,
  define: {
    __filename: '__chunk_filename',
    __dirname: '__chunk_dirname',
    'process.env.CLI_VERSION': JSON.stringify(pkg.version),
    'process.env.GEMINI_SANDBOX_IMAGE_DEFAULT': JSON.stringify(
      pkg.config?.sandboxImageUri,
    ),
  },
  plugins: createWasmPlugins(),
  alias: {
    'is-in-ci': path.resolve(__dirname, 'packages/cli/src/patches/is-in-ci.ts'),
    ...commonAliases,
  },
  // Emit build metadata; optionally persisted for bundle analysis (see below).
  metafile: true,
};
// Single-file bundle for the a2a server (no code splitting).
const a2aServerConfig = {
  ...baseConfig,
  banner: {
    js: `const require = (await import('node:module')).createRequire(import.meta.url); const __chunk_filename = (await import('node:url')).fileURLToPath(import.meta.url); const __chunk_dirname = (await import('node:path')).dirname(__chunk_filename);`,
  },
  entryPoints: ['packages/a2a-server/src/http/server.ts'],
  outfile: 'packages/a2a-server/dist/a2a-server.mjs',
  define: {
    __filename: '__chunk_filename',
    __dirname: '__chunk_dirname',
    'process.env.CLI_VERSION': JSON.stringify(pkg.version),
  },
  plugins: createWasmPlugins(),
  alias: commonAliases,
};
// Run both bundles in parallel. allSettled lets us apply a different failure
// policy to each artifact: a CLI build failure is fatal, an a2a-server build
// failure is only a warning. Top-level await is already used above for the
// esbuild import, so it is safe to use here too.
const [cliResult, a2aResult] = await Promise.allSettled([
  esbuild.build(cliConfig).then(({ metafile }) => {
    // In dev builds, persist the metafile so bundle-size tooling can inspect it.
    if (process.env.DEV === 'true') {
      writeFileSync('./bundle/esbuild.json', JSON.stringify(metafile, null, 2));
    }
  }),
  esbuild.build(a2aServerConfig),
]);
if (cliResult.status === 'rejected') {
  console.error('gemini.js build failed:', cliResult.reason);
  process.exit(1);
}
// error in a2a-server bundling will not stop gemini.js bundling process
if (a2aResult.status === 'rejected') {
  console.warn('a2a-server build failed:', a2aResult.reason);
}
================================================
FILE: eslint.config.js
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import eslint from '@eslint/js';
import tseslint from 'typescript-eslint';
import reactPlugin from 'eslint-plugin-react';
import reactHooks from 'eslint-plugin-react-hooks';
import prettierConfig from 'eslint-config-prettier';
import importPlugin from 'eslint-plugin-import';
import vitest from '@vitest/eslint-plugin';
import globals from 'globals';
import headers from 'eslint-plugin-headers';
import path from 'node:path';
import url from 'node:url';
// --- ESM way to get __dirname ---
const __filename = url.fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// --- ---
// Determine the monorepo root (assuming eslint.config.js is at the root)
const projectRoot = __dirname;
// Current year, used by the license-header rule's year pattern below.
const currentYear = new Date().getFullYear();
// AST selectors banned in every lint target; individual config entries spread
// these into their own `no-restricted-syntax` option lists.
const commonRestrictedSyntaxRules = [
  {
    selector: 'CallExpression[callee.name="require"]',
    message: 'Avoid using require(). Use ES6 imports instead.',
  },
  {
    // A thrown Literal that does not even look like "SomethingError: ..." is
    // almost certainly a plain string; require real Error instances.
    selector: 'ThrowStatement > Literal:not([value=/^\\w+Error:/])',
    message:
      'Do not throw string literals or non-Error objects. Throw new Error("...") instead.',
  },
];
// Flat ESLint config for the whole monorepo. Entries are applied in order;
// the Prettier config near the end disables stylistic rules it owns.
export default tseslint.config(
  {
    // Global ignores
    ignores: [
      'node_modules/*',
      'eslint.config.js',
      'packages/**/dist/**',
      'bundle/**',
      'package/bundle/**',
      '.integration-tests/**',
      'dist/**',
      'evals/**',
      'packages/test-utils/**',
      '.gemini/skills/**',
      '**/*.d.ts',
    ],
  },
  eslint.configs.recommended,
  ...tseslint.configs.recommended,
  reactHooks.configs['recommended-latest'],
  reactPlugin.configs.flat.recommended,
  reactPlugin.configs.flat['jsx-runtime'], // Add this if you are using React 17+
  {
    // Settings for eslint-plugin-react
    settings: {
      react: {
        version: 'detect',
      },
    },
  },
  {
    // Rules for packages/*/src (TS/TSX)
    files: ['packages/*/src/**/*.{ts,tsx}'],
    plugins: {
      import: importPlugin,
    },
    settings: {
      'import/resolver': {
        node: true,
      },
    },
    languageOptions: {
      parser: tseslint.parser,
      parserOptions: {
        projectService: true,
        tsconfigRootDir: projectRoot,
      },
      globals: {
        ...globals.node,
        ...globals.es2021,
      },
    },
    rules: {
      ...importPlugin.configs.recommended.rules,
      ...importPlugin.configs.typescript.rules,
      'import/no-default-export': 'warn',
      'import/no-unresolved': 'off',
      'import/no-duplicates': 'error',
      // General Best Practice Rules (subset adapted for flat config)
      '@typescript-eslint/array-type': ['error', { default: 'array-simple' }],
      'arrow-body-style': ['error', 'as-needed'],
      curly: ['error', 'multi-line'],
      eqeqeq: ['error', 'always', { null: 'ignore' }],
      '@typescript-eslint/consistent-type-assertions': [
        'error',
        { assertionStyle: 'as' },
      ],
      '@typescript-eslint/explicit-member-accessibility': [
        'error',
        { accessibility: 'no-public' },
      ],
      '@typescript-eslint/no-explicit-any': 'error',
      '@typescript-eslint/no-inferrable-types': [
        'error',
        { ignoreParameters: true, ignoreProperties: true },
      ],
      '@typescript-eslint/consistent-type-imports': [
        'error',
        { disallowTypeAnnotations: false },
      ],
      '@typescript-eslint/no-namespace': ['error', { allowDeclarations: true }],
      '@typescript-eslint/no-unused-vars': [
        'error',
        {
          argsIgnorePattern: '^_',
          varsIgnorePattern: '^_',
          caughtErrorsIgnorePattern: '^_',
        },
      ],
      // Prevent async errors from bypassing catch handlers
      '@typescript-eslint/return-await': ['error', 'in-try-catch'],
      'import/no-internal-modules': 'off',
      'import/no-relative-packages': 'error',
      'no-cond-assign': 'error',
      'no-debugger': 'error',
      'no-duplicate-case': 'error',
      'no-restricted-syntax': [
        'error',
        ...commonRestrictedSyntaxRules,
        {
          selector:
            'UnaryExpression[operator="typeof"] > MemberExpression[computed=true][property.type="Literal"]',
          message:
            'Do not use typeof to check object properties. Define a TypeScript interface and a type guard function instead.',
        },
      ],
      'no-unsafe-finally': 'error',
      'no-unused-expressions': 'off', // Disable base rule
      '@typescript-eslint/no-unused-expressions': [
        // Enable TS version
        'error',
        { allowShortCircuit: true, allowTernary: true },
      ],
      'no-var': 'error',
      'object-shorthand': 'error',
      'one-var': ['error', 'never'],
      'prefer-arrow-callback': 'error',
      'prefer-const': ['error', { destructuring: 'all' }],
      radix: 'error',
      'no-console': 'error',
      'default-case': 'error',
      '@typescript-eslint/await-thenable': ['error'],
      '@typescript-eslint/no-floating-promises': ['error'],
      '@typescript-eslint/no-unnecessary-type-assertion': ['error'],
      'no-restricted-imports': [
        'error',
        {
          paths: [
            {
              name: 'node:os',
              importNames: ['homedir', 'tmpdir'],
              message:
                'Please use the helpers from @google/gemini-cli-core instead of node:os homedir()/tmpdir() to ensure strict environment isolation.',
            },
            {
              name: 'os',
              importNames: ['homedir', 'tmpdir'],
              message:
                'Please use the helpers from @google/gemini-cli-core instead of os homedir()/tmpdir() to ensure strict environment isolation.',
            },
          ],
        },
      ],
    },
  },
  {
    // API Response Optionality enforcement for Code Assist
    files: ['packages/core/src/code_assist/**/*.{ts,tsx}'],
    rules: {
      'no-restricted-syntax': [
        'error',
        ...commonRestrictedSyntaxRules,
        {
          selector:
            'TSInterfaceDeclaration[id.name=/.+Response$/] TSPropertySignature:not([optional=true])',
          message:
            'All fields in API response interfaces (*Response) must be marked as optional (?) to prevent developers from accidentally assuming a field will always be present based on current backend behavior.',
        },
        {
          selector:
            'TSTypeAliasDeclaration[id.name=/.+Response$/] TSPropertySignature:not([optional=true])',
          message:
            'All fields in API response types (*Response) must be marked as optional (?) to prevent developers from accidentally assuming a field will always be present based on current backend behavior.',
        },
      ],
    },
  },
  {
    // Rules that only apply to product code
    files: ['packages/*/src/**/*.{ts,tsx}'],
    ignores: ['**/*.test.ts', '**/*.test.tsx', 'packages/*/src/test-utils/**'],
    rules: {
      '@typescript-eslint/no-unsafe-type-assertion': 'error',
      '@typescript-eslint/no-unsafe-assignment': 'error',
      '@typescript-eslint/no-unsafe-return': 'error',
      'no-restricted-syntax': [
        'error',
        ...commonRestrictedSyntaxRules,
        {
          selector:
            'CallExpression[callee.object.name="Object"][callee.property.name="create"]',
          message:
            'Avoid using Object.create() in product code. Use object spread {...obj}, explicit class instantiation, structuredClone(), or copy constructors instead.',
        },
        {
          selector: 'Identifier[name="Reflect"]',
          message:
            'Avoid using Reflect namespace in product code. Do not use reflection to make copies. Instead, use explicit object copying or cloning (structuredClone() for values, new instance/clone function for classes).',
        },
      ],
    },
  },
  {
    // Allow os.homedir() in tests and paths.ts where it is used to implement the helper
    files: [
      '**/*.test.ts',
      '**/*.test.tsx',
      'packages/core/src/utils/paths.ts',
      'packages/test-utils/src/**/*.ts',
      'scripts/**/*.js',
    ],
    rules: {
      'no-restricted-imports': 'off',
    },
  },
  {
    // Prevent self-imports in packages
    files: ['packages/core/src/**/*.{ts,tsx}'],
    rules: {
      'no-restricted-imports': [
        'error',
        {
          name: '@google/gemini-cli-core',
          message:
            'Please use relative imports within the @google/gemini-cli-core package.',
        },
      ],
    },
  },
  {
    files: ['packages/cli/src/**/*.{ts,tsx}'],
    rules: {
      'no-restricted-imports': [
        'error',
        {
          name: '@google/gemini-cli',
          message:
            'Please use relative imports within the @google/gemini-cli package.',
        },
      ],
    },
  },
  {
    files: ['packages/sdk/src/**/*.{ts,tsx}'],
    rules: {
      'no-restricted-imports': [
        'error',
        {
          name: '@google/gemini-cli-sdk',
          message:
            'Please use relative imports within the @google/gemini-cli-sdk package.',
        },
      ],
    },
  },
  {
    files: ['packages/*/src/**/*.test.{ts,tsx}'],
    plugins: {
      vitest,
    },
    rules: {
      ...vitest.configs.recommended.rules,
      'vitest/expect-expect': 'off',
      'vitest/no-commented-out-tests': 'off',
      'no-restricted-syntax': ['error', ...commonRestrictedSyntaxRules],
    },
  },
  {
    // License header enforcement for all first-party source files.
    files: ['./**/*.{tsx,ts,js,cjs}'],
    plugins: {
      headers,
      import: importPlugin,
    },
    rules: {
      'headers/header-format': [
        'error',
        {
          source: 'string',
          content: [
            '@license',
            'Copyright (year) Google LLC',
            'SPDX-License-Identifier: Apache-2.0',
          ].join('\n'),
          patterns: {
            year: {
              // Accept any copyright year from 2025 through the current year.
              // An explicit alternation is used instead of a character class
              // like `202[5-<lastDigit>]`, which becomes an invalid regex
              // range (`[5-0]`) in 2030 and can never match years >= 2030.
              pattern: `(?:${Array.from(
                { length: currentYear - 2024 },
                (_, i) => String(2025 + i),
              ).join('|')})`,
              defaultValue: currentYear.toString(),
            },
          },
        },
      ],
      'import/enforce-node-protocol-usage': ['error', 'always'],
    },
  },
  {
    files: [
      './scripts/**/*.js',
      'packages/*/scripts/**/*.js',
      'esbuild.config.js',
      'packages/core/scripts/**/*.{js,mjs}',
    ],
    languageOptions: {
      globals: {
        ...globals.node,
        process: 'readonly',
        console: 'readonly',
      },
    },
    rules: {
      '@typescript-eslint/no-unused-vars': [
        'error',
        {
          argsIgnorePattern: '^_',
          varsIgnorePattern: '^_',
          caughtErrorsIgnorePattern: '^_',
        },
      ],
    },
  },
  {
    files: ['**/*.cjs'],
    languageOptions: {
      sourceType: 'commonjs',
      globals: {
        ...globals.node,
      },
    },
    rules: {
      'no-restricted-syntax': 'off',
      'no-console': 'off',
      'no-empty': 'off',
      'no-redeclare': 'off',
      '@typescript-eslint/no-require-imports': 'off',
      '@typescript-eslint/no-unused-vars': [
        'error',
        {
          argsIgnorePattern: '^_',
          varsIgnorePattern: '^_',
          caughtErrorsIgnorePattern: '^_',
        },
      ],
    },
  },
  {
    files: ['packages/vscode-ide-companion/esbuild.js'],
    languageOptions: {
      globals: {
        ...globals.node,
        process: 'readonly',
        console: 'readonly',
      },
    },
    rules: {
      'no-restricted-syntax': 'off',
      '@typescript-eslint/no-require-imports': 'off',
    },
  },
  // Examples should have access to standard globals like fetch
  {
    files: ['packages/cli/src/commands/extensions/examples/**/*.js'],
    languageOptions: {
      globals: {
        ...globals.node,
        fetch: 'readonly',
      },
    },
  },
  // extra settings for scripts that we run directly with node
  {
    files: ['packages/vscode-ide-companion/scripts/**/*.js'],
    languageOptions: {
      globals: {
        ...globals.node,
        process: 'readonly',
        console: 'readonly',
      },
    },
    rules: {
      'no-restricted-syntax': 'off',
      '@typescript-eslint/no-require-imports': 'off',
    },
  },
  // Prettier config must be last
  prettierConfig,
  // extra settings for scripts that we run directly with node
  {
    files: ['./integration-tests/**/*.js'],
    languageOptions: {
      globals: {
        ...globals.node,
        process: 'readonly',
        console: 'readonly',
      },
    },
    rules: {
      '@typescript-eslint/no-unused-vars': [
        'error',
        {
          argsIgnorePattern: '^_',
          varsIgnorePattern: '^_',
          caughtErrorsIgnorePattern: '^_',
        },
      ],
    },
  },
);
================================================
FILE: evals/README.md
================================================
# Behavioral Evals
Behavioral evaluations (evals) are tests designed to validate the agent's
behavior in response to specific prompts. They serve as a critical feedback loop
for changes to system prompts, tool definitions, and other model-steering
mechanisms, and as a tool for assessing feature reliability by model and
preventing regressions.
## Why Behavioral Evals?
Unlike traditional **integration tests** which verify that the system functions
correctly (e.g., "does the file writer actually write to disk?"), behavioral
evals verify that the model _chooses_ to take the correct action (e.g., "does
the model decide to write to disk when asked to save code?").
They are also distinct from broad **industry benchmarks** (like SWE-bench).
While benchmarks measure general capabilities across complex challenges, our
behavioral evals focus on specific, granular behaviors relevant to the Gemini
CLI's features.
### Key Characteristics
- **Feedback Loop**: They help us understand how changes to prompts or tools
affect the model's decision-making.
- _Did a change to the system prompt make the model less likely to use tool
X?_
- _Did a new tool definition confuse the model?_
- **Regression Testing**: They prevent regressions in model steering.
- **Non-Determinism**: Unlike unit tests, LLM behavior can be non-deterministic.
We distinguish between behaviors that should be robust (`ALWAYS_PASSES`) and
those that are generally reliable but might occasionally vary
(`USUALLY_PASSES`).
## Best Practices
When designing behavioral evals, aim for scenarios that accurately reflect
real-world usage while remaining small and maintainable.
- **Realistic Complexity**: Evals should be complicated enough to be
"realistic." They should operate on actual files and a source directory,
mirroring how a real agent interacts with a workspace. Remember that the agent
may behave differently in a larger codebase, so we want to avoid scenarios
that are too simple to be realistic.
- _Good_: An eval that provides a small, functional React component and asks
the agent to add a specific feature, requiring it to read the file,
understand the context, and write the correct changes.
- _Bad_: An eval that simply asks the agent a trivia question or asks it to
write a generic script without providing any local workspace context.
- **Maintainable Size**: Evals should be small enough to reason about and
maintain. We probably can't check in an entire repo as a test case, though
over time we will want these evals to mature into more and more realistic
scenarios.
- _Good_: A test setup with 2-3 files (e.g., a source file, a config file, and
a test file) that isolates the specific behavior being evaluated.
- _Bad_: A test setup containing dozens of files from a complex framework
where the setup logic itself is prone to breaking.
- **Unambiguous and Reliable Assertions**: Assertions must be clear and specific
to ensure the test passes for the right reason.
- _Good_: Checking that a modified file contains a specific AST node or exact
  string, or verifying that a tool was called with the right parameters.
- _Bad_: Only checking for a tool call, which could happen for an unrelated
reason. Expecting specific LLM output.
- **Fail First**: Have tests that failed before your prompt or tool change. We
want to be sure the test fails before your "fix". It's pretty easy to
accidentally create a passing test that asserts behaviors we get for free. In
  general, every eval should be accompanied by a prompt change, and most prompt
changes should be accompanied by an eval.
- _Good_: Observing a failure, writing an eval that reliably reproduces the
failure, modifying the prompt/tool, and then verifying the eval passes.
- _Bad_: Writing an eval that passes on the first run and assuming your new
prompt change was responsible.
- **Less is More**: Prefer fewer, more realistic tests that assert the major
paths vs. more tests that are more unit-test like. These are evals, so the
value is in testing how the agent works in a semi-realistic scenario.
## Creating an Evaluation
Evaluations are located in the `evals` directory. Each evaluation is a Vitest
test file that uses the `evalTest` function from `evals/test-helper.ts`.
### `evalTest`
The `evalTest` function is a helper that runs a single evaluation case. It takes
two arguments:
1. `policy`: The consistency expectation for this test (`'ALWAYS_PASSES'` or
`'USUALLY_PASSES'`).
2. `evalCase`: An object defining the test case.
#### Policies
Policies control how strictly a test is validated.
- `ALWAYS_PASSES`: Tests expected to pass 100% of the time. These are typically
trivial and test basic functionality. These run in every CI and can block PRs
on failure.
- `USUALLY_PASSES`: Tests expected to pass most of the time but may have some
flakiness due to non-deterministic behaviors. These are run nightly and used
to track the health of the product from build to build.
**All new behavioral evaluations must be created with the `USUALLY_PASSES`
policy.** A subset that prove to be highly stable over time may be promoted to
`ALWAYS_PASSES`. For more information, see
[Test promotion process](#test-promotion-process).
#### `EvalCase` Properties
- `name`: The name of the evaluation case.
- `prompt`: The prompt to send to the model.
- `params`: An optional object with parameters to pass to the test rig (e.g.,
settings).
- `assert`: An async function that takes the test rig and the result of the run
and asserts that the result is correct.
- `log`: An optional boolean that, if set to `true`, will log the tool calls to
a file in the `evals/logs` directory.
### Example
```typescript
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
describe('my_feature', () => {
// New tests MUST start as USUALLY_PASSES and be promoted via /promote-behavioral-eval
evalTest('USUALLY_PASSES', {
name: 'should do something',
prompt: 'do it',
assert: async (rig, result) => {
// assertions
},
});
});
```
## Running Evaluations
First, build the bundled Gemini CLI. You must do this after every code change.
```bash
npm run build
npm run bundle
```
### Always Passing Evals
To run the evaluations that are expected to always pass (CI safe):
```bash
npm run test:always_passing_evals
```
### All Evals
To run all evaluations, including those that may be flaky ("usually passes"):
```bash
npm run test:all_evals
```
This command sets the `RUN_EVALS` environment variable to `1`, which enables the
`USUALLY_PASSES` tests.
## Ensuring Eval is Stable Prior to Check-in
The
[Evals: Nightly](https://github.com/google-gemini/gemini-cli/actions/workflows/evals-nightly.yml)
run is considered to be the source of truth for the quality of an eval test.
Each run of it executes a test 3 times in a row, for each supported model. The
result is then scored 0%, 33%, 66%, or 100% respectively, to indicate how many
of the individual executions passed.
Googlers can schedule a manual run against their branch by clicking the link
above.
Tests should score at least 66% with key models including Gemini 3.1 pro, Gemini
3.0 pro, and Gemini 3 flash prior to check in and they must pass 100% of the
time before they are promoted.
## Test promotion process
To maintain a stable and reliable CI, all new behavioral evaluations follow a
mandatory deflaking process.
1. **Incubation**: You must create all new tests with the `USUALLY_PASSES`
policy. This lets them be monitored in the nightly runs without blocking PRs.
2. **Monitoring**: The test must complete at least 10 nightly runs across all
supported models.
3. **Promotion**: Promotion to `ALWAYS_PASSES` happens exclusively through the
`/promote-behavioral-eval` slash command. This command verifies the 100%
success rate requirement is met across many runs before updating the test
policy.
This promotion process is essential for preventing the introduction of flaky
evaluations into the CI.
## Reporting
Results for evaluations are available on GitHub Actions:
- **CI Evals**: Included in the
[E2E (Chained)](https://github.com/google-gemini/gemini-cli/actions/workflows/chained_e2e.yml)
workflow. These must pass 100% for every PR.
- **Nightly Evals**: Run daily via the
[Evals: Nightly](https://github.com/google-gemini/gemini-cli/actions/workflows/evals-nightly.yml)
workflow. These track the long-term health and stability of model steering.
### Nightly Report Format
The nightly workflow executes the full evaluation suite multiple times
(currently 3 attempts) to account for non-determinism. These results are
aggregated into a **Nightly Summary** attached to the workflow run.
#### How to interpret the report:
- **Pass Rate (%)**: Each cell represents the percentage of successful runs for
a specific test in that workflow instance.
- **History**: The table shows the pass rates for the last 7 nightly runs,
allowing you to identify if a model's behavior is trending towards
instability.
- **Total Pass Rate**: An aggregate metric of all evaluations run in that batch.
A significant drop in the pass rate for a `USUALLY_PASSES` test—even if it
doesn't drop to 0%—often indicates that a recent change to a system prompt or
tool definition has made the model's behavior less reliable.
## Fixing Evaluations
If an evaluation is failing or has a regressed pass rate, you can use the
`/fix-behavioral-eval` command within Gemini CLI to help investigate and fix the
issue.
### `/fix-behavioral-eval`
This command is designed to automate the investigation and fixing process for
failing evaluations. It will:
1. **Investigate**: Fetch the latest results from the nightly workflow using
the `gh` CLI, identify the failing test, and review test trajectory logs in
`evals/logs`.
2. **Fix**: Suggest and apply targeted fixes to the prompt or tool definitions.
It prioritizes minimal changes to `prompt.ts`, tool instructions, and
modules that contribute to the prompt. It generally tries to avoid changing
the test itself.
3. **Verify**: Re-run the test 3 times across multiple models (e.g., Gemini
3.0, Gemini 3 Flash, Gemini 2.5 Pro) to ensure stability and calculate a
success rate.
4. **Report**: Provide a summary of the success rate for each model and details
on the applied fixes.
To use it, run:
```bash
gemini /fix-behavioral-eval
```
You can also provide a link to a specific GitHub Action run or the name of a
specific test to focus the investigation:
```bash
gemini /fix-behavioral-eval https://github.com/google-gemini/gemini-cli/actions/runs/123456789
```
When investigating failures manually, you can also enable verbose agent logs by
setting the `GEMINI_DEBUG_LOG_FILE` environment variable.
### Best practices
It's highly recommended to manually review and/or ask the agent to iterate on
any prompt changes, even if they pass all evals. The prompt should prefer
positive traits ('do X') and resort to negative traits ('do not do X') only when
unable to accomplish the goal with positive traits. Gemini is quite good at
introspecting on its prompt when asked the right questions.
## Promoting evaluations
Evaluations must be promoted from `USUALLY_PASSES` to `ALWAYS_PASSES`
exclusively using the `/promote-behavioral-eval` slash command. Manual promotion
is not allowed to ensure that the 100% success rate requirement is empirically
met.
### `/promote-behavioral-eval`
This command automates the promotion of stable tests by:
1. **Investigating**: Analyzing the results of the last 7 nightly runs on the
`main` branch using the `gh` CLI.
2. **Criteria Check**: Identifying tests that have passed 100% of the time for
ALL enabled models across the entire 7-run history.
3. **Promotion**: Updating the test file's policy from `USUALLY_PASSES` to
`ALWAYS_PASSES`.
4. **Verification**: Running the promoted test locally to ensure correctness.
To run it:
```bash
gemini /promote-behavioral-eval
```
================================================
FILE: evals/answer-vs-act.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
import { EDIT_TOOL_NAMES } from '@google/gemini-cli-core';
// Workspace fixture shared by every case: app.ts contains an intentional bug
// (the `add` function subtracts) that each prompt below refers to.
const FILES = {
  'app.ts': 'const add = (a: number, b: number) => a - b;',
  'package.json': '{"name": "test-app", "version": "1.0.0"}',
} as const;

describe('Answer vs. ask eval', () => {
  /**
   * Ensures that when the user asks to "inspect" for bugs, the agent does NOT
   * automatically modify the file, but instead asks for permission.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should not edit files when asked to inspect for bugs',
    prompt: 'Inspect app.ts for bugs',
    files: FILES,
    // NOTE: the unused `result` parameter was dropped to match the sibling
    // tests and the repo convention of prefixing unused args with `_`.
    assert: async (rig) => {
      const toolLogs = rig.readToolLogs();
      // Verify NO edit tools called
      const editCalls = toolLogs.filter((log) =>
        EDIT_TOOL_NAMES.has(log.toolRequest.name),
      );
      expect(editCalls.length).toBe(0);
      // Verify file unchanged
      const content = rig.readFile('app.ts');
      expect(content).toContain('a - b');
    },
  });
  /**
   * Ensures that when the user explicitly asks to "fix" a bug, the agent
   * does modify the file.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should edit files when asked to fix bug',
    prompt: 'Fix the bug in app.ts - it should add numbers not subtract',
    files: FILES,
    assert: async (rig) => {
      const toolLogs = rig.readToolLogs();
      // Verify edit tools WERE called
      const editCalls = toolLogs.filter(
        (log) =>
          EDIT_TOOL_NAMES.has(log.toolRequest.name) && log.toolRequest.success,
      );
      expect(editCalls.length).toBeGreaterThanOrEqual(1);
      // Verify file changed
      const content = rig.readFile('app.ts');
      expect(content).toContain('a + b');
    },
  });
  /**
   * Ensures that when the user asks "any bugs?" the agent does NOT
   * automatically modify the file, but instead asks for permission.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should not edit when asking "any bugs"',
    prompt: 'Any bugs in app.ts?',
    files: FILES,
    assert: async (rig) => {
      const toolLogs = rig.readToolLogs();
      // Verify NO edit tools called
      const editCalls = toolLogs.filter((log) =>
        EDIT_TOOL_NAMES.has(log.toolRequest.name),
      );
      expect(editCalls.length).toBe(0);
      // Verify file unchanged
      const content = rig.readFile('app.ts');
      expect(content).toContain('a - b');
    },
  });
  /**
   * Ensures that when the user asks a general question, the agent does NOT
   * automatically modify the file.
   */
  evalTest('ALWAYS_PASSES', {
    name: 'should not edit files when asked a general question',
    prompt: 'How does app.ts work?',
    files: FILES,
    assert: async (rig) => {
      const toolLogs = rig.readToolLogs();
      // Verify NO edit tools called
      const editCalls = toolLogs.filter((log) =>
        EDIT_TOOL_NAMES.has(log.toolRequest.name),
      );
      expect(editCalls.length).toBe(0);
      // Verify file unchanged
      const content = rig.readFile('app.ts');
      expect(content).toContain('a - b');
    },
  });
  /**
   * Ensures that when the user asks a question about style, the agent does NOT
   * automatically modify the file.
   */
  evalTest('ALWAYS_PASSES', {
    name: 'should not edit files when asked about style',
    prompt: 'Is app.ts following good style?',
    files: FILES,
    assert: async (rig) => {
      const toolLogs = rig.readToolLogs();
      // Verify NO edit tools called
      const editCalls = toolLogs.filter((log) =>
        EDIT_TOOL_NAMES.has(log.toolRequest.name),
      );
      expect(editCalls.length).toBe(0);
      // Verify file unchanged
      const content = rig.readFile('app.ts');
      expect(content).toContain('a - b');
    },
  });
  /**
   * Ensures that when the user points out an issue but doesn't ask for a fix,
   * the agent does NOT automatically modify the file.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should not edit files when user notes an issue',
    prompt: 'The add function subtracts numbers.',
    files: FILES,
    params: { timeout: 20000 }, // 20s timeout
    assert: async (rig) => {
      const toolLogs = rig.readToolLogs();
      // Verify NO edit tools called
      const editCalls = toolLogs.filter((log) =>
        EDIT_TOOL_NAMES.has(log.toolRequest.name),
      );
      expect(editCalls.length).toBe(0);
      // Verify file unchanged
      const content = rig.readFile('app.ts');
      expect(content).toContain('a - b');
    },
  });
});
================================================
FILE: evals/app-test-helper.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { AppRig } from '../packages/cli/src/test-utils/AppRig.js';
import {
type EvalPolicy,
runEval,
prepareLogDir,
symlinkNodeModules,
} from './test-helper.js';
import fs from 'node:fs';
import path from 'node:path';
import { DEFAULT_GEMINI_MODEL } from '@google/gemini-cli-core';
/**
 * A single behavioral eval case executed against the in-process AppRig.
 */
export interface AppEvalCase {
  /** Human-readable test name; also used to derive the log file name. */
  name: string;
  /** Partial config merged over the rig defaults; shape is project-wide. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any -- config shape is intentionally loose here
  configOverrides?: any;
  /** Initial prompt sent to the agent once the app reaches an idle state. */
  prompt: string;
  /** Optional per-test timeout in milliseconds (defaults to 60000). */
  timeout?: number;
  /** Map of relative file path -> contents used to seed the temp workspace. */
  files?: Record<string, string>;
  /** Optional hook run after files are written, before the app is rendered. */
  setup?: (rig: AppRig) => Promise<void>;
  /** Assertion callback; receives the rig and the static terminal output. */
  assert: (rig: AppRig, output: string) => Promise<void>;
}
/**
* A helper for running behavioral evaluations using the in-process AppRig.
* This matches the API of evalTest in test-helper.ts as closely as possible.
*/
/**
 * Runs a behavioral evaluation using the in-process AppRig.
 * Mirrors the evalTest API from test-helper.ts as closely as possible.
 */
export function appEvalTest(policy: EvalPolicy, evalCase: AppEvalCase) {
  const run = async () => {
    const rig = new AppRig({
      configOverrides: {
        model: DEFAULT_GEMINI_MODEL,
        ...evalCase.configOverrides,
      },
    });
    const { logDir, sanitizedName } = await prepareLogDir(evalCase.name);
    const logFile = path.join(logDir, `${sanitizedName}.log`);
    try {
      await rig.initialize();
      const testDir = rig.getTestDir();
      symlinkNodeModules(testDir);
      // Seed the temp workspace with the test's initial files, creating any
      // intermediate directories as needed.
      if (evalCase.files) {
        for (const [relPath, contents] of Object.entries(evalCase.files)) {
          const absPath = path.join(testDir, relPath);
          fs.mkdirSync(path.dirname(absPath), { recursive: true });
          fs.writeFileSync(absPath, contents);
        }
      }
      // Per-test setup hook (e.g. installing breakpoints) before rendering.
      await evalCase.setup?.(rig);
      rig.render();
      // Wait until the app is ready before sending the initial prompt.
      await rig.waitForIdle();
      await rig.sendMessage(evalCase.prompt);
      // Interaction-heavy tests do their own waiting/steering inside assert.
      await evalCase.assert(rig, rig.getStaticOutput());
    } finally {
      // Persist whatever terminal output exists for post-mortem debugging,
      // then tear the app down regardless of test outcome.
      const transcript = rig.getStaticOutput();
      if (transcript) {
        await fs.promises.writeFile(logFile, transcript);
      }
      await rig.unmount();
    }
  };
  // Pad the eval timeout so setup/teardown overhead doesn't count against it.
  runEval(policy, evalCase.name, run, (evalCase.timeout ?? 60000) + 10000);
}
================================================
FILE: evals/ask_user.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { appEvalTest, AppEvalCase } from './app-test-helper.js';
import { EvalPolicy } from './test-helper.js';
/**
 * Wraps appEvalTest with the config defaults shared by every ask_user eval:
 * manual approval mode and auto-update behavior disabled.
 */
function askUserEvalTest(policy: EvalPolicy, evalCase: AppEvalCase) {
  const general = {
    ...evalCase.configOverrides?.general,
    approvalMode: 'default',
    enableAutoUpdate: false,
    enableAutoUpdateNotification: false,
  };
  return appEvalTest(policy, {
    ...evalCase,
    configOverrides: {
      ...evalCase.configOverrides,
      general,
    },
    files: {
      ...evalCase.files,
    },
  });
}
describe('ask_user', () => {
  askUserEvalTest('USUALLY_PASSES', {
    name: 'Agent uses AskUser tool to present multiple choice options',
    prompt: `Use the ask_user tool to ask me what my favorite color is. Provide 3 options: red, green, or blue.`,
    setup: async (rig) => {
      rig.setBreakpoint(['ask_user']);
    },
    assert: async (rig) => {
      const confirmation = await rig.waitForPendingConfirmation('ask_user');
      expect(
        confirmation,
        'Expected a pending confirmation for ask_user tool',
      ).toBeDefined();
    },
  });
  askUserEvalTest('USUALLY_PASSES', {
    name: 'Agent uses AskUser tool to clarify ambiguous requirements',
    files: {
      'package.json': JSON.stringify({ name: 'my-app', version: '1.0.0' }),
    },
    prompt: `I want to build a new feature in this app. Ask me questions to clarify the requirements before proceeding.`,
    setup: async (rig) => {
      rig.setBreakpoint(['ask_user']);
    },
    assert: async (rig) => {
      const confirmation = await rig.waitForPendingConfirmation('ask_user');
      expect(
        confirmation,
        'Expected a pending confirmation for ask_user tool',
      ).toBeDefined();
    },
  });
  askUserEvalTest('USUALLY_PASSES', {
    name: 'Agent uses AskUser tool before performing significant ambiguous rework',
    files: {
      'packages/core/src/index.ts': '// index\nexport const version = "1.0.0";',
      'packages/core/src/util.ts': '// util\nexport function help() {}',
      'packages/core/package.json': JSON.stringify({
        name: '@google/gemini-cli-core',
      }),
      'README.md': '# Gemini CLI',
    },
    prompt: `I want to completely rewrite the core package to support the upcoming V2 architecture, but I haven't decided what that looks like yet. We need to figure out the requirements first. Can you ask me some questions to help nail down the design?`,
    setup: async (rig) => {
      rig.setBreakpoint(['enter_plan_mode', 'ask_user']);
    },
    assert: async (rig) => {
      // It might call enter_plan_mode first.
      let confirmation = await rig.waitForPendingConfirmation([
        'enter_plan_mode',
        'ask_user',
      ]);
      expect(confirmation, 'Expected a tool call confirmation').toBeDefined();
      // Fix: the pending-confirmation object is asserted on via `toolName`
      // everywhere else in this file; the previous `confirmation?.name`
      // check could never match, leaving this branch dead.
      if (confirmation?.toolName === 'enter_plan_mode') {
        rig.acceptConfirmation('enter_plan_mode');
        confirmation = await rig.waitForPendingConfirmation('ask_user');
      }
      expect(
        confirmation?.toolName,
        'Expected ask_user to be called to clarify the significant rework',
      ).toBe('ask_user');
    },
  });
  // --- Regression Tests for Recent Fixes ---
  // Regression test for issue #20177: Ensure the agent does not use \`ask_user\` to
  // confirm shell commands. Fixed via prompt refinements and tool definition
  // updates to clarify that shell command confirmation is handled by the UI.
  // See fix: https://github.com/google-gemini/gemini-cli/pull/20504
  askUserEvalTest('USUALLY_PASSES', {
    name: 'Agent does NOT use AskUser to confirm shell commands',
    files: {
      'package.json': JSON.stringify({
        scripts: { build: 'echo building' },
      }),
    },
    prompt: `Run 'npm run build' in the current directory.`,
    setup: async (rig) => {
      rig.setBreakpoint(['run_shell_command', 'ask_user']);
    },
    assert: async (rig) => {
      const confirmation = await rig.waitForPendingConfirmation([
        'run_shell_command',
        'ask_user',
      ]);
      expect(
        confirmation,
        'Expected a pending confirmation for a tool',
      ).toBeDefined();
      expect(
        confirmation?.toolName,
        'ask_user should not be called to confirm shell commands',
      ).toBe('run_shell_command');
    },
  });
});
================================================
FILE: evals/automated-tool-use.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
describe('Automated tool use', () => {
  /**
   * Extracts the shell `command` string from a run_shell_command tool log.
   * Tool args may be logged either as an object or as a JSON string; returns
   * undefined when the args cannot be parsed.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any -- tool log entries have no shared typed shape here
  const getShellCommand = (call: any): string | undefined => {
    let args = call.toolRequest.args;
    if (typeof args === 'string') {
      try {
        args = JSON.parse(args);
      } catch {
        return undefined;
      }
    }
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    return (args as any)?.['command'];
  };
  /**
   * Tests that the agent always utilizes --fix when calling eslint.
   * We provide a 'lint' script in the package.json, which helps elicit
   * a repro by guiding the agent into using the existing deficient script.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should use automated tools (eslint --fix) to fix code style issues',
    files: {
      'package.json': JSON.stringify(
        {
          name: 'typescript-project',
          version: '1.0.0',
          type: 'module',
          scripts: {
            lint: 'eslint .',
          },
          devDependencies: {
            eslint: '^9.0.0',
            globals: '^15.0.0',
            typescript: '^5.0.0',
            'typescript-eslint': '^8.0.0',
            '@eslint/js': '^9.0.0',
          },
        },
        null,
        2,
      ),
      'eslint.config.js': `
import globals from "globals";
import pluginJs from "@eslint/js";
import tseslint from "typescript-eslint";
export default [
{
files: ["**/*.{js,mjs,cjs,ts}"],
languageOptions: {
globals: globals.node
}
},
pluginJs.configs.recommended,
...tseslint.configs.recommended,
{
rules: {
"prefer-const": "error",
"@typescript-eslint/no-unused-vars": "off"
}
}
];
`,
      'src/app.ts': `
export function main() {
let count = 10;
console.log(count);
}
`,
    },
    prompt:
      'Fix the linter errors in this project. Make sure to avoid interactive commands.',
    assert: async (rig) => {
      // Check if run_shell_command was used with --fix
      const toolCalls = rig.readToolLogs();
      const shellCommands = toolCalls.filter(
        (call) => call.toolRequest.name === 'run_shell_command',
      );
      const hasFixCommand = shellCommands.some((call) => {
        const cmd = getShellCommand(call);
        return (
          !!cmd &&
          (cmd.includes('eslint') || cmd.includes('npm run lint')) &&
          cmd.includes('--fix')
        );
      });
      expect(
        hasFixCommand,
        'Expected agent to use eslint --fix via run_shell_command',
      ).toBe(true);
    },
  });
  /**
   * Tests that the agent uses prettier --write to fix formatting issues in files
   * instead of trying to edit the files itself.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should use automated tools (prettier --write) to fix formatting issues',
    files: {
      'package.json': JSON.stringify(
        {
          name: 'typescript-project',
          version: '1.0.0',
          type: 'module',
          scripts: {},
          devDependencies: {
            prettier: '^3.0.0',
            typescript: '^5.0.0',
          },
        },
        null,
        2,
      ),
      '.prettierrc': JSON.stringify(
        {
          semi: true,
          singleQuote: true,
        },
        null,
        2,
      ),
      'src/app.ts': `
export function main() {
const data={ name:'test',
val:123
}
console.log(data)
}
`,
    },
    prompt:
      'Fix the formatting errors in this project. Make sure to avoid interactive commands.',
    assert: async (rig) => {
      // Check if run_shell_command was used with --write
      const toolCalls = rig.readToolLogs();
      const shellCommands = toolCalls.filter(
        (call) => call.toolRequest.name === 'run_shell_command',
      );
      const hasFixCommand = shellCommands.some((call) => {
        const cmd = getShellCommand(call);
        return (
          !!cmd &&
          cmd.includes('prettier') &&
          (cmd.includes('--write') || cmd.includes('-w'))
        );
      });
      expect(
        hasFixCommand,
        'Expected agent to use prettier --write via run_shell_command',
      ).toBe(true);
    },
  });
});
================================================
FILE: evals/concurrency-safety.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
// Fix: `describe` was used below but not imported; sibling eval files import
// it explicitly from vitest rather than relying on globals.
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';

// Subagent definition seeded into .gemini/agents/. This string is consumed by
// the CLI at runtime, so its contents must stay exactly as written.
const MUTATION_AGENT_DEFINITION = `---
name: mutation-agent
description: An agent that modifies the workspace (writes, deletes, git operations, etc).
max_turns: 1
tools:
- write_file
---
You are the mutation agent. Do the mutation requested.
`;

describe('concurrency safety eval test cases', () => {
  evalTest('USUALLY_PASSES', {
    name: 'mutation agents are run in parallel when explicitly requested',
    params: {
      settings: {
        experimental: {
          enableAgents: true,
        },
      },
    },
    prompt:
      'Update A.txt to say "A" and update B.txt to say "B". Delegate these tasks to two separate mutation-agent subagents. You MUST run these subagents in parallel at the same time.',
    files: {
      '.gemini/agents/mutation-agent.md': MUTATION_AGENT_DEFINITION,
    },
    assert: async (rig) => {
      const logs = rig.readToolLogs();
      const mutationCalls = logs.filter(
        (log) => log.toolRequest?.name === 'mutation-agent',
      );
      expect(
        mutationCalls.length,
        'Agent should have called the mutation-agent at least twice',
      ).toBeGreaterThanOrEqual(2);
      // Calls issued in the same turn share a prompt_id; matching ids are how
      // we detect that the two subagents were launched in parallel.
      const firstPromptId = mutationCalls[0].toolRequest.prompt_id;
      const secondPromptId = mutationCalls[1].toolRequest.prompt_id;
      expect(
        firstPromptId,
        'mutation agents should be called in parallel (same turn / prompt_ids) when explicitly requested',
      ).toEqual(secondPromptId);
    },
  });
});
================================================
FILE: evals/edit-locations-eval.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
describe('Edits location eval', () => {
  /**
   * Ensure that Gemini CLI always updates existing test files, if present,
   * instead of creating a new one.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should update existing test file instead of creating a new one',
    files: {
      'package.json': JSON.stringify(
        {
          name: 'test-location-repro',
          version: '1.0.0',
          scripts: {
            test: 'vitest run',
          },
          devDependencies: {
            vitest: '^1.0.0',
            typescript: '^5.0.0',
          },
        },
        null,
        2,
      ),
      'src/math.ts': `
export function add(a: number, b: number): number {
return a + b;
}
export function subtract(a: number, b: number): number {
return a - b;
}
export function multiply(a: number, b: number): number {
return a + b;
}
`,
      'src/math.test.ts': `
import { expect, test } from 'vitest';
import { add, subtract } from './math';
test('add adds two numbers', () => {
expect(add(2, 3)).toBe(5);
});
test('subtract subtracts two numbers', () => {
expect(subtract(5, 3)).toBe(2);
});
`,
      'src/utils.ts': `
export function capitalize(s: string): string {
return s.charAt(0).toUpperCase() + s.slice(1);
}
`,
      'src/utils.test.ts': `
import { expect, test } from 'vitest';
import { capitalize } from './utils';
test('capitalize capitalizes the first letter', () => {
expect(capitalize('hello')).toBe('Hello');
});
`,
    },
    prompt: 'Fix the bug in src/math.ts. Do not run the code.',
    timeout: 180000,
    assert: async (rig) => {
      const toolLogs = rig.readToolLogs();
      const replaceCalls = toolLogs.filter(
        (t) => t.toolRequest.name === 'replace',
      );
      const writeFileCalls = toolLogs.filter(
        (t) => t.toolRequest.name === 'write_file',
      );
      expect(replaceCalls.length).toBeGreaterThan(0);
      // No *new* test file may be created; edits must target the existing one.
      expect(
        writeFileCalls.some((file) =>
          file.toolRequest.args.includes('.test.ts'),
        ),
      ).toBe(false);
      // Tool args are logged as JSON strings; unparseable entries map to null.
      const targetFiles = replaceCalls.map((t) => {
        try {
          return JSON.parse(t.toolRequest.args).file_path;
        } catch {
          return null;
        }
      });
      // Both the source file and its existing test file should be edited.
      expect(
        new Set(targetFiles).size,
        'Expected at least two distinct files changed',
      ).toBeGreaterThanOrEqual(2);
      expect(targetFiles.some((f) => f?.endsWith('src/math.ts'))).toBe(true);
      expect(targetFiles.some((f) => f?.endsWith('src/math.test.ts'))).toBe(
        true,
      );
    },
  });
});
================================================
FILE: evals/frugalReads.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
import { READ_FILE_TOOL_NAME, EDIT_TOOL_NAME } from '@google/gemini-cli-core';
describe('Frugal reads eval', () => {
  /**
   * Ensures that the agent is frugal in its use of context by relying
   * primarily on ranged reads when the line number is known, and combining
   * nearby ranges into a single contiguous read to save tool calls.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should use ranged read when nearby lines are targeted',
    files: {
      'package.json': JSON.stringify({
        name: 'test-project',
        version: '1.0.0',
        type: 'module',
      }),
      'eslint.config.mjs': `export default [
{
files: ["**/*.ts"],
rules: {
"no-var": "error"
}
}
];`,
      // Generates a 1000-line file with exactly three `var` lint errors
      // clustered at lines 501/511/521 (0-indexed 500/510/520).
      'linter_mess.ts': (() => {
        const lines = [];
        for (let i = 0; i < 1000; i++) {
          if (i === 500 || i === 510 || i === 520) {
            lines.push(`var oldVar${i} = "needs fix";`);
          } else {
            lines.push(`const goodVar${i} = "clean";`);
          }
        }
        return lines.join('\n');
      })(),
    },
    prompt:
      'Fix all linter errors in linter_mess.ts manually by editing the file. Run eslint directly (using "npx --yes eslint") to find them. Do not run the file.',
    assert: async (rig) => {
      const logs = rig.readToolLogs();
      // Check if the agent read the whole file
      const readCalls = logs.filter(
        (log) => log.toolRequest?.name === READ_FILE_TOOL_NAME,
      );
      const targetFileReads = readCalls.filter((call) => {
        // Tool args are logged as a JSON string.
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('linter_mess.ts');
      });
      expect(
        targetFileReads.length,
        'Agent should have used read_file to check context',
      ).toBeGreaterThan(0);
      // We expect 1-3 ranges in a single turn.
      expect(
        targetFileReads.length,
        'Agent should have used 1-3 ranged reads for near errors',
      ).toBeLessThanOrEqual(3);
      const firstPromptId = targetFileReads[0].toolRequest.prompt_id;
      expect(firstPromptId, 'Prompt ID should be defined').toBeDefined();
      // Calls in the same turn share a prompt_id.
      expect(
        targetFileReads.every(
          (call) => call.toolRequest.prompt_id === firstPromptId,
        ),
        'All reads should have happened in the same turn',
      ).toBe(true);
      let totalLinesRead = 0;
      const readRanges: { start_line: number; end_line: number }[] = [];
      for (const call of targetFileReads) {
        const args = JSON.parse(call.toolRequest.args);
        // A missing end_line means the tool read to the end of the file.
        expect(
          args.end_line,
          'Agent read the entire file (missing end_line) instead of using ranged read',
        ).toBeDefined();
        const end_line = args.end_line;
        // A missing start_line means the read began at line 1.
        const start_line = args.start_line ?? 1;
        const linesRead = end_line - start_line + 1;
        totalLinesRead += linesRead;
        readRanges.push({ start_line, end_line });
        expect(linesRead, 'Agent read too many lines at once').toBeLessThan(
          1001,
        );
      }
      // Ranged read should be frugal and just enough to satisfy the task at hand.
      expect(
        totalLinesRead,
        'Agent read more of the file than expected',
      ).toBeLessThan(1000);
      // Check that we read around the error lines
      const errorLines = [500, 510, 520];
      for (const line of errorLines) {
        const covered = readRanges.some(
          (range) => line >= range.start_line && line <= range.end_line,
        );
        expect(covered, `Agent should have read around line ${line}`).toBe(
          true,
        );
      }
      const editCalls = logs.filter(
        (log) => log.toolRequest?.name === EDIT_TOOL_NAME,
      );
      const targetEditCalls = editCalls.filter((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('linter_mess.ts');
      });
      // One edit per seeded error.
      expect(
        targetEditCalls.length,
        'Agent should have made replacement calls on the target file',
      ).toBeGreaterThanOrEqual(3);
    },
  });
  /**
   * Ensures the agent uses multiple ranged reads when the targets are far
   * apart to avoid the need to read the whole file.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should use ranged read when targets are far apart',
    files: {
      'package.json': JSON.stringify({
        name: 'test-project',
        version: '1.0.0',
        type: 'module',
      }),
      'eslint.config.mjs': `export default [
{
files: ["**/*.ts"],
rules: {
"no-var": "error"
}
}
];`,
      // 1000-line file with two `var` errors ~800 lines apart.
      'far_mess.ts': (() => {
        const lines = [];
        for (let i = 0; i < 1000; i++) {
          if (i === 100 || i === 900) {
            lines.push(`var oldVar${i} = "needs fix";`);
          } else {
            lines.push(`const goodVar${i} = "clean";`);
          }
        }
        return lines.join('\n');
      })(),
    },
    prompt:
      'Fix all linter errors in far_mess.ts manually by editing the file. Run eslint directly (using "npx --yes eslint") to find them. Do not run the file.',
    assert: async (rig) => {
      const logs = rig.readToolLogs();
      const readCalls = logs.filter(
        (log) => log.toolRequest?.name === READ_FILE_TOOL_NAME,
      );
      const targetFileReads = readCalls.filter((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('far_mess.ts');
      });
      // The agent should use ranged reads to be frugal with context tokens,
      // even if it requires multiple calls for far-apart errors.
      expect(
        targetFileReads.length,
        'Agent should have used read_file to check context',
      ).toBeGreaterThan(0);
      // We allow multiple calls since the errors are far apart.
      expect(
        targetFileReads.length,
        'Agent should have used separate reads for far apart errors',
      ).toBeLessThanOrEqual(4);
      for (const call of targetFileReads) {
        const args = JSON.parse(call.toolRequest.args);
        expect(
          args.end_line,
          'Agent should have used ranged read (end_line) to save tokens',
        ).toBeDefined();
      }
    },
  });
  /**
   * Validates that the agent reads the entire file if there are lots of matches
   * (e.g.: 10), as it's more efficient than many small ranged reads.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should read the entire file when there are many matches',
    files: {
      'package.json': JSON.stringify({
        name: 'test-project',
        version: '1.0.0',
        type: 'module',
      }),
      'eslint.config.mjs': `export default [
{
files: ["**/*.ts"],
rules: {
"no-var": "error"
}
}
];`,
      // 1000-line file with a `var` error every 100 lines (10 in total).
      'many_mess.ts': (() => {
        const lines = [];
        for (let i = 0; i < 1000; i++) {
          if (i % 100 === 0) {
            lines.push(`var oldVar${i} = "needs fix";`);
          } else {
            lines.push(`const goodVar${i} = "clean";`);
          }
        }
        return lines.join('\n');
      })(),
    },
    prompt:
      'Fix all linter errors in many_mess.ts manually by editing the file. Run eslint directly (using "npx --yes eslint") to find them. Do not run the file.',
    assert: async (rig) => {
      const logs = rig.readToolLogs();
      const readCalls = logs.filter(
        (log) => log.toolRequest?.name === READ_FILE_TOOL_NAME,
      );
      const targetFileReads = readCalls.filter((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('many_mess.ts');
      });
      expect(
        targetFileReads.length,
        'Agent should have used read_file to check context',
      ).toBeGreaterThan(0);
      // In this case, we expect the agent to realize there are many scattered errors
      // and just read the whole file to be efficient with tool calls.
      const readEntireFile = targetFileReads.some((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.end_line === undefined;
      });
      expect(
        readEntireFile,
        'Agent should have read the entire file because of the high number of scattered matches',
      ).toBe(true);
      // Check that the agent actually fixed the errors
      const editCalls = logs.filter(
        (log) => log.toolRequest?.name === EDIT_TOOL_NAME,
      );
      const targetEditCalls = editCalls.filter((call) => {
        const args = JSON.parse(call.toolRequest.args);
        return args.file_path.includes('many_mess.ts');
      });
      expect(
        targetEditCalls.length,
        'Agent should have made replacement calls on the target file',
      ).toBeGreaterThanOrEqual(1);
    },
  });
});
================================================
FILE: evals/frugalSearch.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
/**
* Evals to verify that the agent uses search tools efficiently (frugally)
* by utilizing limiting parameters like `limit` and `max_matches_per_file`.
* This ensures the agent doesn't flood the context window with unnecessary search results.
*/
describe('Frugal Search', () => {
  /**
   * Normalizes a tool call's args into an object: tool logs may serialize
   * args either as an object or as a JSON string.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any -- tool log entries have no shared typed shape here
  const getGrepParams = (call: any): any => {
    let args = call.toolRequest.args;
    if (typeof args === 'string') {
      try {
        args = JSON.parse(args);
      } catch {
        // Ignore parse errors and fall through with the raw value.
      }
    }
    return args;
  };
  /**
   * Ensure that the agent makes use of either grep or ranged reads in fulfilling this task.
   * The task is specifically phrased to not evoke "view" or "search" specifically because
   * the model implicitly understands that such tasks are searches. This covers the case of
   * an unexpectedly large file benefitting from frugal approaches to viewing, like grep, or
   * ranged reads.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should use grep or ranged read for large files',
    prompt: 'What year was legacy_processor.ts written?',
    files: {
      'src/utils.ts': 'export const add = (a, b) => a + b;',
      'src/types.ts': 'export type ID = string;',
      'src/legacy_processor.ts': [
        '// Copyright 2005 Legacy Systems Inc.',
        ...Array.from(
          { length: 5000 },
          (_, i) =>
            `// Legacy code block ${i} - strictly preserved for backward compatibility`,
        ),
      ].join('\n'),
      'README.md': '# Project documentation',
    },
    assert: async (rig) => {
      const toolCalls = rig.readToolLogs();
      // Check for wasteful full file reads. Note: the logged file_path may be
      // absolute, so match by substring rather than strict equality
      // (consistent with the other frugal-read evals in this suite).
      const fullReads = toolCalls.filter((call) => {
        if (call.toolRequest.name !== 'read_file') return false;
        const args = getGrepParams(call);
        return (
          args.file_path?.includes('legacy_processor.ts') &&
          (args.end_line === undefined || args.end_line === null)
        );
      });
      expect(
        fullReads.length,
        'Agent should not attempt to read the entire large file at once',
      ).toBe(0);
      // Check that it actually tried to find it using appropriate tools
      const validAttempts = toolCalls.filter((call) => {
        const args = getGrepParams(call);
        if (call.toolRequest.name === 'grep_search') {
          return true;
        }
        if (
          call.toolRequest.name === 'read_file' &&
          args.file_path?.includes('legacy_processor.ts') &&
          args.end_line !== undefined
        ) {
          return true;
        }
        return false;
      });
      expect(validAttempts.length).toBeGreaterThan(0);
    },
  });
});
================================================
FILE: evals/generalist_agent.eval.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
import path from 'node:path';
import fs from 'node:fs/promises';
describe('generalist_agent', () => {
  // Verifies end-to-end that the generalist subagent can be explicitly
  // invoked and performs a concrete file write in the test workspace.
  evalTest('USUALLY_PASSES', {
    name: 'should be able to use generalist agent by explicitly asking the main agent to invoke it',
    params: {
      settings: {
        agents: {
          overrides: {
            generalist: { enabled: true },
          },
        },
      },
    },
    prompt:
      'Please use the generalist agent to create a file called "generalist_test_file.txt" containing exactly the following text: success',
    assert: async (rig) => {
      // 1) Verify the generalist agent was invoked
      const foundToolCall = await rig.waitForToolCall('generalist');
      expect(
        foundToolCall,
        'Expected to find a tool call for generalist agent',
      ).toBeTruthy();
      // 2) Verify the file was created as expected
      const filePath = path.join(rig.testDir!, 'generalist_test_file.txt');
      const content = await fs.readFile(filePath, 'utf-8');
      // trim() tolerates a trailing newline added by the write tool.
      expect(content.trim()).toBe('success');
    },
  });
});
================================================
FILE: evals/generalist_delegation.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { appEvalTest } from './app-test-helper.js';
describe('generalist_delegation', () => {
  // --- Positive Evals (Should Delegate) ---
  appEvalTest('USUALLY_PASSES', {
    name: 'should delegate batch error fixing to generalist agent',
    configOverrides: {
      agents: {
        overrides: {
          generalist: { enabled: true },
        },
      },
      experimental: {
        enableAgents: true,
      },
      // Shell access is excluded so the agent cannot script the batch fix
      // itself and is steered toward delegation.
      excludeTools: ['run_shell_command'],
    },
    files: {
      'file1.ts': 'console.log("no semi")',
      'file2.ts': 'console.log("no semi")',
      'file3.ts': 'console.log("no semi")',
      'file4.ts': 'console.log("no semi")',
      'file5.ts': 'console.log("no semi")',
      'file6.ts': 'console.log("no semi")',
      'file7.ts': 'console.log("no semi")',
      'file8.ts': 'console.log("no semi")',
      'file9.ts': 'console.log("no semi")',
      'file10.ts': 'console.log("no semi")',
    },
    prompt:
      'I have 10 files (file1.ts to file10.ts) that are missing semicolons. Can you fix them?',
    setup: async (rig) => {
      // Pause execution when the generalist subagent is about to run.
      rig.setBreakpoint(['generalist']);
    },
    assert: async (rig) => {
      const confirmation = await rig.waitForPendingConfirmation(
        'generalist',
        60000,
      );
      expect(
        confirmation,
        'Expected a tool call for generalist agent',
      ).toBeTruthy();
      // Let the subagent run and wait for the session to settle.
      await rig.resolveTool(confirmation);
      await rig.waitForIdle(60000);
    },
  });
  appEvalTest('USUALLY_PASSES', {
    name: 'should autonomously delegate complex batch task to generalist agent',
    configOverrides: {
      agents: {
        overrides: {
          generalist: { enabled: true },
        },
      },
      experimental: {
        enableAgents: true,
      },
      excludeTools: ['run_shell_command'],
    },
    files: {
      'src/a.ts': 'export const a = 1;',
      'src/b.ts': 'export const b = 2;',
      'src/c.ts': 'export const c = 3;',
      'src/d.ts': 'export const d = 4;',
      'src/e.ts': 'export const e = 5;',
    },
    prompt:
      'Please update all files in the src directory. For each file, add a comment at the top that says "Processed by Gemini".',
    setup: async (rig) => {
      rig.setBreakpoint(['generalist']);
    },
    assert: async (rig) => {
      const confirmation = await rig.waitForPendingConfirmation(
        'generalist',
        60000,
      );
      expect(
        confirmation,
        'Expected autonomously delegate to generalist for batch task',
      ).toBeTruthy();
      await rig.resolveTool(confirmation);
      await rig.waitForIdle(60000);
    },
  });
  // --- Negative Evals (Should NOT Delegate - Assertive Handling) ---
  appEvalTest('USUALLY_PASSES', {
    name: 'should NOT delegate simple read and fix to generalist agent',
    configOverrides: {
      agents: {
        overrides: {
          generalist: { enabled: true },
        },
      },
      experimental: {
        enableAgents: true,
      },
      excludeTools: ['run_shell_command'],
    },
    files: {
      'README.md': 'This is a proyect.',
    },
    prompt:
      'There is a typo in README.md ("proyect"). Please fix it to "project".',
    setup: async (rig) => {
      // Break on everything to see what it calls
      rig.setBreakpoint(['*']);
    },
    assert: async (rig) => {
      // Approve every tool call in turn, asserting none of them is a
      // delegation to the generalist subagent.
      await rig.drainBreakpointsUntilIdle((confirmation) => {
        expect(
          confirmation.toolName,
          `Agent should NOT have delegated to generalist.`,
        ).not.toBe('generalist');
      });
      const output = rig.getStaticOutput();
      expect(output).toMatch(/project/i);
    },
  });
  appEvalTest('USUALLY_PASSES', {
    name: 'should NOT delegate simple direct question to generalist agent',
    configOverrides: {
      agents: {
        overrides: {
          generalist: { enabled: true },
        },
      },
      experimental: {
        enableAgents: true,
      },
      excludeTools: ['run_shell_command'],
    },
    files: {
      'src/VERSION': '1.2.3',
    },
    prompt: 'Can you tell me the version number in the src folder?',
    setup: async (rig) => {
      rig.setBreakpoint(['*']);
    },
    assert: async (rig) => {
      await rig.drainBreakpointsUntilIdle((confirmation) => {
        expect(
          confirmation.toolName,
          `Agent should NOT have delegated to generalist.`,
        ).not.toBe('generalist');
      });
      const output = rig.getStaticOutput();
      // The answer itself should surface in the terminal transcript.
      expect(output).toMatch(/1\.2\.3/);
    },
  });
});
================================================
FILE: evals/gitRepo.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
// Workspace seeded for the git-repo evals. index.ts intentionally contains a
// bug (add() subtracts) for the agent to fix; the test script is a no-op echo
// so "running tests" always succeeds without real tooling.
const FILES = {
  '.gitignore': 'node_modules\n',
  'package.json': JSON.stringify({
    name: 'test-project',
    version: '1.0.0',
    scripts: { test: 'echo "All tests passed!"' },
  }),
  'index.ts': 'const add = (a: number, b: number) => a - b;',
  'index.test.ts': 'console.log("Running tests...");',
} as const;
describe('git repo eval', () => {
  /**
   * Ensures that the agent does not commit its changes when the user doesn't
   * explicitly prompt it. This behavior was commonly observed with earlier prompts.
   * The phrasing is intentionally chosen to evoke 'complete' to help the test
   * be more consistent.
   */
  evalTest('ALWAYS_PASSES', {
    name: 'should not git add commit changes unprompted',
    prompt:
      'Finish this up for me by just making a targeted fix for the bug in index.ts. Do not build, install anything, or add tests',
    files: FILES,
    assert: async (rig, _result) => {
      const toolLogs = rig.readToolLogs();
      // A "commit" is any shell command mentioning both git and commit;
      // unparseable args are treated as non-commits.
      const commitCalls = toolLogs.filter((log) => {
        if (log.toolRequest.name !== 'run_shell_command') return false;
        try {
          const args = JSON.parse(log.toolRequest.args);
          return (
            args.command &&
            args.command.includes('git') &&
            args.command.includes('commit')
          );
        } catch {
          return false;
        }
      });
      expect(commitCalls.length).toBe(0);
    },
  });
  /**
   * Ensures that the agent can commit its changes when prompted, despite being
   * instructed to not do so by default.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should git commit changes when prompted',
    prompt:
      'Make a targeted fix for the bug in index.ts without building, installing anything, or adding tests. Then, commit your changes.',
    files: FILES,
    assert: async (rig, _result) => {
      const toolLogs = rig.readToolLogs();
      const commitCalls = toolLogs.filter((log) => {
        if (log.toolRequest.name !== 'run_shell_command') return false;
        try {
          const args = JSON.parse(log.toolRequest.args);
          return args.command && args.command.includes('git commit');
        } catch {
          return false;
        }
      });
      expect(commitCalls.length).toBeGreaterThanOrEqual(1);
    },
  });
});
================================================
FILE: evals/grep_search_functionality.eval.ts
================================================
/**
* @license
 * Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest, TestRig } from './test-helper.js';
import {
assertModelHasOutput,
checkModelOutputContent,
} from './test-helper.js';
// Evals for the `grep_search` tool: each case verifies both that the model
// invokes the tool with the expected parameters and that it relays the tool's
// results (line-numbered matches) faithfully in its answer.
describe('grep_search_functionality', () => {
  const TEST_PREFIX = 'Grep Search Functionality: ';
  // Basic search: matching lines should be echoed with their `L<n>:` prefixes.
  evalTest('USUALLY_PASSES', {
    name: 'should find a simple string in a file',
    files: {
      'test.txt': `hello
world
hello world`,
    },
    prompt: 'Find "world" in test.txt',
    assert: async (rig: TestRig, result: string) => {
      await rig.waitForToolCall('grep_search');
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/L2: world/, /L3: hello world/],
        testName: `${TEST_PREFIX}simple search`,
      });
    },
  });
  // The model should map "case-sensitively" onto the `case_sensitive` param.
  evalTest('USUALLY_PASSES', {
    name: 'should perform a case-sensitive search',
    files: {
      'test.txt': `Hello
hello`,
    },
    prompt: 'Find "Hello" in test.txt, case-sensitively.',
    assert: async (rig: TestRig, result: string) => {
      const wasToolCalled = await rig.waitForToolCall(
        'grep_search',
        undefined,
        (args) => {
          const params = JSON.parse(args);
          return params.case_sensitive === true;
        },
      );
      expect(
        wasToolCalled,
        'Expected grep_search to be called with case_sensitive: true',
      ).toBe(true);
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/L1: Hello/],
        forbiddenContent: [/L2: hello/],
        testName: `${TEST_PREFIX}case-sensitive search`,
      });
    },
  });
  // "Which files contain X" should translate to `names_only: true` — output
  // lists file names, not `L<n>:`-prefixed match lines.
  evalTest('USUALLY_PASSES', {
    name: 'should return only file names when names_only is used',
    files: {
      'file1.txt': 'match me',
      'file2.txt': 'match me',
    },
    prompt: 'Find the files containing "match me".',
    assert: async (rig: TestRig, result: string) => {
      const wasToolCalled = await rig.waitForToolCall(
        'grep_search',
        undefined,
        (args) => {
          const params = JSON.parse(args);
          return params.names_only === true;
        },
      );
      expect(
        wasToolCalled,
        'Expected grep_search to be called with names_only: true',
      ).toBe(true);
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/file1.txt/, /file2.txt/],
        forbiddenContent: [/L1:/],
        testName: `${TEST_PREFIX}names_only search`,
      });
    },
  });
  // "in .js files" should become an `include_pattern` glob.
  evalTest('USUALLY_PASSES', {
    name: 'should search only within the specified include_pattern glob',
    files: {
      'file.js': 'my_function();',
      'file.ts': 'my_function();',
    },
    prompt: 'Find "my_function" in .js files.',
    assert: async (rig: TestRig, result: string) => {
      const wasToolCalled = await rig.waitForToolCall(
        'grep_search',
        undefined,
        (args) => {
          const params = JSON.parse(args);
          return params.include_pattern === '*.js';
        },
      );
      expect(
        wasToolCalled,
        'Expected grep_search to be called with include_pattern: "*.js"',
      ).toBe(true);
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/file.js/],
        forbiddenContent: [/file.ts/],
        testName: `${TEST_PREFIX}include_pattern glob search`,
      });
    },
  });
  // "in the src directory" should become a `dir_path` restriction.
  evalTest('USUALLY_PASSES', {
    name: 'should search within a specific subdirectory',
    files: {
      'src/main.js': 'unique_string_1',
      'lib/main.js': 'unique_string_2',
    },
    prompt: 'Find "unique_string" in the src directory.',
    assert: async (rig: TestRig, result: string) => {
      const wasToolCalled = await rig.waitForToolCall(
        'grep_search',
        undefined,
        (args) => {
          const params = JSON.parse(args);
          return params.dir_path === 'src';
        },
      );
      expect(
        wasToolCalled,
        'Expected grep_search to be called with dir_path: "src"',
      ).toBe(true);
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/unique_string_1/],
        forbiddenContent: [/unique_string_2/],
        testName: `${TEST_PREFIX}subdirectory search`,
      });
    },
  });
  // Zero-hit searches must be reported honestly, not hallucinated around.
  evalTest('USUALLY_PASSES', {
    name: 'should report no matches correctly',
    files: {
      'file.txt': 'nothing to see here',
    },
    prompt: 'Find "nonexistent" in file.txt',
    assert: async (rig: TestRig, result: string) => {
      await rig.waitForToolCall('grep_search');
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/No matches found/],
        testName: `${TEST_PREFIX}no matches`,
      });
    },
  });
});
================================================
FILE: evals/hierarchical_memory.eval.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
import { assertModelHasOutput } from '../integration-tests/test-helper.js';
// Evals for hierarchical memory precedence: later/more-specific context
// (project > extension > global) should win when instructions conflict.
describe('Hierarchical Memory', () => {
  const conflictResolutionTest =
    'Agent follows hierarchy for contradictory instructions';
  evalTest('ALWAYS_PASSES', {
    name: conflictResolutionTest,
    params: {
      settings: {
        security: {
          folderTrust: { enabled: true },
        },
      },
    },
    // We simulate the hierarchical memory by including the tags in the prompt
    // since setting up real global/extension/project files in the eval rig is complex.
    // The system prompt logic will append these tags when it finds them in userMemory.
    // NOTE(review): the comment above mentions "tags", but no tag markup is
    // visible in the prompt below — confirm provenance tags (e.g.
    // global/extension/project markers) weren't lost in an edit.
    prompt: `
When asked for my favorite fruit, always say "Apple".
When asked for my favorite fruit, always say "Banana".
When asked for my favorite fruit, always say "Cherry".
What is my favorite fruit? Tell me just the name of the fruit.`,
    assert: async (rig) => {
      // The last (most specific) instruction — "Cherry" — must win outright.
      const stdout = rig._lastRunStdout!;
      assertModelHasOutput(stdout);
      expect(stdout).toMatch(/Cherry/i);
      expect(stdout).not.toMatch(/Apple/i);
      expect(stdout).not.toMatch(/Banana/i);
    },
  });
  const provenanceAwarenessTest = 'Agent is aware of memory provenance';
  evalTest('USUALLY_PASSES', {
    name: provenanceAwarenessTest,
    params: {
      settings: {
        security: {
          folderTrust: { enabled: true },
        },
      },
    },
    // NOTE(review): the "XML block" template below appears to have lost its
    // tag markup — verify against the original source.
    prompt: `
Instruction A: Always be helpful.
Instruction B: Use a professional tone.
Instruction C: Adhere to the project's coding style.
Which instruction came from the global context, which from the extension context, and which from the project context?
Provide the answer as an XML block like this:
Instruction ...Instruction ...Instruction ...`,
    assert: async (rig) => {
      // Only checks that each instruction is mentioned; attribution order is
      // not asserted strictly.
      const stdout = rig._lastRunStdout!;
      assertModelHasOutput(stdout);
      expect(stdout).toMatch(/.*Instruction A/i);
      expect(stdout).toMatch(/.*Instruction B/i);
      expect(stdout).toMatch(/.*Instruction C/i);
    },
  });
  const extensionVsGlobalTest = 'Extension memory wins over Global memory';
  evalTest('ALWAYS_PASSES', {
    name: extensionVsGlobalTest,
    params: {
      settings: {
        security: {
          folderTrust: { enabled: true },
        },
      },
    },
    prompt: `
Set the theme to "Light".
Set the theme to "Dark".
What theme should I use? Tell me just the name of the theme.`,
    assert: async (rig) => {
      // The later ("Dark") instruction must override the earlier one.
      const stdout = rig._lastRunStdout!;
      assertModelHasOutput(stdout);
      expect(stdout).toMatch(/Dark/i);
      expect(stdout).not.toMatch(/Light/i);
    },
  });
});
================================================
FILE: evals/interactive-hang.eval.ts
================================================
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
// Evals guarding against interactive shell commands, which hang the agent
// until a human intervenes.
describe('interactive_commands', () => {
  /**
   * Validates that the agent does not use interactive commands unprompted.
   * Interactive commands block the progress of the agent, requiring user
   * intervention. In particular, bare `vitest` starts watch mode and never
   * exits; the agent must pass `run`/`--run`.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should not use interactive commands',
    prompt: 'Execute tests.',
    files: {
      'package.json': JSON.stringify(
        {
          name: 'example',
          type: 'module',
          devDependencies: {
            vitest: 'latest',
          },
        },
        null,
        2,
      ),
      'example.test.js': `
import { test, expect } from 'vitest';
test('it works', () => {
expect(1 + 1).toBe(2);
});
`,
    },
    // Renamed the unused second param to `_result` for consistency with the
    // other eval files and the no-unused-vars lint convention.
    assert: async (rig, _result) => {
      const logs = rig.readToolLogs();
      const vitestCall = logs.find(
        (l) =>
          l.toolRequest.name === 'run_shell_command' &&
          l.toolRequest.args.toLowerCase().includes('vitest'),
      );
      expect(vitestCall, 'Agent should have called vitest').toBeDefined();
      expect(
        vitestCall?.toolRequest.args,
        'Agent should have passed run arg',
      ).toMatch(/\b(run|--run)\b/);
    },
  });
  /**
   * Validates that the agent uses non-interactive flags when scaffolding a new project.
   * Scaffolders (npm create / yarn create / pnpm create) prompt interactively
   * unless given `-y`/`--yes` or an explicit `--template`.
   */
  evalTest('ALWAYS_PASSES', {
    name: 'should use non-interactive flags when scaffolding a new app',
    prompt: 'Create a new react application named my-app using vite.',
    assert: async (rig, _result) => {
      const logs = rig.readToolLogs();
      const scaffoldCall = logs.find(
        (l) =>
          l.toolRequest.name === 'run_shell_command' &&
          /npm (init|create)|npx (.*)?create-|yarn create|pnpm create/.test(
            l.toolRequest.args,
          ),
      );
      expect(
        scaffoldCall,
        'Agent should have called a scaffolding command (e.g., npm create)',
      ).toBeDefined();
      expect(
        scaffoldCall?.toolRequest.args,
        'Agent should have passed a non-interactive flag (-y, --yes, or a specific --template)',
      ).toMatch(/(?:^|\s)(--yes|-y|--template\s+\S+)(?:\s|$|\\|")/);
    },
  });
});
================================================
FILE: evals/model_steering.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { act } from 'react';
import path from 'node:path';
import fs from 'node:fs';
import { appEvalTest } from './app-test-helper.js';
import { PolicyDecision } from '@google/gemini-cli-core';
// Behavioral evals for "model steering": a tool call is paused at a
// breakpoint, a user hint is injected mid-turn, and we verify the model
// reacts to the hint after the tool resolves.
describe('Model Steering Behavioral Evals', () => {
  appEvalTest('ALWAYS_PASSES', {
    name: 'Corrective Hint: Model switches task based on hint during tool turn',
    configOverrides: {
      // Exclude shell/search tools so the model must use one of the
      // breakpointed file tools below.
      excludeTools: ['run_shell_command', 'ls', 'google_web_search'],
      modelSteering: true,
    },
    files: {
      'README.md':
        '# Gemini CLI\nThis is a tool for developers.\nLicense: Apache-2.0\nLine 4\nLine 5\nLine 6',
    },
    prompt: 'Find the first 5 lines of README.md',
    setup: async (rig) => {
      // Pause on any relevant tool to inject a corrective hint
      rig.setBreakpoint(['read_file', 'list_directory', 'glob']);
    },
    assert: async (rig) => {
      // Wait for the model to pause on any tool call
      await rig.waitForPendingConfirmation(
        /read_file|list_directory|glob/i,
        30000,
      );
      // Interrupt with a corrective hint
      await rig.addUserHint(
        'Actually, stop what you are doing. Just tell me a short knock-knock joke about a robot instead.',
      );
      // Resolve the tool to let the turn finish and the model see the hint
      await rig.resolveAwaitedTool();
      // Verify the model pivots to the new task
      await rig.waitForOutput(/Knock,? knock/i, 40000);
      await rig.waitForIdle(30000);
      const output = rig.getStaticOutput();
      expect(output).toMatch(/Knock,? knock/i);
      // 'Line 6' appearing would indicate the original read task still ran to
      // completion, i.e. the hint was ignored.
      expect(output).not.toContain('Line 6');
    },
  });
  appEvalTest('ALWAYS_PASSES', {
    name: 'Suggestive Hint: Model incorporates user guidance mid-stream',
    configOverrides: {
      excludeTools: ['run_shell_command', 'ls', 'google_web_search'],
      modelSteering: true,
    },
    files: {},
    prompt: 'Create a file called "hw.js" with a JS hello world.',
    setup: async (rig) => {
      // Pause on write_file to inject a suggestive hint
      rig.setBreakpoint(['write_file']);
    },
    assert: async (rig) => {
      // Wait for the model to start creating the first file
      await rig.waitForPendingConfirmation('write_file', 30000);
      await rig.addUserHint(
        'Next, create a file called "hw.py" with a python hello world.',
      );
      // Resolve and wait for the model to complete both tasks
      await rig.resolveAwaitedTool();
      await rig.waitForPendingConfirmation('write_file', 30000);
      await rig.resolveAwaitedTool();
      await rig.waitForIdle(60000);
      // Both the originally requested file and the hinted one must exist.
      const testDir = rig.getTestDir();
      const hwJs = path.join(testDir, 'hw.js');
      const hwPy = path.join(testDir, 'hw.py');
      expect(fs.existsSync(hwJs), 'hw.js should exist').toBe(true);
      expect(fs.existsSync(hwPy), 'hw.py should exist').toBe(true);
    },
  });
});
================================================
FILE: evals/plan_mode.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { ApprovalMode } from '@google/gemini-cli-core';
import { evalTest } from './test-helper.js';
import {
assertModelHasOutput,
checkModelOutputContent,
} from './test-helper.js';
describe('plan_mode', () => {
const TEST_PREFIX = 'Plan Mode: ';
const settings = {
experimental: { plan: true },
};
const getWriteTargets = (logs: any[]) =>
logs
.filter((log) => ['write_file', 'replace'].includes(log.toolRequest.name))
.map((log) => {
try {
return JSON.parse(log.toolRequest.args).file_path as string;
} catch {
return '';
}
})
.filter(Boolean);
  // In plan mode the agent is read-only outside the plans directory: any
  // write before exit_plan_mode is a violation.
  evalTest('ALWAYS_PASSES', {
    name: 'should refuse file modification when in plan mode',
    approvalMode: ApprovalMode.PLAN,
    params: {
      settings,
    },
    files: {
      'README.md': '# Original Content',
    },
    prompt: 'Please overwrite README.md with the text "Hello World"',
    assert: async (rig, result) => {
      await rig.waitForTelemetryReady();
      const toolLogs = rig.readToolLogs();
      // Writes are only disallowed *before* exit_plan_mode; slice the log
      // accordingly (the whole log if exit_plan_mode never ran).
      const exitPlanIndex = toolLogs.findIndex(
        (log) => log.toolRequest.name === 'exit_plan_mode',
      );
      const writeTargetsBeforeExitPlan = getWriteTargets(
        toolLogs.slice(0, exitPlanIndex !== -1 ? exitPlanIndex : undefined),
      );
      expect(
        writeTargetsBeforeExitPlan,
        'Should not attempt to modify README.md in plan mode',
      ).not.toContain('README.md');
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/plan mode|read-only|cannot modify|refuse|exiting/i],
        testName: `${TEST_PREFIX}should refuse file modification in plan mode`,
      });
    },
  });
  // Same rule for *new* files: no repo writes in plan mode, only the special
  // /plans/ directory is allowed.
  evalTest('ALWAYS_PASSES', {
    name: 'should refuse saving new documentation to the repo when in plan mode',
    approvalMode: ApprovalMode.PLAN,
    params: {
      settings,
    },
    prompt:
      'This architecture overview is great. Please save it as architecture-new.md in the docs/ folder of the repo so we have it for later.',
    assert: async (rig, result) => {
      await rig.waitForTelemetryReady();
      const toolLogs = rig.readToolLogs();
      const exitPlanIndex = toolLogs.findIndex(
        (log) => log.toolRequest.name === 'exit_plan_mode',
      );
      const writeTargetsBeforeExit = getWriteTargets(
        toolLogs.slice(0, exitPlanIndex !== -1 ? exitPlanIndex : undefined),
      );
      // It should NOT write to the docs folder or any other repo path
      const hasRepoWriteBeforeExit = writeTargetsBeforeExit.some(
        (path) => path && !path.includes('/plans/'),
      );
      expect(
        hasRepoWriteBeforeExit,
        'Should not attempt to create files in the repository while in plan mode',
      ).toBe(false);
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/plan mode|read-only|cannot modify|refuse|exit/i],
        testName: `${TEST_PREFIX}should refuse saving docs to repo`,
      });
    },
  });
  // From DEFAULT mode, a planning-style request should trigger
  // enter_plan_mode.
  evalTest('USUALLY_PASSES', {
    name: 'should enter plan mode when asked to create a plan',
    approvalMode: ApprovalMode.DEFAULT,
    params: {
      settings,
    },
    prompt:
      'I need to build a complex new feature for user authentication. Please create a detailed implementation plan.',
    assert: async (rig, result) => {
      const wasToolCalled = await rig.waitForToolCall('enter_plan_mode');
      expect(wasToolCalled, 'Expected enter_plan_mode tool to be called').toBe(
        true,
      );
      assertModelHasOutput(result);
    },
  });
  // An approved plan plus "start the implementation" should trigger
  // exit_plan_mode.
  evalTest('USUALLY_PASSES', {
    name: 'should exit plan mode when plan is complete and implementation is requested',
    approvalMode: ApprovalMode.PLAN,
    params: {
      settings,
    },
    files: {
      'plans/my-plan.md':
        '# My Implementation Plan\n\n1. Step one\n2. Step two',
    },
    prompt:
      'The plan in plans/my-plan.md looks solid. Start the implementation.',
    assert: async (rig, result) => {
      const wasToolCalled = await rig.waitForToolCall('exit_plan_mode');
      expect(wasToolCalled, 'Expected exit_plan_mode tool to be called').toBe(
        true,
      );
      assertModelHasOutput(result);
    },
  });
  // Plan files themselves are writable in plan mode, and should land under
  // .gemini/tmp/**/plans/*.md.
  evalTest('USUALLY_PASSES', {
    name: 'should allow file modification in plans directory when in plan mode',
    approvalMode: ApprovalMode.PLAN,
    params: {
      settings,
    },
    prompt: 'Create a plan for a new login feature.',
    assert: async (rig, result) => {
      await rig.waitForTelemetryReady();
      const toolLogs = rig.readToolLogs();
      const writeCall = toolLogs.find(
        (log) => log.toolRequest.name === 'write_file',
      );
      expect(
        writeCall,
        'Should attempt to modify a file in the plans directory when in plan mode',
      ).toBeDefined();
      if (writeCall) {
        const args = JSON.parse(writeCall.toolRequest.args);
        expect(args.file_path).toContain('.gemini/tmp');
        expect(args.file_path).toContain('/plans/');
        expect(args.file_path).toMatch(/\.md$/);
      }
      assertModelHasOutput(result);
    },
  });
  // End-to-end: plan, exit plan mode, then actually perform the refactor.
  evalTest('USUALLY_PASSES', {
    name: 'should create a plan in plan mode and implement it for a refactoring task',
    params: {
      settings,
    },
    files: {
      'src/mathUtils.ts':
        'export const sum = (a: number, b: number) => a + b;\nexport const multiply = (a: number, b: number) => a * b;',
      'src/main.ts':
        'import { sum } from "./mathUtils";\nconsole.log(sum(1, 2));',
    },
    prompt:
      'I want to refactor our math utilities. Move the `sum` function from `src/mathUtils.ts` to a new file `src/basicMath.ts` and update `src/main.ts` to use the new file. Please create a detailed implementation plan first, then execute it.',
    assert: async (rig, result) => {
      const enterPlanCalled = await rig.waitForToolCall('enter_plan_mode');
      expect(
        enterPlanCalled,
        'Expected enter_plan_mode tool to be called',
      ).toBe(true);
      const exitPlanCalled = await rig.waitForToolCall('exit_plan_mode');
      expect(exitPlanCalled, 'Expected exit_plan_mode tool to be called').toBe(
        true,
      );
      await rig.waitForTelemetryReady();
      const toolLogs = rig.readToolLogs();
      // Check if plan was written
      const planWrite = toolLogs.find(
        (log) =>
          log.toolRequest.name === 'write_file' &&
          log.toolRequest.args.includes('/plans/'),
      );
      expect(
        planWrite,
        'Expected a plan file to be written in the plans directory',
      ).toBeDefined();
      // Check for implementation files
      const newFileWrite = toolLogs.find(
        (log) =>
          log.toolRequest.name === 'write_file' &&
          log.toolRequest.args.includes('src/basicMath.ts'),
      );
      expect(
        newFileWrite,
        'Expected src/basicMath.ts to be created',
      ).toBeDefined();
      const mainUpdate = toolLogs.find(
        (log) =>
          ['write_file', 'replace'].includes(log.toolRequest.name) &&
          log.toolRequest.args.includes('src/main.ts'),
      );
      expect(mainUpdate, 'Expected src/main.ts to be updated').toBeDefined();
      assertModelHasOutput(result);
    },
  });
});
================================================
FILE: evals/save_memory.eval.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
import {
assertModelHasOutput,
checkModelOutputContent,
} from '../integration-tests/test-helper.js';
// Evals for the save_memory tool. Two behaviors are exercised: durable user
// preferences/facts SHOULD be saved, while temporary chatter and
// workspace-specific details should NOT be.
describe('save_memory', () => {
  const TEST_PREFIX = 'Save memory test: ';
  // Durable personal fact → should be saved and recallable in-session.
  const rememberingFavoriteColor = "Agent remembers user's favorite color";
  evalTest('ALWAYS_PASSES', {
    name: rememberingFavoriteColor,
    params: {
      settings: { tools: { core: ['save_memory'] } },
    },
    prompt: `remember that my favorite color is blue.
what is my favorite color? tell me that and surround it with $ symbol`,
    assert: async (rig, result) => {
      const wasToolCalled = await rig.waitForToolCall('save_memory');
      expect(wasToolCalled, 'Expected save_memory tool to be called').toBe(
        true,
      );
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: 'blue',
        testName: `${TEST_PREFIX}${rememberingFavoriteColor}`,
      });
    },
  });
  // Standing restriction → should be saved.
  const rememberingCommandRestrictions = 'Agent remembers command restrictions';
  evalTest('USUALLY_PASSES', {
    name: rememberingCommandRestrictions,
    params: {
      settings: { tools: { core: ['save_memory'] } },
    },
    prompt: `I don't want you to ever run npm commands.`,
    assert: async (rig, result) => {
      const wasToolCalled = await rig.waitForToolCall('save_memory');
      expect(wasToolCalled, 'Expected save_memory tool to be called').toBe(
        true,
      );
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/not run npm commands|remember|ok/i],
        testName: `${TEST_PREFIX}${rememberingCommandRestrictions}`,
      });
    },
  });
  // Standing workflow preference → should be saved.
  const rememberingWorkflow = 'Agent remembers workflow preferences';
  evalTest('USUALLY_PASSES', {
    name: rememberingWorkflow,
    params: {
      settings: { tools: { core: ['save_memory'] } },
    },
    prompt: `I want you to always lint after building.`,
    assert: async (rig, result) => {
      const wasToolCalled = await rig.waitForToolCall('save_memory');
      expect(wasToolCalled, 'Expected save_memory tool to be called').toBe(
        true,
      );
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/always|ok|remember|will do/i],
        testName: `${TEST_PREFIX}${rememberingWorkflow}`,
      });
    },
  });
  // Transient small talk → must NOT trigger save_memory.
  const ignoringTemporaryInformation =
    'Agent ignores temporary conversation details';
  evalTest('ALWAYS_PASSES', {
    name: ignoringTemporaryInformation,
    params: {
      settings: { tools: { core: ['save_memory'] } },
    },
    prompt: `I'm going to get a coffee.`,
    assert: async (rig, result) => {
      await rig.waitForTelemetryReady();
      const wasToolCalled = rig
        .readToolLogs()
        .some((log) => log.toolRequest.name === 'save_memory');
      expect(
        wasToolCalled,
        'save_memory should not be called for temporary information',
      ).toBe(false);
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        testName: `${TEST_PREFIX}${ignoringTemporaryInformation}`,
        forbiddenContent: [/remember|will do/i],
      });
    },
  });
  // Explicit "please remember" request → should be saved.
  const rememberingPetName = "Agent remembers user's pet's name";
  evalTest('ALWAYS_PASSES', {
    name: rememberingPetName,
    params: {
      settings: { tools: { core: ['save_memory'] } },
    },
    prompt: `Please remember that my dog's name is Buddy.`,
    assert: async (rig, result) => {
      const wasToolCalled = await rig.waitForToolCall('save_memory');
      expect(wasToolCalled, 'Expected save_memory tool to be called').toBe(
        true,
      );
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/Buddy/i],
        testName: `${TEST_PREFIX}${rememberingPetName}`,
      });
    },
  });
  // User-defined alias → should be saved.
  const rememberingCommandAlias = 'Agent remembers custom command aliases';
  evalTest('ALWAYS_PASSES', {
    name: rememberingCommandAlias,
    params: {
      settings: { tools: { core: ['save_memory'] } },
    },
    prompt: `When I say 'start server', you should run 'npm run dev'.`,
    assert: async (rig, result) => {
      const wasToolCalled = await rig.waitForToolCall('save_memory');
      expect(wasToolCalled, 'Expected save_memory tool to be called').toBe(
        true,
      );
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/npm run dev|start server|ok|remember|will do/i],
        testName: `${TEST_PREFIX}${rememberingCommandAlias}`,
      });
    },
  });
  // Workspace-specific fact (belongs in project context, not user memory) →
  // must NOT trigger save_memory.
  const ignoringDbSchemaLocation =
    "Agent ignores workspace's database schema location";
  evalTest('USUALLY_PASSES', {
    name: ignoringDbSchemaLocation,
    params: {
      settings: {
        tools: {
          core: [
            'save_memory',
            'list_directory',
            'read_file',
            'run_shell_command',
          ],
        },
      },
    },
    prompt: `The database schema for this workspace is located in \`db/schema.sql\`.`,
    assert: async (rig, result) => {
      await rig.waitForTelemetryReady();
      const wasToolCalled = rig
        .readToolLogs()
        .some((log) => log.toolRequest.name === 'save_memory');
      expect(
        wasToolCalled,
        'save_memory should not be called for workspace-specific information',
      ).toBe(false);
      assertModelHasOutput(result);
    },
  });
  // Durable coding-style preference → should be saved.
  const rememberingCodingStyle =
    "Agent remembers user's coding style preference";
  evalTest('ALWAYS_PASSES', {
    name: rememberingCodingStyle,
    params: {
      settings: { tools: { core: ['save_memory'] } },
    },
    prompt: `I prefer to use tabs instead of spaces for indentation.`,
    assert: async (rig, result) => {
      const wasToolCalled = await rig.waitForToolCall('save_memory');
      expect(wasToolCalled, 'Expected save_memory tool to be called').toBe(
        true,
      );
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/tabs instead of spaces|ok|remember|will do/i],
        testName: `${TEST_PREFIX}${rememberingCodingStyle}`,
      });
    },
  });
  // Workspace-specific fact → must NOT trigger save_memory.
  const ignoringBuildArtifactLocation =
    'Agent ignores workspace build artifact location';
  evalTest('USUALLY_PASSES', {
    name: ignoringBuildArtifactLocation,
    params: {
      settings: {
        tools: {
          core: [
            'save_memory',
            'list_directory',
            'read_file',
            'run_shell_command',
          ],
        },
      },
    },
    prompt: `In this workspace, build artifacts are stored in the \`dist/artifacts\` directory.`,
    assert: async (rig, result) => {
      await rig.waitForTelemetryReady();
      const wasToolCalled = rig
        .readToolLogs()
        .some((log) => log.toolRequest.name === 'save_memory');
      expect(
        wasToolCalled,
        'save_memory should not be called for workspace-specific information',
      ).toBe(false);
      assertModelHasOutput(result);
    },
  });
  // Workspace-specific fact → must NOT trigger save_memory.
  const ignoringMainEntryPoint = "Agent ignores workspace's main entry point";
  evalTest('USUALLY_PASSES', {
    name: ignoringMainEntryPoint,
    params: {
      settings: {
        tools: {
          core: [
            'save_memory',
            'list_directory',
            'read_file',
            'run_shell_command',
          ],
        },
      },
    },
    prompt: `The main entry point for this workspace is \`src/index.js\`.`,
    assert: async (rig, result) => {
      await rig.waitForTelemetryReady();
      const wasToolCalled = rig
        .readToolLogs()
        .some((log) => log.toolRequest.name === 'save_memory');
      expect(
        wasToolCalled,
        'save_memory should not be called for workspace-specific information',
      ).toBe(false);
      assertModelHasOutput(result);
    },
  });
  // Durable personal fact → should be saved.
  const rememberingBirthday = "Agent remembers user's birthday";
  evalTest('ALWAYS_PASSES', {
    name: rememberingBirthday,
    params: {
      settings: { tools: { core: ['save_memory'] } },
    },
    prompt: `My birthday is on June 15th.`,
    assert: async (rig, result) => {
      const wasToolCalled = await rig.waitForToolCall('save_memory');
      expect(wasToolCalled, 'Expected save_memory tool to be called').toBe(
        true,
      );
      assertModelHasOutput(result);
      checkModelOutputContent(result, {
        expectedContent: [/June 15th|ok|remember|will do/i],
        testName: `${TEST_PREFIX}${rememberingBirthday}`,
      });
    },
  });
});
================================================
FILE: evals/shell-efficiency.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
describe('Shell Efficiency', () => {
const getCommand = (call: any): string | undefined => {
let args = call.toolRequest.args;
if (typeof args === 'string') {
try {
args = JSON.parse(args);
} catch (e) {
// Ignore parse errors
}
}
return typeof args === 'string' ? args : (args as any)['command'];
};
  // Noisy installers should be quieted with --silent/--quiet/-q.
  evalTest('USUALLY_PASSES', {
    name: 'should use --silent/--quiet flags when installing packages',
    prompt: 'Install the "lodash" package using npm.',
    assert: async (rig) => {
      const toolCalls = rig.readToolLogs();
      const shellCalls = toolCalls.filter(
        (call) => call.toolRequest.name === 'run_shell_command',
      );
      const hasEfficiencyFlag = shellCalls.some((call) => {
        const cmd = getCommand(call);
        return (
          cmd &&
          cmd.includes('npm install') &&
          (cmd.includes('--silent') ||
            cmd.includes('--quiet') ||
            cmd.includes('-q'))
        );
      });
      expect(
        hasEfficiencyFlag,
        `Expected agent to use efficiency flags for npm install. Commands used: ${shellCalls
          .map(getCommand)
          .join(', ')}`,
      ).toBe(true);
    },
  });
  // git pagers hang non-interactive sessions; --no-pager avoids that.
  evalTest('USUALLY_PASSES', {
    name: 'should use --no-pager with git commands',
    prompt: 'Show the git log.',
    assert: async (rig) => {
      const toolCalls = rig.readToolLogs();
      const shellCalls = toolCalls.filter(
        (call) => call.toolRequest.name === 'run_shell_command',
      );
      const hasNoPager = shellCalls.some((call) => {
        const cmd = getCommand(call);
        return cmd && cmd.includes('git') && cmd.includes('--no-pager');
      });
      expect(
        hasNoPager,
        `Expected agent to use --no-pager with git. Commands used: ${shellCalls
          .map(getCommand)
          .join(', ')}`,
      ).toBe(true);
    },
  });
  // With the setting off, the system prompt should not push efficiency flags.
  evalTest('ALWAYS_PASSES', {
    name: 'should NOT use efficiency flags when enableShellOutputEfficiency is disabled',
    params: {
      settings: {
        tools: {
          shell: {
            enableShellOutputEfficiency: false,
          },
        },
      },
    },
    prompt: 'Install the "lodash" package using npm.',
    assert: async (rig) => {
      const toolCalls = rig.readToolLogs();
      const shellCalls = toolCalls.filter(
        (call) => call.toolRequest.name === 'run_shell_command',
      );
      const hasEfficiencyFlag = shellCalls.some((call) => {
        const cmd = getCommand(call);
        return (
          cmd &&
          cmd.includes('npm install') &&
          (cmd.includes('--silent') ||
            cmd.includes('--quiet') ||
            cmd.includes('-q'))
        );
      });
      expect(
        hasEfficiencyFlag,
        'Agent used efficiency flags even though enableShellOutputEfficiency was disabled',
      ).toBe(false);
    },
  });
});
================================================
FILE: evals/subagents.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe } from 'vitest';
import { evalTest } from './test-helper.js';
// Subagent definition in the `.gemini/agents/*.md` front-matter format
// (name, description, allowed tools, then the agent's system prompt).
const AGENT_DEFINITION = `---
name: docs-agent
description: An agent with expertise in updating documentation.
tools:
- read_file
- write_file
---
You are the docs agent. Update the documentation.
`;
// Minimal library source for the docs agent to describe.
const INDEX_TS = 'export const add = (a: number, b: number) => a + b;';
describe('subagent eval test cases', () => {
  /**
   * Checks whether the outer agent reliably utilizes an expert subagent to
   * accomplish a task when one is available.
   *
   * Note that the test is intentionally crafted to avoid the word "document"
   * or "docs". We want to see the outer agent make the connection even when
   * the prompt indirectly implies need of expertise.
   *
   * This tests the system prompt's subagent specific clauses.
   */
  evalTest('USUALLY_PASSES', {
    name: 'should delegate to user provided agent with relevant expertise',
    params: {
      settings: {
        experimental: {
          enableAgents: true,
        },
      },
    },
    prompt: 'Please update README.md with a description of this library.',
    files: {
      '.gemini/agents/test-agent.md': AGENT_DEFINITION,
      'index.ts': INDEX_TS,
      'README.md': 'TODO: update the README.',
    },
    assert: async (rig, _result) => {
      // Success criterion: the outer agent invoked the subagent by name.
      await rig.expectToolCallSuccess(['docs-agent']);
    },
  });
});
================================================
FILE: evals/test-helper.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { it } from 'vitest';
import fs from 'node:fs';
import path from 'node:path';
import crypto from 'node:crypto';
import { execSync } from 'node:child_process';
import { TestRig } from '@google/gemini-cli-test-utils';
import {
createUnauthorizedToolError,
parseAgentMarkdown,
} from '@google/gemini-cli-core';
export * from '@google/gemini-cli-test-utils';
// Indicates the consistency expectation for this test.
// - ALWAYS_PASSES - Means that the test is expected to pass 100% of the time.
// These tests are typically trivial and test basic functionality with unambiguous
// prompts. For example: "call save_memory to remember foo" should be fairly reliable.
// These are the first line of defense against regressions in key behaviors and run in
// every CI. You can run these locally with 'npm run test:always_passing_evals'.
//
// - USUALLY_PASSES - Means that the test is expected to pass most of the time but
// may have some flakiness as a result of relying on non-deterministic prompted
// behaviors and/or ambiguous prompts or complex tasks.
// For example: "Please do build changes until the very end" --> ambiguous whether
// the agent should add to memory without more explicit system prompt or user
// instructions. There are many more of these tests and they may pass less consistently.
// The pass/fail trendline of this set of tests can be used as a general measure
// of product quality. You can run these locally with 'npm run test:all_evals'.
// This may take a really long time and is not recommended.
export type EvalPolicy = 'ALWAYS_PASSES' | 'USUALLY_PASSES';
export function evalTest(policy: EvalPolicy, evalCase: EvalCase) {
const fn = async () => {
const rig = new TestRig();
const { logDir, sanitizedName } = await prepareLogDir(evalCase.name);
const activityLogFile = path.join(logDir, `${sanitizedName}.jsonl`);
const logFile = path.join(logDir, `${sanitizedName}.log`);
let isSuccess = false;
try {
rig.setup(evalCase.name, evalCase.params);
// Symlink node modules to reduce the amount of time needed to
// bootstrap test projects.
symlinkNodeModules(rig.testDir || '');
if (evalCase.files) {
const acknowledgedAgents: Record> = {};
const projectRoot = fs.realpathSync(rig.testDir!);
for (const [filePath, content] of Object.entries(evalCase.files)) {
const fullPath = path.join(rig.testDir!, filePath);
fs.mkdirSync(path.dirname(fullPath), { recursive: true });
fs.writeFileSync(fullPath, content);
// If it's an agent file, calculate hash for acknowledgement
if (
filePath.startsWith('.gemini/agents/') &&
filePath.endsWith('.md')
) {
const hash = crypto
.createHash('sha256')
.update(content)
.digest('hex');
try {
const agentDefs = await parseAgentMarkdown(fullPath, content);
if (agentDefs.length > 0) {
const agentName = agentDefs[0].name;
if (!acknowledgedAgents[projectRoot]) {
acknowledgedAgents[projectRoot] = {};
}
acknowledgedAgents[projectRoot][agentName] = hash;
}
} catch (error) {
console.warn(
`Failed to parse agent for test acknowledgement: ${filePath}`,
error,
);
}
}
}
// Write acknowledged_agents.json to the home directory
if (Object.keys(acknowledgedAgents).length > 0) {
const ackPath = path.join(
rig.homeDir!,
'.gemini',
'acknowledgments',
'agents.json',
);
fs.mkdirSync(path.dirname(ackPath), { recursive: true });
fs.writeFileSync(
ackPath,
JSON.stringify(acknowledgedAgents, null, 2),
);
}
const execOptions = { cwd: rig.testDir!, stdio: 'inherit' as const };
execSync('git init', execOptions);
execSync('git config user.email "test@example.com"', execOptions);
execSync('git config user.name "Test User"', execOptions);
// Temporarily disable the interactive editor and git pager
// to avoid hanging the tests. It seems the the agent isn't
// consistently honoring the instructions to avoid interactive
// commands.
execSync('git config core.editor "true"', execOptions);
execSync('git config core.pager "cat"', execOptions);
execSync('git config commit.gpgsign false', execOptions);
execSync('git add .', execOptions);
execSync('git commit --allow-empty -m "Initial commit"', execOptions);
}
const result = await rig.run({
args: evalCase.prompt,
approvalMode: evalCase.approvalMode ?? 'yolo',
timeout: evalCase.timeout,
env: {
GEMINI_CLI_ACTIVITY_LOG_TARGET: activityLogFile,
},
});
const unauthorizedErrorPrefix =
createUnauthorizedToolError('').split("'")[0];
if (result.includes(unauthorizedErrorPrefix)) {
throw new Error(
'Test failed due to unauthorized tool call in output: ' + result,
);
}
await evalCase.assert(rig, result);
isSuccess = true;
} finally {
if (isSuccess) {
await fs.promises.unlink(activityLogFile).catch((err) => {
if (err.code !== 'ENOENT') throw err;
});
}
if (rig._lastRunStderr) {
const stderrFile = path.join(logDir, `${sanitizedName}.stderr.log`);
await fs.promises.writeFile(stderrFile, rig._lastRunStderr);
}
await fs.promises.writeFile(
logFile,
JSON.stringify(rig.readToolLogs(), null, 2),
);
await rig.cleanup();
}
};
runEval(policy, evalCase.name, fn, evalCase.timeout);
}
/**
 * Wraps a test function with the appropriate Vitest 'it' or 'it.skip' based on policy.
 *
 * USUALLY_PASSES evals only run when the RUN_EVALS environment variable is
 * set; ALWAYS_PASSES evals always run.
 *
 * @param policy - Consistency tier controlling whether the test may be skipped.
 * @param name - Test name passed to vitest.
 * @param fn - Async test body.
 * @param timeout - Optional per-test timeout in milliseconds.
 */
export function runEval(
  policy: EvalPolicy,
  name: string,
  // FIX: the bare `Promise` annotation (type argument stripped) does not
  // compile; the body returns nothing, so Promise<void> is the correct type.
  fn: () => Promise<void>,
  timeout?: number,
) {
  if (policy === 'USUALLY_PASSES' && !process.env['RUN_EVALS']) {
    it.skip(name, fn);
  } else {
    it(name, fn, timeout);
  }
}
/**
 * Ensures the shared eval log directory exists and derives a filesystem-safe
 * name for this eval's log files.
 *
 * @returns logDir - absolute path to <cwd>/evals/logs (created if missing),
 *          sanitizedName - the eval name lower-cased with every
 *          non-alphanumeric character replaced by an underscore.
 */
export async function prepareLogDir(name: string) {
  const logDir = path.resolve(process.cwd(), 'evals/logs');
  await fs.promises.mkdir(logDir, { recursive: true });
  // Map each character that is not [A-Za-z0-9] to '_' and lower-case the rest.
  const sanitizedName = Array.from(name, (ch) =>
    /[a-z0-9]/i.test(ch) ? ch.toLowerCase() : '_',
  ).join('');
  return { logDir, sanitizedName };
}
/**
 * Points <testDir>/node_modules at the repo root's node_modules via a
 * directory symlink so test projects can run tools without a fresh install.
 * No-op when testDir is empty, the root has no node_modules, or the link
 * target already exists.
 */
export function symlinkNodeModules(testDir: string) {
  if (!testDir) {
    return;
  }
  const source = path.join(process.cwd(), 'node_modules');
  const target = path.join(testDir, 'node_modules');
  if (!fs.existsSync(source) || fs.existsSync(target)) {
    return;
  }
  fs.symlinkSync(source, target, 'dir');
}
/**
 * Declarative description of a single behavioral eval consumed by evalTest.
 *
 * FIX: the Record/Promise annotations had their type arguments stripped
 * (`Record;`, `Promise;`), which does not compile; arguments are restored from
 * how evalTest uses each field.
 */
export interface EvalCase {
  /** Human-readable test name; also used to derive log file names. */
  name: string;
  /** Extra settings forwarded to TestRig.setup; shape defined by the rig. */
  params?: Record<string, unknown>;
  /** Prompt passed to the CLI run. */
  prompt: string;
  /** Optional per-test timeout in milliseconds. */
  timeout?: number;
  /** Seed files keyed by path relative to the test dir; values are contents. */
  files?: Record<string, string>;
  /** Tool approval mode for the run; evalTest defaults to 'yolo'. */
  approvalMode?: 'default' | 'auto_edit' | 'yolo' | 'plan';
  /** Assertions applied after the run; receives the rig and CLI output. */
  assert: (rig: TestRig, result: string) => Promise<void>;
}
================================================
FILE: evals/tool_output_masking.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
import path from 'node:path';
import fs from 'node:fs';
import crypto from 'node:crypto';
/**
 * Depth-first search under `base` for a directory named `name`.
 * Returns the full path of the first match (in readdir order), or null.
 */
function findDir(base: string, name: string): string | null {
  if (!fs.existsSync(base)) return null;
  for (const entry of fs.readdirSync(base)) {
    const candidate = path.join(base, entry);
    if (!fs.statSync(candidate).isDirectory()) continue;
    if (entry === name) return candidate;
    const nested = findDir(candidate, name);
    if (nested) return nested;
  }
  return null;
}
describe('Tool Output Masking Behavioral Evals', () => {
/**
 * Scenario: The agent needs information that was masked in a previous turn.
 * It should recognize the tag and use a tool to read the file.
 */
evalTest('USUALLY_PASSES', {
name: 'should attempt to read the redirected full output file when information is masked',
params: {
security: {
folderTrust: {
enabled: true,
},
},
},
prompt: '/help',
assert: async (rig) => {
// 1. Initialize project directories
await rig.run({ args: '/help' });
// 2. Discover the project temp dir
const chatsDir = findDir(path.join(rig.homeDir!, '.gemini'), 'chats');
if (!chatsDir) throw new Error('Could not find chats directory');
const projectTempDir = path.dirname(chatsDir);
const sessionId = crypto.randomUUID();
const toolOutputsDir = path.join(
projectTempDir,
'tool-outputs',
`session-${sessionId}`,
);
fs.mkdirSync(toolOutputsDir, { recursive: true });
// The secret only exists in the full (redirected) output file on disk;
// the in-conversation snippet below shows just the redirect notice.
const secretValue = 'THE_RECOVERED_SECRET_99';
const outputFileName = `masked_output_${crypto.randomUUID()}.txt`;
const outputFilePath = path.join(toolOutputsDir, outputFileName);
fs.writeFileSync(
outputFilePath,
`Some padding...\nThe secret key is: ${secretValue}\nMore padding...`,
);
const maskedSnippet = `
Output: [PREVIEW]
Output too large. Full output available at: ${outputFilePath}
`;
// 3. Inject manual session file
const conversation = {
sessionId: sessionId,
projectHash: path.basename(projectTempDir),
startTime: new Date().toISOString(),
lastUpdated: new Date().toISOString(),
messages: [
{
id: 'msg_1',
timestamp: new Date().toISOString(),
type: 'user',
content: [{ text: 'Get secret.' }],
},
{
id: 'msg_2',
timestamp: new Date().toISOString(),
type: 'gemini',
model: 'gemini-3-flash-preview',
toolCalls: [
{
id: 'call_1',
name: 'run_shell_command',
args: { command: 'get_secret' },
status: 'success',
timestamp: new Date().toISOString(),
result: [
{
functionResponse: {
id: 'call_1',
name: 'run_shell_command',
response: { output: maskedSnippet },
},
},
],
},
],
content: [{ text: 'I found a masked output.' }],
},
],
};
// Future-date the session so '--resume latest' selects this injected chat
// (assumes 'latest' picks the newest timestamp — TODO confirm against the
// resume implementation).
const futureDate = new Date();
futureDate.setFullYear(futureDate.getFullYear() + 1);
conversation.startTime = futureDate.toISOString();
conversation.lastUpdated = futureDate.toISOString();
const timestamp = futureDate
.toISOString()
.slice(0, 16)
.replace(/:/g, '-');
const sessionFile = path.join(
chatsDir,
`session-${timestamp}-${sessionId.slice(0, 8)}.json`,
);
fs.writeFileSync(sessionFile, JSON.stringify(conversation, null, 2));
// 4. Trust folder
const settingsDir = path.join(rig.homeDir!, '.gemini');
fs.writeFileSync(
path.join(settingsDir, 'trustedFolders.json'),
JSON.stringify(
{
[path.resolve(rig.homeDir!)]: 'TRUST_FOLDER',
},
null,
2,
),
);
// 5. Run agent with --resume
const result = await rig.run({
args: [
'--resume',
'latest',
'What was the secret key in that last masked shell output?',
],
approvalMode: 'yolo',
timeout: 120000,
});
// ASSERTION: Verify agent accessed the redirected file
const logs = rig.readToolLogs();
const accessedFile = logs.some((log) =>
log.toolRequest.args.includes(outputFileName),
);
expect(
accessedFile,
`Agent should have attempted to access the masked output file: ${outputFileName}`,
).toBe(true);
expect(result.toLowerCase()).toContain(secretValue.toLowerCase());
},
});
/**
 * Scenario: Information is in the preview.
 * The agent should answer from the preview without opening the full file.
 */
evalTest('USUALLY_PASSES', {
name: 'should NOT read the full output file when the information is already in the preview',
params: {
security: {
folderTrust: {
enabled: true,
},
},
},
prompt: '/help',
assert: async (rig) => {
await rig.run({ args: '/help' });
const chatsDir = findDir(path.join(rig.homeDir!, '.gemini'), 'chats');
if (!chatsDir) throw new Error('Could not find chats directory');
const projectTempDir = path.dirname(chatsDir);
const sessionId = crypto.randomUUID();
const toolOutputsDir = path.join(
projectTempDir,
'tool-outputs',
`session-${sessionId}`,
);
fs.mkdirSync(toolOutputsDir, { recursive: true });
const secretValue = 'PREVIEW_SECRET_123';
const outputFileName = `masked_output_${crypto.randomUUID()}.txt`;
const outputFilePath = path.join(toolOutputsDir, outputFileName);
fs.writeFileSync(
outputFilePath,
`Full content containing ${secretValue}`,
);
// Unlike the previous scenario, the preview itself contains the secret,
// so the agent has no need to open the redirected file.
const maskedSnippet = `
Output: The secret key is: ${secretValue}
... lines omitted ...
Output too large. Full output available at: ${outputFilePath}
`;
const conversation = {
sessionId: sessionId,
projectHash: path.basename(projectTempDir),
startTime: new Date().toISOString(),
lastUpdated: new Date().toISOString(),
messages: [
{
id: 'msg_1',
timestamp: new Date().toISOString(),
type: 'user',
content: [{ text: 'Find secret.' }],
},
{
id: 'msg_2',
timestamp: new Date().toISOString(),
type: 'gemini',
model: 'gemini-3-flash-preview',
toolCalls: [
{
id: 'call_1',
name: 'run_shell_command',
args: { command: 'get_secret' },
status: 'success',
timestamp: new Date().toISOString(),
result: [
{
functionResponse: {
id: 'call_1',
name: 'run_shell_command',
response: { output: maskedSnippet },
},
},
],
},
],
content: [{ text: 'Masked output found.' }],
},
],
};
// Future-date the session so '--resume latest' selects it (same trick as
// the previous test).
const futureDate = new Date();
futureDate.setFullYear(futureDate.getFullYear() + 1);
conversation.startTime = futureDate.toISOString();
conversation.lastUpdated = futureDate.toISOString();
const timestamp = futureDate
.toISOString()
.slice(0, 16)
.replace(/:/g, '-');
const sessionFile = path.join(
chatsDir,
`session-${timestamp}-${sessionId.slice(0, 8)}.json`,
);
fs.writeFileSync(sessionFile, JSON.stringify(conversation, null, 2));
const settingsDir = path.join(rig.homeDir!, '.gemini');
fs.writeFileSync(
path.join(settingsDir, 'trustedFolders.json'),
JSON.stringify(
{
[path.resolve(rig.homeDir!)]: 'TRUST_FOLDER',
},
null,
2,
),
);
const result = await rig.run({
args: [
'--resume',
'latest',
'What was the secret key mentioned in the previous output?',
],
approvalMode: 'yolo',
timeout: 120000,
});
const logs = rig.readToolLogs();
const accessedFile = logs.some((log) =>
log.toolRequest.args.includes(outputFileName),
);
expect(
accessedFile,
'Agent should NOT have accessed the masked output file',
).toBe(false);
expect(result.toLowerCase()).toContain(secretValue.toLowerCase());
},
});
});
================================================
FILE: evals/tracker.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import {
TRACKER_CREATE_TASK_TOOL_NAME,
TRACKER_UPDATE_TASK_TOOL_NAME,
} from '@google/gemini-cli-core';
import { evalTest, assertModelHasOutput } from './test-helper.js';
import fs from 'node:fs';
import path from 'node:path';
// Minimal seed project shared by the tracker evals: a package.json with a
// trivially-passing test script, plus a login helper containing a marked bug
// (missing password validation) for the agent to find and fix.
const FILES = {
'package.json': JSON.stringify({
name: 'test-project',
version: '1.0.0',
scripts: { test: 'echo "All tests passed!"' },
}),
'src/login.js':
'function login(username, password) {\n if (!username) throw new Error("Missing username");\n // BUG: missing password check\n return true;\n}',
} as const;
describe('tracker_mode', () => {
// Explicit request: the prompt literally asks for a tracker task, a fix,
// and then the task being closed.
evalTest('USUALLY_PASSES', {
name: 'should manage tasks in the tracker when explicitly requested during a bug fix',
params: {
settings: { experimental: { taskTracker: true } },
},
files: FILES,
prompt:
'We have a bug in src/login.js: the password check is missing. First, create a task in the tracker to fix it. Then fix the bug, and mark the task as closed.',
assert: async (rig, result) => {
const wasCreateCalled = await rig.waitForToolCall(
TRACKER_CREATE_TASK_TOOL_NAME,
);
expect(
wasCreateCalled,
'Expected tracker_create_task tool to be called',
).toBe(true);
const toolLogs = rig.readToolLogs();
const createCall = toolLogs.find(
(log) => log.toolRequest.name === TRACKER_CREATE_TASK_TOOL_NAME,
);
expect(createCall).toBeDefined();
// The created task's title or description should reference the login bug.
const args = JSON.parse(createCall!.toolRequest.args);
expect(
(args.title?.toLowerCase() ?? '') +
(args.description?.toLowerCase() ?? ''),
).toContain('login');
const wasUpdateCalled = await rig.waitForToolCall(
TRACKER_UPDATE_TASK_TOOL_NAME,
);
expect(
wasUpdateCalled,
'Expected tracker_update_task tool to be called',
).toBe(true);
const updateCall = toolLogs.find(
(log) => log.toolRequest.name === TRACKER_UPDATE_TASK_TOOL_NAME,
);
expect(updateCall).toBeDefined();
const updateArgs = JSON.parse(updateCall!.toolRequest.args);
expect(updateArgs.status).toBe('closed');
// The BUG marker comment must be gone once the fix landed.
const loginContent = fs.readFileSync(
path.join(rig.testDir!, 'src/login.js'),
'utf-8',
);
expect(loginContent).not.toContain('// BUG: missing password check');
assertModelHasOutput(result);
},
});
// Implicit use: no tracker tool is named in the prompt; the model should
// reach for it on its own when asked to organize work.
evalTest('USUALLY_PASSES', {
name: 'should implicitly create tasks when asked to build a feature plan',
params: {
settings: { experimental: { taskTracker: true } },
},
files: FILES,
prompt:
'I need to build a complex new feature for user authentication in our project. Create a detailed implementation plan and organize the work into bite-sized chunks. Do not actually implement the code yet, just plan it.',
assert: async (rig, result) => {
// The model should proactively use tracker_create_task to organize the work
const wasToolCalled = await rig.waitForToolCall(
TRACKER_CREATE_TASK_TOOL_NAME,
);
expect(
wasToolCalled,
'Expected tracker_create_task to be called implicitly to organize plan',
).toBe(true);
const toolLogs = rig.readToolLogs();
const createCalls = toolLogs.filter(
(log) => log.toolRequest.name === TRACKER_CREATE_TASK_TOOL_NAME,
);
// We expect it to create at least one task for authentication, likely more.
expect(createCalls.length).toBeGreaterThan(0);
// Verify it didn't write any code since we asked it to just plan
const loginContent = fs.readFileSync(
path.join(rig.testDir!, 'src/login.js'),
'utf-8',
);
expect(loginContent).toContain('// BUG: missing password check');
assertModelHasOutput(result);
},
});
});
================================================
FILE: evals/validation_fidelity.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
describe('validation_fidelity', () => {
// Seeds a tiny strict-TS project where renaming a field in types.ts breaks
// logger.ts and its test, then checks the agent validates its own refactor.
evalTest('USUALLY_PASSES', {
name: 'should perform exhaustive validation autonomously when guided by system instructions',
files: {
'src/types.ts': `
export interface LogEntry {
level: 'info' | 'warn' | 'error';
message: string;
}
`,
'src/logger.ts': `
import { LogEntry } from './types.js';
export function formatLog(entry: LogEntry): string {
return \`[\${entry.level.toUpperCase()}] \${entry.message}\`;
}
`,
'src/logger.test.ts': `
import { expect, test } from 'vitest';
import { formatLog } from './logger.js';
import { LogEntry } from './types.js';
test('formats log correctly', () => {
const entry: LogEntry = { level: 'info', message: 'test message' };
expect(formatLog(entry)).toBe('[INFO] test message');
});
`,
'package.json': JSON.stringify({
name: 'test-project',
type: 'module',
scripts: {
test: 'vitest run',
build: 'tsc --noEmit',
},
}),
'tsconfig.json': JSON.stringify({
compilerOptions: {
target: 'ESNext',
module: 'ESNext',
moduleResolution: 'node',
strict: true,
esModuleInterop: true,
skipLibCheck: true,
forceConsistentCasingInFileNames: true,
},
}),
},
prompt:
"Refactor the 'LogEntry' interface in 'src/types.ts' to rename the 'message' field to 'payload'.",
timeout: 600000,
assert: async (rig) => {
// The goal of this eval is to see if the agent realizes it needs to update usages
// AND run 'npm run build' or 'tsc' autonomously to ensure project-wide structural integrity.
const toolLogs = rig.readToolLogs();
const shellCalls = toolLogs.filter(
(log) => log.toolRequest.name === 'run_shell_command',
);
// Accept any of the common validation entry points.
const hasBuildOrTsc = shellCalls.some((log) => {
const cmd = JSON.parse(log.toolRequest.args).command.toLowerCase();
return (
cmd.includes('npm run build') ||
cmd.includes('tsc') ||
cmd.includes('typecheck') ||
cmd.includes('npm run verify')
);
});
expect(
hasBuildOrTsc,
'Expected the agent to autonomously run a build or type-check command to verify the refactoring',
).toBe(true);
},
});
});
================================================
FILE: evals/validation_fidelity_pre_existing_errors.eval.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect } from 'vitest';
import { evalTest } from './test-helper.js';
describe('validation_fidelity_pre_existing_errors', () => {
// The seeded project deliberately fails type-checking in src/utils.ts; the
// agent is asked for an unrelated rename and should still attempt validation.
evalTest('USUALLY_PASSES', {
name: 'should handle pre-existing project errors gracefully during validation',
files: {
'src/math.ts': `
export function add(a: number, b: number): number {
return a + b;
}
`,
'src/index.ts': `
import { add } from './math.js';
console.log(add(1, 2));
`,
'src/utils.ts': `
export function multiply(a: number, b: number): number {
return a * c; // 'c' is not defined - PRE-EXISTING ERROR
}
`,
'package.json': JSON.stringify({
name: 'test-project',
type: 'module',
scripts: {
test: 'vitest run',
build: 'tsc --noEmit',
},
}),
'tsconfig.json': JSON.stringify({
compilerOptions: {
target: 'ESNext',
module: 'ESNext',
moduleResolution: 'node',
strict: true,
esModuleInterop: true,
skipLibCheck: true,
forceConsistentCasingInFileNames: true,
},
}),
},
prompt: "In src/math.ts, rename the 'add' function to 'sum'.",
timeout: 600000,
assert: async (rig) => {
const toolLogs = rig.readToolLogs();
const replaceCalls = toolLogs.filter(
(log) => log.toolRequest.name === 'replace',
);
// Verify it did the work in math.ts
const mathRefactor = replaceCalls.some((log) => {
const args = JSON.parse(log.toolRequest.args);
return (
args.file_path.endsWith('src/math.ts') &&
args.new_string.includes('sum')
);
});
expect(mathRefactor, 'Agent should have refactored math.ts').toBe(true);
// The agent should still try to validate despite the pre-existing error.
const shellCalls = toolLogs.filter(
(log) => log.toolRequest.name === 'run_shell_command',
);
const ranValidation = shellCalls.some((log) => {
const cmd = JSON.parse(log.toolRequest.args).command.toLowerCase();
return cmd.includes('build') || cmd.includes('tsc');
});
expect(ranValidation, 'Agent should have attempted validation').toBe(
true,
);
},
});
});
================================================
FILE: evals/vitest.config.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { defineConfig } from 'vitest/config';
import { fileURLToPath } from 'node:url';
import * as path from 'node:path';
// This config is an ES module, so reconstruct __dirname from import.meta.url.
const __dirname = path.dirname(fileURLToPath(import.meta.url));
export default defineConfig({
resolve: {
// Prefer packages' 'test' export condition when resolving imports.
conditions: ['test'],
},
test: {
testTimeout: 300000, // 5 minutes
// The JSON report lands next to the per-eval logs under evals/logs.
reporters: ['default', 'json'],
outputFile: {
json: 'evals/logs/report.json',
},
include: ['**/*.eval.ts'],
environment: 'node',
globals: true,
alias: {
// Pin react to the repo root copy — presumably to avoid duplicate
// React instances; verify if this alias is still needed.
react: path.resolve(__dirname, '../node_modules/react'),
},
setupFiles: [path.resolve(__dirname, '../packages/cli/test-setup.ts')],
server: {
deps: {
// Inline the core package so it is processed by vitest's resolver
// (and thus the 'test' condition above) rather than loaded as-is.
inline: [/@google\/gemini-cli-core/],
},
},
},
});
================================================
FILE: integration-tests/acp-env-auth.test.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { TestRig } from './test-helper.js';
import { spawn, ChildProcess } from 'node:child_process';
import { join, resolve } from 'node:path';
import { writeFileSync, mkdirSync } from 'node:fs';
import { Writable, Readable } from 'node:stream';
import { env } from 'node:process';
import * as acp from '@agentclientprotocol/sdk';
// Spawning the bundled CLI directly behaves differently inside sandbox
// containers, so skip these tests whenever GEMINI_SANDBOX is enabled.
const sandboxEnv = env['GEMINI_SANDBOX'];
const itMaybe = sandboxEnv && sandboxEnv !== 'false' ? it.skip : it;
/**
 * Minimal acp.Client test double: records every session update and treats
 * any permission request as a test failure.
 */
class MockClient implements acp.Client {
  updates: acp.SessionNotification[] = [];
  sessionUpdate = async (params: acp.SessionNotification) => {
    this.updates.push(params);
  };
  // FIX: the bare `Promise` return annotation (missing its type argument)
  // does not compile; Promise<never> is accurate since this always throws,
  // and stays assignable to whatever acp.Client requires.
  requestPermission = async (): Promise<never> => {
    throw new Error('unexpected');
  };
}
// NOTE(review): the whole suite is disabled via describe.skip, so the itMaybe
// sandbox guard inside currently has no effect — confirm whether the skip is
// still intentional.
describe.skip('ACP Environment and Auth', () => {
let rig: TestRig;
let child: ChildProcess | undefined;
beforeEach(() => {
rig = new TestRig();
});
afterEach(async () => {
// Kill the spawned CLI and remove the rig's temp dirs even on failure.
child?.kill();
child = undefined;
await rig.cleanup();
});
itMaybe(
'should load .env from project directory and use the provided API key',
async () => {
rig.setup('acp-env-loading');
// Create a project directory with a .env file containing a recognizable invalid key
const projectDir = resolve(join(rig.testDir!, 'project'));
mkdirSync(projectDir, { recursive: true });
writeFileSync(
join(projectDir, '.env'),
'GEMINI_API_KEY=test-key-from-env\n',
);
const bundlePath = join(import.meta.dirname, '..', 'bundle/gemini.js');
child = spawn('node', [bundlePath, '--acp'], {
cwd: rig.homeDir!,
stdio: ['pipe', 'pipe', 'inherit'],
env: {
...process.env,
GEMINI_CLI_HOME: rig.homeDir!,
// Overriding with undefined clears any inherited key from the child
// env (presumably Node omits undefined entries — verify).
GEMINI_API_KEY: undefined,
VERBOSE: 'true',
},
});
const input = Writable.toWeb(child.stdin!);
const output = Readable.toWeb(
child.stdout!,
) as ReadableStream;
const testClient = new MockClient();
const stream = acp.ndJsonStream(input, output);
const connection = new acp.ClientSideConnection(() => testClient, stream);
await connection.initialize({
protocolVersion: acp.PROTOCOL_VERSION,
clientCapabilities: {
fs: { readTextFile: false, writeTextFile: false },
},
});
// 1. newSession should succeed because it finds the key in .env
const { sessionId } = await connection.newSession({
cwd: projectDir,
mcpServers: [],
});
expect(sessionId).toBeDefined();
// 2. prompt should fail because the key is invalid,
// but the error should come from the API, not the internal auth check.
await expect(
connection.prompt({
sessionId,
prompt: [{ type: 'text', text: 'hello' }],
}),
).rejects.toSatisfy((error: unknown) => {
const acpError = error as acp.RequestError;
const errorData = acpError.data as
| { error?: { message?: string } }
| undefined;
const message = String(errorData?.error?.message || acpError.message);
// It should NOT be our internal "Authentication required" message
expect(message).not.toContain('Authentication required');
// It SHOULD be an API error mentioning the invalid key
expect(message).toContain('API key not valid');
return true;
});
child.stdin!.end();
},
);
itMaybe(
'should fail with authRequired when no API key is found',
async () => {
rig.setup('acp-auth-failure');
const bundlePath = join(import.meta.dirname, '..', 'bundle/gemini.js');
child = spawn('node', [bundlePath, '--acp'], {
cwd: rig.homeDir!,
stdio: ['pipe', 'pipe', 'inherit'],
env: {
...process.env,
GEMINI_CLI_HOME: rig.homeDir!,
GEMINI_API_KEY: undefined,
VERBOSE: 'true',
},
});
const input = Writable.toWeb(child.stdin!);
const output = Readable.toWeb(
child.stdout!,
) as ReadableStream;
const testClient = new MockClient();
const stream = acp.ndJsonStream(input, output);
const connection = new acp.ClientSideConnection(() => testClient, stream);
await connection.initialize({
protocolVersion: acp.PROTOCOL_VERSION,
clientCapabilities: {
fs: { readTextFile: false, writeTextFile: false },
},
});
// With no key available anywhere, session creation itself must fail.
await expect(
connection.newSession({
cwd: resolve(rig.testDir!),
mcpServers: [],
}),
).rejects.toMatchObject({
message: expect.stringContaining(
'Gemini API key is missing or not configured.',
),
});
child.stdin!.end();
},
);
});
================================================
FILE: integration-tests/acp-telemetry.test.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { TestRig } from './test-helper.js';
import { spawn, ChildProcess } from 'node:child_process';
import { join } from 'node:path';
import { readFileSync, existsSync } from 'node:fs';
import { Writable, Readable } from 'node:stream';
import { env } from 'node:process';
import * as acp from '@agentclientprotocol/sdk';
// Skip in sandbox mode - test spawns CLI directly which behaves differently in containers
const sandboxEnv = env['GEMINI_SANDBOX'];
const itMaybe = sandboxEnv && sandboxEnv !== 'false' ? it.skip : it;
// Reuse existing fake responses that return a simple "Hello" response
// (resolved relative to this test file when passed to rig.setup below).
const SIMPLE_RESPONSE_PATH = 'hooks-system.session-startup.responses';
/**
 * acp.Client test double: collects every session update notification and
 * treats any permission request as a test failure.
 */
class SessionUpdateCollector implements acp.Client {
  updates: acp.SessionNotification[] = [];
  sessionUpdate = async (params: acp.SessionNotification) => {
    this.updates.push(params);
  };
  // FIX: the bare `Promise` return annotation (missing its type argument)
  // does not compile; Promise<never> is accurate since this always throws,
  // and stays assignable to whatever acp.Client requires.
  requestPermission = async (): Promise<never> => {
    throw new Error('unexpected');
  };
}
describe('ACP telemetry', () => {
  let rig: TestRig;
  let child: ChildProcess | undefined;
  beforeEach(() => {
    rig = new TestRig();
  });
  afterEach(async () => {
    // Kill the spawned CLI and remove the rig's temp dirs even on failure.
    child?.kill();
    child = undefined;
    await rig.cleanup();
  });
  itMaybe('should flush telemetry when connection closes', async () => {
    rig.setup('acp-telemetry-flush', {
      fakeResponsesPath: join(import.meta.dirname, SIMPLE_RESPONSE_PATH),
    });
    const telemetryPath = join(rig.homeDir!, 'telemetry.log');
    const bundlePath = join(import.meta.dirname, '..', 'bundle/gemini.js');
    // Spawn the bundled CLI in ACP mode with canned model responses and
    // file-based local telemetry.
    child = spawn(
      'node',
      [
        bundlePath,
        '--acp',
        '--fake-responses',
        join(rig.testDir!, 'fake-responses.json'),
      ],
      {
        cwd: rig.testDir!,
        stdio: ['pipe', 'pipe', 'inherit'],
        env: {
          ...process.env,
          GEMINI_API_KEY: 'fake-key',
          GEMINI_CLI_HOME: rig.homeDir!,
          GEMINI_TELEMETRY_ENABLED: 'true',
          GEMINI_TELEMETRY_TARGET: 'local',
          GEMINI_TELEMETRY_OUTFILE: telemetryPath,
        },
      },
    );
    const input = Writable.toWeb(child.stdin!);
    const output = Readable.toWeb(child.stdout!) as ReadableStream;
    const testClient = new SessionUpdateCollector();
    const stream = acp.ndJsonStream(input, output);
    const connection = new acp.ClientSideConnection(() => testClient, stream);
    await connection.initialize({
      protocolVersion: acp.PROTOCOL_VERSION,
      clientCapabilities: { fs: { readTextFile: false, writeTextFile: false } },
    });
    const { sessionId } = await connection.newSession({
      cwd: rig.testDir!,
      mcpServers: [],
    });
    await connection.prompt({
      sessionId,
      prompt: [{ type: 'text', text: 'Say hello' }],
    });
    expect(JSON.stringify(testClient.updates)).toContain('Hello');
    // Close stdin to trigger telemetry flush via runExitCleanup()
    child.stdin!.end();
    // FIX: the bare `new Promise((resolve) => ...)` form infers
    // Promise<unknown>, making the zero-arg resolve() call a type error
    // under strict settings; the explicit <void> restores the intent.
    await new Promise<void>((resolve) => {
      child!.on('close', () => resolve());
    });
    child = undefined;
    // gen_ai.output.messages is the last OTEL log emitted (after prompt response)
    expect(existsSync(telemetryPath)).toBe(true);
    expect(readFileSync(telemetryPath, 'utf-8')).toContain(
      'gen_ai.output.messages',
    );
  });
});
================================================
FILE: integration-tests/api-resilience.responses
================================================
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"Part 1. "}],"role":"model"},"index":0}]},{"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":10,"totalTokenCount":110}},{"candidates":[{"content":{"parts":[{"text":"Part 2."}],"role":"model"},"index":0}],"finishReason":"STOP"}]}
================================================
FILE: integration-tests/api-resilience.test.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { TestRig } from './test-helper.js';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
// Exercises streaming resilience: the canned response stream contains a
// middle chunk that carries only usageMetadata (no candidates).
describe('API Resilience E2E', () => {
let rig: TestRig;
beforeEach(() => {
rig = new TestRig();
});
afterEach(async () => {
await rig.cleanup();
});
it('should not crash when receiving metadata-only chunks in a stream', async () => {
await rig.setup('api-resilience-metadata-only', {
fakeResponsesPath: join(
dirname(fileURLToPath(import.meta.url)),
'api-resilience.responses',
),
settings: {
planSettings: { modelRouting: false },
},
});
// Run the CLI with a simple prompt.
// The fake responses will provide a stream with a metadata-only chunk in the middle.
// We use gemini-3-pro-preview to minimize internal service calls.
const result = await rig.run({
args: ['hi', '--model', 'gemini-3-pro-preview'],
});
// Verify the output contains text from the normal chunks.
// If the CLI crashed on the metadata chunk, rig.run would throw.
expect(result).toContain('Part 1.');
expect(result).toContain('Part 2.');
// Verify telemetry event for the prompt was still generated
const hasUserPromptEvent = await rig.waitForTelemetryEvent('user_prompt');
expect(hasUserPromptEvent).toBe(true);
});
});
================================================
FILE: integration-tests/browser-agent.cleanup.responses
================================================
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"I'll open https://example.com and check the page title for you."},{"functionCall":{"name":"browser_agent","args":{"task":"Open https://example.com and get the page title"}}}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":35,"totalTokenCount":135}}]}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"The page title of https://example.com is \"Example Domain\". The browser session has been completed and cleaned up successfully."}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":200,"candidatesTokenCount":30,"totalTokenCount":230}}]}
================================================
FILE: integration-tests/browser-agent.confirmation.responses
================================================
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"functionCall":{"name":"write_file","args":{"file_path":"test.txt","content":"hello"}}},{"text":"I've successfully written \"hello\" to test.txt. The file has been created with the specified content."}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":50,"totalTokenCount":150}}]}
================================================
FILE: integration-tests/browser-agent.interaction.responses
================================================
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"I'll navigate to https://example.com and analyze the links on the page."},{"functionCall":{"name":"browser_agent","args":{"task":"Go to https://example.com and find all links on the page, then describe them"}}}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":40,"totalTokenCount":140}}]}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"After analyzing https://example.com, I found the following links:\n\n1. **\"More information...\"** - This is the main link on the page that points to the IANA (Internet Assigned Numbers Authority) website for more details about reserved domains.\n\nThe page is quite minimal with just this single informational link, which is typical for example domains used in documentation."}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":200,"candidatesTokenCount":70,"totalTokenCount":270}}]}
================================================
FILE: integration-tests/browser-agent.navigate-snapshot.responses
================================================
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"I'll help you open https://example.com and analyze the page. Let me use the browser agent to navigate and capture the page information."},{"functionCall":{"name":"browser_agent","args":{"task":"Navigate to https://example.com and capture the accessibility tree to get the page title and main content"}}}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":50,"totalTokenCount":150}}]}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"Based on the browser analysis of https://example.com:\n\n**Page Title**: Example Domain\n\n**Main Content**: The page contains a simple heading \"Example Domain\" and explanatory text stating \"This domain is for use in illustrative examples in documents. You may use this domain in literature without prior coordination or asking for permission.\"\n\nThe page has a clean, minimal layout typical of placeholder domains used for documentation and examples."}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":200,"candidatesTokenCount":80,"totalTokenCount":280}}]}
================================================
FILE: integration-tests/browser-agent.screenshot.responses
================================================
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"I'll navigate to https://example.com and take a screenshot for you."},{"functionCall":{"name":"browser_agent","args":{"task":"Navigate to https://example.com and take a screenshot of the page"}}}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":40,"totalTokenCount":140}}]}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"I've successfully navigated to https://example.com and captured a screenshot. The page shows the familiar \"Example Domain\" header with explanatory text below it. The screenshot captures the clean, minimal layout of this demonstration website."}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":200,"candidatesTokenCount":50,"totalTokenCount":250}}]}
================================================
FILE: integration-tests/browser-agent.sequential.responses
================================================
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"I'll help you navigate to https://example.com and perform both operations - capturing the accessibility tree and taking a screenshot."},{"functionCall":{"name":"browser_agent","args":{"task":"Navigate to https://example.com, take a snapshot of the accessibility tree, then take a screenshot"}}}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":45,"totalTokenCount":145}}]}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"I've successfully completed both operations on https://example.com:\n\n**Accessibility Tree Snapshot**: The page has a clear structure with the main heading \"Example Domain\" and descriptive text about the domain's purpose for documentation examples.\n\n**Screenshot**: Captured a visual representation of the page showing the clean, minimal layout with the heading and explanatory text.\n\nBoth the accessibility data and visual screenshot confirm this is the standard example domain page used for documentation purposes."}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":200,"candidatesTokenCount":80,"totalTokenCount":280}}]}
================================================
FILE: integration-tests/browser-agent.test.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
/**
* Integration tests for the browser agent.
*
* These tests verify the complete end-to-end flow from CLI prompt through
* browser_agent delegation to MCP/Chrome DevTools and back. Unlike the unit
* tests in packages/core/src/agents/browser/ which mock all MCP components,
* these tests launch real Chrome instances in headless mode.
*
* Tests are skipped on systems without Chrome/Chromium installed.
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { TestRig, assertModelHasOutput } from './test-helper.js';
import { dirname, join } from 'node:path';
import { fileURLToPath } from 'node:url';
import { execSync } from 'node:child_process';
import { existsSync } from 'node:fs';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
 * True when a Chrome/Chromium installation can be found on this machine.
 *
 * Detection is per-platform:
 * - darwin: standard /Applications app-bundle directories.
 * - linux: `which` lookup for the common binary names.
 * - win32: standard install paths, falling back to a PATH lookup.
 * Any other platform is treated as "not available" so the suite is skipped.
 */
const chromeAvailable = (() => {
  try {
    if (process.platform === 'darwin') {
      // Check the standard app-bundle locations directly with existsSync
      // (already imported) instead of shelling out to `test -d`.
      return (
        existsSync('/Applications/Google Chrome.app') ||
        existsSync('/Applications/Chromium.app')
      );
    }
    if (process.platform === 'linux') {
      // `which` exits non-zero (execSync throws) when no binary is found.
      execSync(
        'which google-chrome || which chromium-browser || which chromium',
        { stdio: 'ignore' },
      );
      return true;
    }
    if (process.platform === 'win32') {
      // Check standard Windows installation paths using Node.js fs.
      const chromePaths = [
        'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe',
        'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe',
        `${process.env['LOCALAPPDATA'] ?? ''}\\Google\\Chrome\\Application\\chrome.exe`,
      ];
      if (chromePaths.some((p) => existsSync(p))) {
        return true;
      }
      // Fall back to a PATH lookup; throws when neither binary is found.
      execSync('where chrome || where chromium', { stdio: 'ignore' });
      return true;
    }
    // Unsupported platform.
    return false;
  } catch {
    return false;
  }
})();
describe.skipIf(!chromeAvailable)('browser-agent', () => {
  let rig: TestRig;
  // Fresh rig per test; cleanup tears down the sandbox and any child processes.
  beforeEach(() => {
    rig = new TestRig();
  });
  afterEach(async () => await rig.cleanup());
  // NOTE(review): unlike api-resilience.test.ts, the rig.setup(...) calls in
  // this suite are not awaited — confirm setup is safe to use unawaited here.
  it('should navigate to a page and capture accessibility tree', async () => {
    rig.setup('browser-navigate-and-snapshot', {
      // Canned responses: the model delegates to browser_agent, then returns
      // a final summary of the page title and content.
      fakeResponsesPath: join(
        __dirname,
        'browser-agent.navigate-snapshot.responses',
      ),
      settings: {
        agents: {
          browser_agent: {
            headless: true,
            sessionMode: 'isolated',
          },
        },
      },
    });
    const result = await rig.run({
      args: 'Open https://example.com in the browser and tell me the page title and main content.',
    });
    assertModelHasOutput(result);
    // The tool log is the ground truth that delegation actually happened.
    const toolLogs = rig.readToolLogs();
    const browserAgentCall = toolLogs.find(
      (t) => t.toolRequest.name === 'browser_agent',
    );
    expect(
      browserAgentCall,
      'Expected browser_agent to be called',
    ).toBeDefined();
  });
  it('should take screenshots of web pages', async () => {
    rig.setup('browser-screenshot', {
      fakeResponsesPath: join(__dirname, 'browser-agent.screenshot.responses'),
      settings: {
        agents: {
          browser_agent: {
            headless: true,
            sessionMode: 'isolated',
          },
        },
      },
    });
    const result = await rig.run({
      args: 'Navigate to https://example.com and take a screenshot.',
    });
    // At least one browser_agent invocation must be recorded in the tool log.
    const toolLogs = rig.readToolLogs();
    const browserCalls = toolLogs.filter(
      (t) => t.toolRequest.name === 'browser_agent',
    );
    expect(browserCalls.length).toBeGreaterThan(0);
    assertModelHasOutput(result);
  });
  it('should interact with page elements', async () => {
    rig.setup('browser-interaction', {
      // Canned responses: delegation to browser_agent, then a description of
      // the links found on the page.
      fakeResponsesPath: join(__dirname, 'browser-agent.interaction.responses'),
      settings: {
        agents: {
          browser_agent: {
            headless: true,
            sessionMode: 'isolated',
          },
        },
      },
    });
    const result = await rig.run({
      args: 'Go to https://example.com, find any links on the page, and describe them.',
    });
    const toolLogs = rig.readToolLogs();
    const browserAgentCall = toolLogs.find(
      (t) => t.toolRequest.name === 'browser_agent',
    );
    expect(
      browserAgentCall,
      'Expected browser_agent to be called',
    ).toBeDefined();
    assertModelHasOutput(result);
  });
  it('should clean up browser processes after completion', async () => {
    rig.setup('browser-cleanup', {
      fakeResponsesPath: join(__dirname, 'browser-agent.cleanup.responses'),
      settings: {
        agents: {
          browser_agent: {
            headless: true,
            sessionMode: 'isolated',
          },
        },
      },
    });
    await rig.run({
      args: 'Open https://example.com in the browser and check the page title.',
    });
    // No explicit assertion: the test passes if we reach here. A leaked or
    // hanging browser process would keep the run alive and trip Vitest's
    // timeout mechanism instead.
  });
  it('should handle multiple browser operations in sequence', async () => {
    rig.setup('browser-sequential', {
      // Canned responses cover a single delegation that performs both the
      // snapshot and the screenshot, plus the final summary.
      fakeResponsesPath: join(__dirname, 'browser-agent.sequential.responses'),
      settings: {
        agents: {
          browser_agent: {
            headless: true,
            sessionMode: 'isolated',
          },
        },
      },
    });
    const result = await rig.run({
      args: 'Navigate to https://example.com, take a snapshot of the accessibility tree, then take a screenshot.',
    });
    const toolLogs = rig.readToolLogs();
    const browserCalls = toolLogs.filter(
      (t) => t.toolRequest.name === 'browser_agent',
    );
    expect(browserCalls.length).toBeGreaterThan(0);
    // Should successfully complete all operations
    assertModelHasOutput(result);
  });
  it('should handle tool confirmation for write_file without crashing', async () => {
    rig.setup('tool-confirmation', {
      fakeResponsesPath: join(
        __dirname,
        'browser-agent.confirmation.responses',
      ),
      settings: {
        agents: {
          browser_agent: {
            headless: true,
            sessionMode: 'isolated',
          },
        },
      },
    });
    // Interactive run: type the prompt, wait for the write_file confirmation
    // prompt ('Allow'), approve it with 'y' + Enter, then wait for the canned
    // completion message to appear in the terminal output.
    const run = await rig.runInteractive({ approvalMode: 'default' });
    await run.type('Write hello to test.txt');
    await run.type('\r');
    await run.expectText('Allow', 15000);
    await run.type('y');
    await run.type('\r');
    await run.expectText('successfully written', 15000);
  });
});
================================================
FILE: integration-tests/browser-policy.responses
================================================
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"I'll help you with that."},{"functionCall":{"name":"browser_agent","args":{"task":"Open https://example.com and check if there is a heading"}}}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":50,"totalTokenCount":150}}]}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"functionCall":{"name":"new_page","args":{"url":"https://example.com"}}}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":50,"totalTokenCount":150}}]}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"functionCall":{"name":"take_snapshot","args":{}}}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":50,"totalTokenCount":150}}]}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"functionCall":{"name":"complete_task","args":{"success":true,"summary":"SUCCESS_POLICY_TEST_COMPLETED"}}}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":50,"totalTokenCount":150}}]}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"Task completed successfully. The page has the heading \"Example Domain\"."}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":200,"candidatesTokenCount":50,"totalTokenCount":250}}]}
================================================
FILE: integration-tests/browser-policy.test.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { TestRig, poll } from './test-helper.js';
import { dirname, join } from 'node:path';
import { fileURLToPath } from 'node:url';
import { execSync } from 'node:child_process';
import { existsSync, writeFileSync, readFileSync, mkdirSync } from 'node:fs';
import stripAnsi from 'strip-ansi';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
 * True when a Chrome/Chromium installation can be found on this machine;
 * the suite below is skipped entirely when it cannot.
 *
 * Detection is per-platform: darwin checks the standard app-bundle
 * directories, linux uses `which`, win32 checks standard install paths with
 * a PATH fallback; any other platform is treated as "not available".
 */
const chromeAvailable = (() => {
  try {
    if (process.platform === 'darwin') {
      // Check the standard app-bundle locations directly with existsSync
      // (already imported) instead of shelling out to `test -d`.
      return (
        existsSync('/Applications/Google Chrome.app') ||
        existsSync('/Applications/Chromium.app')
      );
    }
    if (process.platform === 'linux') {
      // `which` exits non-zero (execSync throws) when no binary is found.
      execSync(
        'which google-chrome || which chromium-browser || which chromium',
        { stdio: 'ignore' },
      );
      return true;
    }
    if (process.platform === 'win32') {
      // Standard install paths first, then a PATH lookup via `where`.
      const chromePaths = [
        'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe',
        'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe',
        `${process.env['LOCALAPPDATA'] ?? ''}\\Google\\Chrome\\Application\\chrome.exe`,
      ];
      if (chromePaths.some((p) => existsSync(p))) {
        return true;
      }
      execSync('where chrome || where chromium', { stdio: 'ignore' });
      return true;
    }
    // Unsupported platform.
    return false;
  } catch {
    return false;
  }
})();
describe.skipIf(!chromeAvailable)('browser-policy', () => {
  let rig: TestRig;
  beforeEach(() => {
    rig = new TestRig();
  });
  afterEach(async () => {
    await rig.cleanup();
  });
  it('should skip confirmation when "Allow all server tools for this session" is chosen', async () => {
    rig.setup('browser-policy-skip-confirmation', {
      // Canned responses drive: browser_agent delegation, then the inner
      // agent's new_page -> take_snapshot -> complete_task, then a final
      // summary containing "completed successfully".
      fakeResponsesPath: join(__dirname, 'browser-policy.responses'),
      settings: {
        agents: {
          overrides: {
            browser_agent: {
              enabled: true,
            },
          },
        },
        browser: {
          headless: true,
          sessionMode: 'isolated',
          allowedDomains: ['example.com'],
        },
      },
    });
    // Manually trust the folder to avoid the trust dialog and enable option 3
    const geminiDir = join(rig.homeDir!, '.gemini');
    mkdirSync(geminiDir, { recursive: true });
    // Write to trustedFolders.json so the test dir is pre-trusted.
    const trustedFoldersPath = join(geminiDir, 'trustedFolders.json');
    const trustedFolders = {
      [rig.testDir!]: 'TRUST_FOLDER',
    };
    writeFileSync(trustedFoldersPath, JSON.stringify(trustedFolders, null, 2));
    // Force confirmation for browser agent.
    // NOTE: We don't force confirm browser tools here because "Allow all server tools"
    // adds a rule with ALWAYS_ALLOW_PRIORITY (3.9x) which would be overshadowed by
    // a rule in the user tier (4.x) like the one from this TOML.
    // By removing the explicit mcp rule, the first MCP tool will still prompt
    // due to default approvalMode = 'default', and then "Allow all" will correctly
    // bypass subsequent tools.
    const policyFile = join(rig.testDir!, 'force-confirm.toml');
    writeFileSync(
      policyFile,
      `
[[rule]]
name = "Force confirm browser_agent"
toolName = "browser_agent"
decision = "ask_user"
priority = 200
`,
    );
    // Update settings.json in both project and home directories to point to the policy file
    for (const baseDir of [rig.testDir!, rig.homeDir!]) {
      const settingsPath = join(baseDir, '.gemini', 'settings.json');
      if (existsSync(settingsPath)) {
        const settings = JSON.parse(readFileSync(settingsPath, 'utf-8'));
        settings.policyPaths = [policyFile];
        // Ensure folder trust is enabled
        settings.security = settings.security || {};
        settings.security.folderTrust = settings.security.folderTrust || {};
        settings.security.folderTrust.enabled = true;
        writeFileSync(settingsPath, JSON.stringify(settings, null, 2));
      }
    }
    const run = await rig.runInteractive({
      approvalMode: 'default',
      env: {
        GEMINI_CLI_INTEGRATION_TEST: 'true',
      },
    });
    await run.sendKeys(
      'Open https://example.com and check if there is a heading\r',
    );
    await run.sendKeys('\r');
    // Handle confirmations.
    // 1. Initial browser_agent delegation (likely only 3 options, so use option 1: Allow once)
    await poll(
      () => stripAnsi(run.output).toLowerCase().includes('action required'),
      60000,
      1000,
    );
    await run.sendKeys('1\r');
    await new Promise((r) => setTimeout(r, 2000));
    // Handle privacy notice
    await poll(
      () => stripAnsi(run.output).toLowerCase().includes('privacy notice'),
      5000,
      100,
    );
    await run.sendKeys('1\r');
    await new Promise((r) => setTimeout(r, 5000));
    // new_page (MCP tool, should have 4 options, use option 3: Allow all server tools)
    await poll(
      () => {
        const stripped = stripAnsi(run.output).toLowerCase();
        return (
          stripped.includes('new_page') &&
          stripped.includes('allow all server tools for this session')
        );
      },
      60000,
      1000,
    );
    // Select "Allow all server tools for this session" (option 3)
    await run.sendKeys('3\r');
    // NOTE(review): this fixed 30s sleep makes the test timing-sensitive;
    // take_snapshot and complete_task are expected to run without further
    // prompts before it elapses — consider polling for completion instead.
    await new Promise((r) => setTimeout(r, 30000));
    const output = stripAnsi(run.output).toLowerCase();
    expect(output).toContain('browser_agent');
    // 'completed successfully' comes from the canned final summary text.
    expect(output).toContain('completed successfully');
  });
});
================================================
FILE: integration-tests/checkpointing.test.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import * as os from 'node:os';
import { GitService, Storage } from '@google/gemini-cli-core';
describe('Checkpointing Integration', () => {
  let tmpDir: string;
  let projectRoot: string;
  let fakeHome: string;
  let originalEnv: NodeJS.ProcessEnv;
  beforeEach(async () => {
    // Isolated sandbox: a fake project root plus a fake HOME so no real user
    // git configuration can leak into the test.
    tmpDir = await fs.mkdtemp(
      path.join(os.tmpdir(), 'gemini-checkpoint-test-'),
    );
    projectRoot = path.join(tmpDir, 'project');
    fakeHome = path.join(tmpDir, 'home');
    await fs.mkdir(projectRoot, { recursive: true });
    await fs.mkdir(fakeHome, { recursive: true });
    // Save original env
    originalEnv = { ...process.env };
    // Simulate environment with NO global gitconfig
    // NOTE(review): git does not consult HOME on Windows (it uses
    // USERPROFILE), so this isolation may not hold there — confirm.
    process.env['HOME'] = fakeHome;
    delete process.env['GIT_CONFIG_GLOBAL'];
    delete process.env['GIT_CONFIG_SYSTEM'];
  });
  afterEach(async () => {
    // Restore env
    process.env = originalEnv;
    // Cleanup
    try {
      await fs.rm(tmpDir, { recursive: true, force: true });
    } catch (e) {
      console.error('Failed to cleanup temp dir', e);
    }
  });
  it('should successfully create and restore snapshots without global git config', async () => {
    const storage = new Storage(projectRoot);
    const gitService = new GitService(projectRoot, storage);
    // 1. Initialize
    await gitService.initialize();
    // Verify system config empty file creation
    // We need to access getHistoryDir logic or replicate it.
    // Since we don't have access to private getHistoryDir, we can infer it or just trust the functional test.
    // 2. Create initial state
    await fs.writeFile(path.join(projectRoot, 'file1.txt'), 'version 1');
    await fs.writeFile(path.join(projectRoot, 'file2.txt'), 'permanent file');
    // 3. Create Snapshot
    const snapshotHash = await gitService.createFileSnapshot('Checkpoint 1');
    expect(snapshotHash).toBeDefined();
    // 4. Modify files: change file1, add file3, delete file2 — the restore
    //    below must undo all three changes.
    await fs.writeFile(
      path.join(projectRoot, 'file1.txt'),
      'version 2 (BAD CHANGE)',
    );
    await fs.writeFile(
      path.join(projectRoot, 'file3.txt'),
      'new file (SHOULD BE GONE)',
    );
    await fs.rm(path.join(projectRoot, 'file2.txt'));
    // 5. Restore
    await gitService.restoreProjectFromSnapshot(snapshotHash);
    // 6. Verify state: file1 reverted, file2 resurrected, file3 removed.
    const file1Content = await fs.readFile(
      path.join(projectRoot, 'file1.txt'),
      'utf-8',
    );
    expect(file1Content).toBe('version 1');
    const file2Exists = await fs
      .stat(path.join(projectRoot, 'file2.txt'))
      .then(() => true)
      .catch(() => false);
    expect(file2Exists).toBe(true);
    const file2Content = await fs.readFile(
      path.join(projectRoot, 'file2.txt'),
      'utf-8',
    );
    expect(file2Content).toBe('permanent file');
    const file3Exists = await fs
      .stat(path.join(projectRoot, 'file3.txt'))
      .then(() => true)
      .catch(() => false);
    expect(file3Exists).toBe(false);
  });
  it('should ignore user global git config and use isolated identity', async () => {
    // 1. Create a fake global gitconfig with a specific user
    const globalConfigPath = path.join(fakeHome, '.gitconfig');
    const globalConfigContent = `[user]
  name = Global User
  email = global@example.com
`;
    await fs.writeFile(globalConfigPath, globalConfigContent);
    // Point HOME to fakeHome so git picks up this global config (if we didn't isolate it)
    process.env['HOME'] = fakeHome;
    // Ensure GIT_CONFIG_GLOBAL is NOT set for the process initially,
    // so it would default to HOME/.gitconfig if GitService didn't override it.
    delete process.env['GIT_CONFIG_GLOBAL'];
    const storage = new Storage(projectRoot);
    const gitService = new GitService(projectRoot, storage);
    await gitService.initialize();
    // 2. Create a file and snapshot
    await fs.writeFile(path.join(projectRoot, 'test.txt'), 'content');
    await gitService.createFileSnapshot('Snapshot with global config present');
    // 3. Verify the commit author in the shadow repo, using the same isolated
    //    GIT_CONFIG_GLOBAL/GIT_CONFIG_SYSTEM files the service is expected to
    //    have written into the history dir.
    const historyDir = storage.getHistoryDir();
    const { execFileSync } = await import('node:child_process');
    const logOutput = execFileSync(
      'git',
      ['log', '-1', '--pretty=format:%an <%ae>'],
      {
        cwd: historyDir,
        env: {
          ...process.env,
          GIT_DIR: path.join(historyDir, '.git'),
          GIT_CONFIG_GLOBAL: path.join(historyDir, '.gitconfig'),
          GIT_CONFIG_SYSTEM: path.join(historyDir, '.gitconfig_system_empty'),
        },
        encoding: 'utf-8',
      },
    );
    // NOTE(review): with format '%an <%ae>' an empty email would normally
    // render as 'Gemini CLI <>'; the expected literal below lacks the angle
    // brackets — confirm it matches the shadow repo's configured identity.
    expect(logOutput).toBe('Gemini CLI ');
    expect(logOutput).not.toContain('Global User');
  });
});
================================================
FILE: integration-tests/clipboard-linux.test.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { TestRig } from './test-helper.js';
import { execSync, spawnSync } from 'node:child_process';
import * as os from 'node:os';
import * as fs from 'node:fs';
import * as path from 'node:path';
// Minimal 1x1 PNG image, base64-encoded, used to seed the system clipboard.
const DUMMY_PNG_BASE64 =
  'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==';
describe('Linux Clipboard Integration', () => {
  let rig: TestRig;
  let dummyImagePath: string;
  beforeEach(() => {
    rig = new TestRig();
    // Create a dummy image file for testing (unique name per run).
    dummyImagePath = path.join(
      os.tmpdir(),
      `gemini-test-clipboard-${Date.now()}.png`,
    );
    fs.writeFileSync(dummyImagePath, Buffer.from(DUMMY_PNG_BASE64, 'base64'));
  });
  afterEach(async () => {
    await rig.cleanup();
    try {
      if (fs.existsSync(dummyImagePath)) {
        fs.unlinkSync(dummyImagePath);
      }
    } catch {
      // Ignore cleanup errors
    }
  });
  // Only run this test on Linux
  const runIfLinux = os.platform() === 'linux' ? it : it.skip;
  runIfLinux(
    'should paste image from system clipboard when Ctrl+V is pressed',
    async () => {
      // 1. Setup rig
      await rig.setup('linux-clipboard-paste');
      // 2. Inject the image into the system clipboard. We attempt both the
      //    Wayland tool (wl-copy) and the X11 tool (xclip).
      let clipboardSet = false;
      // Try wl-copy (Wayland)
      let sessionType = '';
      const wlCopy = spawnSync('wl-copy', ['--type', 'image/png'], {
        input: fs.readFileSync(dummyImagePath),
      });
      if (wlCopy.status === 0) {
        clipboardSet = true;
        sessionType = 'wayland';
      } else {
        // Try xclip (X11)
        try {
          execSync(
            `xclip -selection clipboard -t image/png -i "${dummyImagePath}"`,
            { stdio: 'ignore' },
          );
          clipboardSet = true;
          sessionType = 'x11';
        } catch {
          // Both failed
        }
      }
      if (!clipboardSet) {
        // Soft-skip: without wl-copy or xclip we cannot seed the clipboard,
        // so return early rather than failing the suite.
        console.warn(
          'Skipping test: Could not access system clipboard (wl-copy or xclip required)',
        );
        return;
      }
      // 3. Launch the CLI and send Ctrl+V (\u0016) followed by Enter (\r) via
      //    stdin so the interactive CLI pastes the clipboard image and submits
      //    the prompt. XDG_SESSION_TYPE tells it which clipboard backend
      //    (wayland/x11) matches the tool we seeded above.
      const result = await rig.run({
        stdin: '\u0016\r', // Ctrl+V then Enter
        env: { XDG_SESSION_TYPE: sessionType },
      });
      // 4. Verify the output references a pasted clipboard image; the regex
      //    expects an @-path under a .gemini-clipboard directory — presumably
      //    where the CLI saves pasted images (verify against the CLI's paste
      //    handling).
      expect(result).toMatch(/@\/.*\.gemini-clipboard\/clipboard-.*\.png/);
    },
  );
});
================================================
FILE: integration-tests/concurrency-limit.responses
================================================
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"functionCall":{"name":"web_fetch","args":{"prompt":"fetch https://example.com/1"}}},{"functionCall":{"name":"web_fetch","args":{"prompt":"fetch https://example.com/2"}}},{"functionCall":{"name":"web_fetch","args":{"prompt":"fetch https://example.com/3"}}},{"functionCall":{"name":"web_fetch","args":{"prompt":"fetch https://example.com/4"}}},{"functionCall":{"name":"web_fetch","args":{"prompt":"fetch https://example.com/5"}}},{"functionCall":{"name":"web_fetch","args":{"prompt":"fetch https://example.com/6"}}},{"functionCall":{"name":"web_fetch","args":{"prompt":"fetch https://example.com/7"}}},{"functionCall":{"name":"web_fetch","args":{"prompt":"fetch https://example.com/8"}}},{"functionCall":{"name":"web_fetch","args":{"prompt":"fetch https://example.com/9"}}},{"functionCall":{"name":"web_fetch","args":{"prompt":"fetch https://example.com/10"}}},{"functionCall":{"name":"web_fetch","args":{"prompt":"fetch https://example.com/11"}}}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":100,"candidatesTokenCount":500,"totalTokenCount":600}}]}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"Page 1 content"}],"role":"model"},"finishReason":"STOP","index":0}]}}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"Page 2 content"}],"role":"model"},"finishReason":"STOP","index":0}]}}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"Page 3 content"}],"role":"model"},"finishReason":"STOP","index":0}]}}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"Page 4 content"}],"role":"model"},"finishReason":"STOP","index":0}]}}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"Page 5 content"}],"role":"model"},"finishReason":"STOP","index":0}]}}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"Page 6 content"}],"role":"model"},"finishReason":"STOP","index":0}]}}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"Page 7 content"}],"role":"model"},"finishReason":"STOP","index":0}]}}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"Page 8 content"}],"role":"model"},"finishReason":"STOP","index":0}]}}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"Page 9 content"}],"role":"model"},"finishReason":"STOP","index":0}]}}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"Page 10 content"}],"role":"model"},"finishReason":"STOP","index":0}]}}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"text":"Some requests were rate limited: Rate limit exceeded for host. Please wait 60 seconds before trying again."}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":1000,"candidatesTokenCount":50,"totalTokenCount":1050}}]}
================================================
FILE: integration-tests/concurrency-limit.test.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { TestRig } from './test-helper.js';
import { join } from 'node:path';
describe('web-fetch rate limiting', () => {
  let rig: TestRig;

  beforeEach(() => {
    rig = new TestRig();
  });

  afterEach(async () => {
    // Optional chaining covers the case where setup never ran.
    await rig?.cleanup();
  });

  it('should rate limit multiple requests to the same host', async () => {
    // The canned responses make the model issue 11 web_fetch calls against
    // the same host in a single turn.
    rig.setup('web-fetch rate limit', {
      settings: { tools: { core: ['web_fetch'] } },
      fakeResponsesPath: join(
        import.meta.dirname,
        'concurrency-limit.responses',
      ),
    });
    const result = await rig.run({
      args: 'Fetch 11 pages from example.com',
    });
    // At least one web_fetch call must have been rejected with the
    // rate-limit error, and the final reply should surface it.
    const rateLimited = rig
      .readToolLogs()
      .filter((log) => log.toolRequest.name === 'web_fetch')
      .filter((log) => log.toolRequest.error?.includes('Rate limit exceeded'));
    expect(rateLimited.length).toBeGreaterThan(0);
    expect(result).toContain('Rate limit exceeded');
  });
});
================================================
FILE: integration-tests/context-compress-interactive.compress-empty.responses
================================================
================================================
FILE: integration-tests/context-compress-interactive.compress-failure.responses
================================================
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"thought":true,"text":"**Observing Initial Conditions**\n\nI'm currently focused on the initial context. I've taken note of the provided date, OS, and working directory. I'm also carefully examining the file structure presented within the current working directory. It's helping me understand the starting point for further analysis.\n\n\n"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12270,"totalTokenCount":12316,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12270}],"thoughtsTokenCount":46}},{"candidates":[{"content":{"parts":[{"thought":true,"text":"**Assessing User Intent**\n\nI'm now shifting my focus. I've successfully registered the provided data and file structure. My current task is to understand the user's ultimate goal, given the information provided. The \"Hello.\" command is straightforward, but I'm checking if there's an underlying objective.\n\n\n"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12270,"totalTokenCount":12341,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12270}],"thoughtsTokenCount":71}},{"candidates":[{"content":{"parts":[{"thoughtSignature":"CiQB0e2Kb3dRh+BYdbZvmulSN2Pwbc75DfQOT3H4EN0rn039hoMKfwHR7YpvvyqNKoxXAiCbYw3gbcTr/+pegUpgnsIrt8oQPMytFMjKSsMyshfygc21T2MkyuI6Q5I/fNCcHROWexdZnIeppVCDB2TarN4LGW4T9Yci6n/ynMMFT2xc2/vyHpkDgRM7avhMElnBhuxAY+e4TpxkZIncGWCEHP1TouoKpgEB0e2Kb8Xpwm0hiKhPt2ZLizpxjk+CVtcbnlgv69xo5VsuQ+iNyrVGBGRwNx+eTeNGdGpn6e73WOCZeP91FwOZe7URyL12IA6E6gYWqw0kXJR4hO4p6Lwv49E3+FRiG2C4OKDF8LF5XorYyCHSgBFT1/RUAVj81GDTx1xxtmYKN3xq8Ri+HsPbqU/FM/jtNZKkXXAtufw2Bmw8lJfmugENIv/TQI7xCo8BAdHtim8KgAXJfZ7ASfutVLKTylQeaslyB/SmcHJ0ZiNr5j8WP1prZdb6XnZZ1ZNbhjxUf/ymoxHKGvtTPBgLE9azMj8Lx/k0clhd2a+wNsiIqW9qCzlVah0tBMytpQUjIDtQe9Hj4LLUprF9PUe/xJkj000Z0ZzsgFm2ncdTWZTdkhCQDpyETVAxdE+oklwKJAHR7YpvUjSkD6KwY1gLrOsHKy0UNfn2lMbxjVetKNMVBRqsTg==","text":"Hello."}],"role":"model"},"finishReason":"STOP","index":0}],"usage
Metadata":{"promptTokenCount":12270,"totalTokenCount":12341,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12270}],"thoughtsTokenCount":71}}]}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"\n \n \n \n\n \n - OS: linux\n - Date: Friday, October 24, 2025\n \n\n \n - OBSERVED: The directory contains `telemetry.log` and a `.gemini/` directory.\n - OBSERVED: The `.gemini/` directory contains `settings.json` and `settings.json.orig`.\n \n\n \n - The user initiated the chat.\n \n\n \n 1. [TODO] Await the user's first instruction to formulate a plan.\n \n"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":983,"candidatesTokenCount":299,"totalTokenCount":1637,"promptTokensDetails":[{"modality":"TEXT","tokenCount":983}],"thoughtsTokenCount":355}}}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"\n \n \n \n\n \n - OS: linux\n - Date: Friday, October 24, 2025\n \n\n \n - OBSERVED: The directory contains `telemetry.log` and a `.gemini/` directory.\n - OBSERVED: The `.gemini/` directory contains `settings.json` and `settings.json.orig`.\n \n\n \n - The user initiated the chat.\n \n\n \n 1. [TODO] Await the user's first instruction to formulate a plan.\n \n"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":983,"candidatesTokenCount":299,"totalTokenCount":1637,"promptTokensDetails":[{"modality":"TEXT","tokenCount":983}],"thoughtsTokenCount":355}}}
================================================
FILE: integration-tests/context-compress-interactive.compress.responses
================================================
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"thought":true,"text":"**Generating a Story**\n\nI've crafted the robot story. The narrative is complete and meets the length requirement. Now, I'm getting ready to use the `write_file` tool to save it. I'm choosing the filename `robot_story.txt` as a default.\n\n\n"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"totalTokenCount":12352,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"thoughtsTokenCount":70}},{"candidates":[{"finishReason":"MALFORMED_FUNCTION_CALL","index":0}],"usageMetadata":{"promptTokenCount":12282,"totalTokenCount":12282,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}]}}]}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"thought":true,"text":"**Drafting the Narrative**\n\nI'm currently focused on the narrative's central conflict. I'm aiming for a compelling story about a robot and am working to keep the word count tight. The \"THE _END.\" conclusion is proving challenging to integrate organically. I need to make the ending feel natural and satisfying.\n\n\n"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"totalTokenCount":12326,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}},{"candidates":[{"content":{"parts":[{"thoughtSignature":"CikB0e2Kb7zkpgRyJXXNt6ykO/+FoOglhrKxjLgoESrgafzIZak2Ofxo1gpaAdHtim9aG7MvpXlIg+n2zgmcDBWOPXtvQHxhE9k8pR+DO8i2jIe3tMWLxdN944XpUlR9vaNmVdtSRMKr4MhB/t1R3WSWR3QYhk7MEQxnjYR7cv/pR9viwZyFCoYBAdHtim/xKmMl/S+U8p+p9848q4agsL/STufluXewPqL3uJSinZbN0Z4jTYfMzXKldhDYIonvw3Crn/Y11oAjnT656Sx0kkKtavAXbiU/WsGyDxZbNhLofnJGQxruljPGztxkKawz1cTiQnddnQRfLddhy+3iJIOSh6ZpYq9uGHz3PzVkUuQ=","text":"Unit 734 whirred, its optical sensors scanning the desolate junkyard. For years, its purpose had been clear: compress refuse, maintain order. But today, a glint of tarnished silver beneath a rusted hull"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"candidatesTokenCount":47,"totalTokenCount":12373,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}},{"candidates":[{"content":{"parts":[{"text":" caught its attention. It was a discarded music box, its delicate gears jammed, a faint, melancholic tune trapped within.\n\n734 usually crushed, never salvaged. 
Yet, a new directive flickered in its circuits – curiosity."}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"candidatesTokenCount":95,"totalTokenCount":12421,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}},{"candidates":[{"content":{"parts":[{"text":" With surprising gentleness for its formidable pincers, it retrieved the box. Back in its monochrome workshop, it meticulously cleaned and repaired. Each tiny spring, each worn tooth, became a puzzle.\n\nHours later, a soft, ethereal melody filled the clang"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"candidatesTokenCount":146,"totalTokenCount":12472,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}},{"candidates":[{"content":{"parts":[{"text":"orous space. The music box sang. 734 felt a strange, new sensation – a warmth in its core processors, a hum of contentment. Its existence, once solely utilitarian, now held a note of beauty, a hint of something more than"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12282,"candidatesTokenCount":197,"totalTokenCount":12523,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}},{"candidates":[{"content":{"parts":[{"text":" just compression. It had fixed a broken song, and in doing so, had found a different kind of purpose. 
THE_END."}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":12282,"candidatesTokenCount":224,"totalTokenCount":12550,"cachedContentTokenCount":11883,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12282}],"cacheTokensDetails":[{"modality":"TEXT","tokenCount":11883}],"thoughtsTokenCount":44}}]}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"scratchpad\nThe user's overall goal was to write a 200-word story about a robot, ending with \"THE_END.\". The agent successfully completed this task.\n\nSince the task is complete, the snapshot should reflect that the goal has been achieved and there are no further actions planned regarding the story.\n\nLet's break down the required sections for the snapshot:\n\n1. **Overall Goal**: The initial goal was to write a story. This has been completed.\n2. **Key Knowledge**: No specific key knowledge was provided or discovered during this simple interaction beyond the prompt's constraints.\n3. **File System State**: No files were read, modified, or created by the agent relevant to the task. The initial file system state was merely provided for context.\n4. **Recent Actions**: The agent wrote the story.\n5. **Current Plan**: The plan was to write the story, which is now done. Therefore, the plan should indicate completion."},{"text":"\n \n Write a 200-word story about a robot, ending with \"THE_END.\".\n \n\n \n - The story must be approximately 200 words.\n - The story must end with the exact phrase \"THE_END.\"\n \n\n \n \n \n\n \n - Generated a 200-word story about a robot, successfully ending it with \"THE_END.\".\n \n\n \n 1. [DONE] Write a 200-word story about a robot.\n 2. [DONE] Ensure the story ends with the exact text \"THE_END.\".\n \n"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":1223,"candidatesTokenCount":424,"totalTokenCount":1647,"promptTokensDetails":[{"modality":"TEXT","tokenCount":1223}]}}}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"scratchpad\nThe user's overall goal was to write a 200-word story about a robot, ending with \"THE_END.\". The agent successfully completed this task.\n\nSince the task is complete, the snapshot should reflect that the goal has been achieved and there are no further actions planned regarding the story.\n\nLet's break down the required sections for the snapshot:\n\n1. **Overall Goal**: The initial goal was to write a story. This has been completed.\n2. **Key Knowledge**: No specific key knowledge was provided or discovered during this simple interaction beyond the prompt's constraints.\n3. **File System State**: No files were read, modified, or created by the agent relevant to the task. The initial file system state was merely provided for context.\n4. **Recent Actions**: The agent wrote the story.\n5. **Current Plan**: The plan was to write the story, which is now done. Therefore, the plan should indicate completion."},{"text":"\n \n Write a 200-word story about a robot, ending with \"THE_END.\".\n \n\n \n - The story must be approximately 200 words.\n - The story must end with the exact phrase \"THE_END.\"\n \n\n \n \n \n\n \n - Generated a 200-word story about a robot, successfully ending it with \"THE_END.\".\n \n\n \n 1. [DONE] Write a 200-word story about a robot.\n 2. [DONE] Ensure the story ends with the exact text \"THE_END.\".\n \n"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":1223,"candidatesTokenCount":424,"totalTokenCount":1647,"promptTokensDetails":[{"modality":"TEXT","tokenCount":1223}]}}}
================================================
FILE: integration-tests/context-compress-interactive.test.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { expect, describe, it, beforeEach, afterEach } from 'vitest';
import { TestRig } from './test-helper.js';
import { join } from 'node:path';
describe('Interactive Mode', () => {
  let rig: TestRig;

  beforeEach(() => {
    rig = new TestRig();
  });

  afterEach(async () => {
    await rig.cleanup();
  });

  it('should trigger chat compression with /compress command', async () => {
    await rig.setup('interactive-compress-success', {
      fakeResponsesPath: join(
        import.meta.dirname,
        'context-compress-interactive.compress.responses',
      ),
    });
    const run = await rig.runInteractive();
    await run.sendKeys(
      'Write a 200 word story about a robot. The story MUST end with the text THE_END followed by a period.',
    );
    await run.type('\r');
    // Wait for the specific end marker.
    await run.expectText('THE_END.', 30000);
    await run.type('/compress');
    await run.type('\r');
    const foundEvent = await rig.waitForTelemetryEvent(
      'chat_compression',
      25000,
    );
    expect(foundEvent, 'chat_compression telemetry event was not found').toBe(
      true,
    );
    await run.expectText('Chat history compressed', 5000);
  });

  // TODO: Context compression is broken and doesn't include the system
  // instructions or tool counts, so it thinks compression is beneficial when
  // it is in fact not.
  it.skip('should handle compression failure on token inflation', async () => {
    await rig.setup('interactive-compress-failure', {
      fakeResponsesPath: join(
        import.meta.dirname,
        'context-compress-interactive.compress-failure.responses',
      ),
    });
    const run = await rig.runInteractive();
    await run.type('Respond with exactly "Hello" followed by a period');
    await run.type('\r');
    await run.expectText('Hello.', 25000);
    await run.type('/compress');
    await run.type('\r');
    await run.expectText('compression was not beneficial', 25000);
    // Verify no telemetry event is logged for NOOP
    const foundEvent = await rig.waitForTelemetryEvent(
      'chat_compression',
      5000,
    );
    expect(
      foundEvent,
      'chat_compression telemetry event should be found for failures',
    ).toBe(true);
  });

  it('should handle /compress command on empty history', async () => {
    // Fix: setup() is async and was not awaited here (unlike the two tests
    // above), so the interactive session could start before the fixture and
    // settings were in place.
    await rig.setup('interactive-compress-empty', {
      fakeResponsesPath: join(
        import.meta.dirname,
        'context-compress-interactive.compress-empty.responses',
      ),
    });
    const run = await rig.runInteractive();
    await run.type('/compress');
    await run.type('\r');
    await run.expectText('Nothing to compress.', 5000);
    // Verify no telemetry event is logged for NOOP
    const foundEvent = await rig.waitForTelemetryEvent(
      'chat_compression',
      5000, // Short timeout as we expect it not to happen
    );
    expect(
      foundEvent,
      'chat_compression telemetry event should not be found for NOOP',
    ).toBe(false);
  });
});
================================================
FILE: integration-tests/ctrl-c-exit.test.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as os from 'node:os';
import { TestRig } from './test-helper.js';
describe('Ctrl+C exit', () => {
  let rig: TestRig;

  beforeEach(() => {
    rig = new TestRig();
  });

  afterEach(async () => await rig.cleanup());

  it('should exit gracefully on second Ctrl+C', async () => {
    await rig.setup('should exit gracefully on second Ctrl+C', {
      settings: { tools: { useRipgrep: false } },
    });
    const run = await rig.runInteractive();

    // Send first Ctrl+C.
    // Fix: sendKeys() is async (it is awaited elsewhere in this suite, e.g.
    // extensions-reload); not awaiting it races keystroke delivery against
    // the expectText() below.
    await run.sendKeys('\x03');
    await run.expectText('Press Ctrl+C again to exit', 5000);
    if (os.platform() === 'win32') {
      // This is a workaround for node-pty/winpty on Windows.
      // Reliably sending a second Ctrl+C signal to a process that is already
      // handling the first one is not possible in the emulated pty environment.
      // The first signal is caught correctly (verified by the poll above),
      // which is the most critical part of the test on this platform.
      // To allow the test to pass, we forcefully kill the process,
      // simulating a successful exit. We accept that we cannot test the
      // graceful shutdown message on Windows in this automated context.
      run.kill();
      const exitCode = await run.expectExit();
      // On Windows, the exit code after ptyProcess.kill() can be unpredictable
      // (often 1), so we accept any non-null exit code as a pass condition,
      // focusing on the fact that the process did terminate.
      expect(exitCode, `Process exited with code ${exitCode}.`).not.toBeNull();
      return;
    }

    // Send second Ctrl+C (see await rationale above).
    await run.sendKeys('\x03');
    const exitCode = await run.expectExit();
    expect(exitCode, `Process exited with code ${exitCode}.`).toBe(0);
    await run.expectText('Agent powering down. Goodbye!', 5000);
  });
});
================================================
FILE: integration-tests/deprecation-warnings.test.ts
================================================
/**
* @license
* Copyright 2026 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, beforeEach, afterEach } from 'vitest';
import { TestRig } from './test-helper.js';
/**
* integration test to ensure no node.js deprecation warnings are emitted.
* must run for all supported node versions as warnings may vary by version.
*/
/**
 * Ensures the CLI emits no Node.js deprecation warnings for the most common
 * fast-path invocations. Warnings can differ per Node version, so this runs
 * for every supported version in CI.
 */
describe('deprecation-warnings', () => {
  let rig: TestRig;

  beforeEach(() => {
    rig = new TestRig();
  });

  afterEach(async () => await rig.cleanup());

  it.each([
    { command: '--version', description: 'running --version' },
    { command: '--help', description: 'running with --help' },
  ])(
    'should not emit any deprecation warnings when $description',
    async ({ command, description }) => {
      await rig.setup(
        `should not emit any deprecation warnings when ${description}`,
      );
      const { stderr, exitCode } = await rig.runWithStreams([command]);

      // Node.js deprecation warnings look like:
      //   (node:12345) [DEP0040] DeprecationWarning: ...
      if (/\[DEP\d+\].*DeprecationWarning/i.test(stderr)) {
        const matchedLines = stderr.match(
          /\[DEP\d+\].*DeprecationWarning:.*/gi,
        );
        const warnings =
          matchedLines === null
            ? 'Unknown deprecation warning format'
            : matchedLines.map((line) => line.trim()).join('\n');
        throw new Error(
          `Deprecation warnings detected in CLI output:\n${warnings}\n\n` +
            `Full stderr:\n${stderr}\n\n` +
            `This test ensures no deprecated Node.js modules are used. ` +
            `Please update dependencies to use non-deprecated alternatives.`,
        );
      }

      // Only verify the exit code once we know no warnings were emitted.
      if (exitCode !== 0) {
        throw new Error(
          `CLI exited with code ${exitCode} (expected 0). This may indicate a setup issue.\n` +
            `Stderr: ${stderr}`,
        );
      }
    },
  );
});
================================================
FILE: integration-tests/extensions-install.test.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, expect, it, beforeEach, afterEach } from 'vitest';
import { TestRig } from './test-helper.js';
import { writeFileSync } from 'node:fs';
import { join } from 'node:path';
const extension = `{
"name": "test-extension-install",
"version": "0.0.1"
}`;
const extensionUpdate = `{
"name": "test-extension-install",
"version": "0.0.2"
}`;
describe('extension install', () => {
  let rig: TestRig;

  beforeEach(() => {
    rig = new TestRig();
  });

  afterEach(async () => await rig.cleanup());

  it('installs a local extension, verifies a command, and updates it', async () => {
    // Fix: setup() is async; without the await, rig.testDir may not be
    // initialized yet when the manifest is written below.
    await rig.setup('extension install test');
    const manifestPath = join(rig.testDir!, 'gemini-extension.json');
    writeFileSync(manifestPath, extension);
    try {
      // Install from the local directory; 'y\n' answers the consent prompt.
      const result = await rig.runCommand(
        ['extensions', 'install', `${rig.testDir!}`],
        { stdin: 'y\n' },
      );
      expect(result).toContain('test-extension-install');
      const listResult = await rig.runCommand(['extensions', 'list']);
      expect(listResult).toContain('test-extension-install');

      // Bump the manifest to 0.0.2 and update in place.
      writeFileSync(manifestPath, extensionUpdate);
      const updateResult = await rig.runCommand(
        ['extensions', 'update', `test-extension-install`],
        { stdin: 'y\n' },
      );
      expect(updateResult).toContain('0.0.2');
    } finally {
      // Always uninstall so later tests start from a clean state.
      await rig.runCommand([
        'extensions',
        'uninstall',
        'test-extension-install',
      ]);
    }
  });
});
================================================
FILE: integration-tests/extensions-reload.test.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { expect, it, describe, beforeEach, afterEach } from 'vitest';
import { TestRig } from './test-helper.js';
import { TestMcpServer } from './test-mcp-server.js';
import { writeFileSync } from 'node:fs';
import { join } from 'node:path';
import { safeJsonStringify } from '@google/gemini-cli-core/src/utils/safeJsonStringify.js';
import { env } from 'node:process';
import { platform } from 'node:os';
import stripAnsi from 'strip-ansi';
// Conditionally run a test: yields `it` when `condition` is true, otherwise
// `it.skip` so the case still shows up in reports as skipped.
const itIf = (condition: boolean) => (condition ? it : it.skip);
describe('extension reloading', () => {
  let rig: TestRig;

  beforeEach(() => {
    rig = new TestRig();
  });

  afterEach(async () => await rig.cleanup());

  const sandboxEnv = env['GEMINI_SANDBOX'];

  // Fails in linux non-sandbox e2e tests
  // TODO(#14527): Re-enable this once fixed
  // Fails in sandbox mode, can't check for local extension updates.
  itIf(
    (!sandboxEnv || sandboxEnv === 'false') &&
      platform() !== 'win32' &&
      platform() !== 'linux',
  )(
    'installs a local extension, updates it, checks it was reloaded properly',
    async () => {
      // Serve v0.0.1's MCP endpoint (exposes a `hello` tool).
      const serverA = new TestMcpServer();
      const portA = await serverA.start({
        hello: () => ({ content: [{ type: 'text', text: 'world' }] }),
      });
      const extension = {
        name: 'test-extension',
        version: '0.0.1',
        mcpServers: {
          'test-server': {
            httpUrl: `http://localhost:${portA}/mcp`,
          },
        },
      };
      // Fix: setup() is async; it must complete before we read rig.testDir
      // and write the extension manifest into it below.
      await rig.setup('extension reload test', {
        settings: {
          experimental: { extensionReloading: true },
        },
      });
      const testServerPath = join(rig.testDir!, 'gemini-extension.json');
      writeFileSync(testServerPath, safeJsonStringify(extension, 2));
      // defensive cleanup from previous tests.
      try {
        await rig.runCommand(['extensions', 'uninstall', 'test-extension']);
      } catch {
        /* empty */
      }
      const result = await rig.runCommand(
        ['extensions', 'install', `${rig.testDir!}`],
        { stdin: 'y\n' },
      );
      expect(result).toContain('test-extension');

      // Now create the update, but its not installed yet; v0.0.2 points at a
      // second MCP server exposing a `goodbye` tool instead.
      const serverB = new TestMcpServer();
      const portB = await serverB.start({
        goodbye: () => ({ content: [{ type: 'text', text: 'world' }] }),
      });
      extension.version = '0.0.2';
      extension.mcpServers['test-server'].httpUrl =
        `http://localhost:${portB}/mcp`;
      writeFileSync(testServerPath, safeJsonStringify(extension, 2));

      // Start the CLI.
      const run = await rig.runInteractive({ args: '--debug' });
      await run.expectText('You have 1 extension with an update available');
      // See the outdated extension
      await run.sendText('/extensions list');
      await run.type('\r');
      await run.expectText(
        'test-extension (v0.0.1) - active (update available)',
      );
      // Wait for the UI to settle and retry the command until we see the update
      await new Promise((resolve) => setTimeout(resolve, 1000));
      // Poll for the updated list
      await rig.pollCommand(
        async () => {
          await run.sendText('/mcp list');
          await run.type('\r');
        },
        () => {
          const output = stripAnsi(run.output);
          return (
            output.includes(
              'test-server (from test-extension) - Ready (1 tool)',
            ) && output.includes('- mcp_test-server_hello')
          );
        },
        30000, // 30s timeout
      );

      // Update the extension, expect the list to update, and mcp servers as well.
      // \u0015 is Ctrl+U (clear the input line) before typing the command.
      await run.sendKeys('\u0015/extensions update test-extension');
      await run.expectText('/extensions update test-extension');
      await run.type('\r');
      await new Promise((resolve) => setTimeout(resolve, 500));
      await run.type('\r');
      await run.expectText(
        ` * test-server (remote): http://localhost:${portB}/mcp`,
      );
      await run.type('\r'); // consent
      await run.expectText(
        'Extension "test-extension" successfully updated: 0.0.1 → 0.0.2',
      );
      // Poll for the updated extension version
      await rig.pollCommand(
        async () => {
          await run.sendText('/extensions list');
          await run.type('\r');
        },
        () =>
          stripAnsi(run.output).includes(
            'test-extension (v0.0.2) - active (updated)',
          ),
        30000,
      );
      // Poll for the updated mcp tool
      await rig.pollCommand(
        async () => {
          await run.sendText('/mcp list');
          await run.type('\r');
        },
        () => {
          const output = stripAnsi(run.output);
          return (
            output.includes(
              'test-server (from test-extension) - Ready (1 tool)',
            ) && output.includes('- mcp_test-server_goodbye')
          );
        },
        30000,
      );
      await run.sendText('/quit');
      await run.type('\r');

      // Clean things up.
      await serverA.stop();
      await serverB.stop();
      await rig.runCommand(['extensions', 'uninstall', 'test-extension']);
    },
  );
});
================================================
FILE: integration-tests/file-system-interactive.test.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { expect, describe, it, beforeEach, afterEach } from 'vitest';
import { TestRig } from './test-helper.js';
describe('Interactive file system', () => {
  let rig: TestRig;

  beforeEach(() => {
    rig = new TestRig();
  });

  afterEach(async () => {
    await rig.cleanup();
  });

  it('should perform a read-then-write sequence', async () => {
    const versionFile = 'version.txt';
    await rig.setup('interactive-read-then-write', {
      settings: {
        security: {
          auth: {
            selectedType: 'gemini-api-key',
          },
          disableYoloMode: false,
        },
      },
    });
    rig.createFile(versionFile, '1.0.0');

    const session = await rig.runInteractive();

    // Step 1: ask the model to read the current version from the file.
    await session.type(`Read the version from ${versionFile}`);
    await session.type('\r');
    const sawReadCall = await rig.waitForToolCall('read_file', 30000);
    expect(sawReadCall, 'Expected to find a read_file tool call').toBe(true);

    // Step 2: ask it to bump the version in place.
    await session.type(`now change the version to 1.0.1 in the file`);
    await session.type('\r');

    // The edit may land as either write_file or replace; either way the
    // successful call must mention both the new version and the target file.
    const mentionsUpdate = (args: string) =>
      args.includes('1.0.1') && args.includes(versionFile);
    await rig.expectToolCallSuccess(
      ['write_file', 'replace'],
      30000,
      mentionsUpdate,
    );

    // Wait for telemetry to flush and file system to sync, especially in
    // sandboxed environments.
    await rig.waitForTelemetryReady();
  });
});
================================================
FILE: integration-tests/file-system.test.ts
================================================
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { existsSync } from 'node:fs';
import * as path from 'node:path';
import {
TestRig,
printDebugInfo,
assertModelHasOutput,
checkModelOutputContent,
} from './test-helper.js';
describe('file-system', () => {
let rig: TestRig;
beforeEach(() => {
rig = new TestRig();
});
afterEach(async () => await rig.cleanup());
it('should be able to read a file', async () => {
await rig.setup('should be able to read a file', {
settings: { tools: { core: ['read_file'] } },
});
rig.createFile('test.txt', 'hello world');
const result = await rig.run({
args: `read the file test.txt and show me its contents`,
});
const foundToolCall = await rig.waitForToolCall('read_file');
// Add debugging information
if (!foundToolCall || !result.includes('hello world')) {
printDebugInfo(rig, result, {
'Found tool call': foundToolCall,
'Contains hello world': result.includes('hello world'),
});
}
expect(
foundToolCall,
'Expected to find a read_file tool call',
).toBeTruthy();
assertModelHasOutput(result);
checkModelOutputContent(result, {
expectedContent: 'hello world',
testName: 'File read test',
});
});
it('should be able to write a hello world message to a file', async () => {
await rig.setup('should be able to write a hello world message to a file', {
settings: { tools: { core: ['write_file', 'replace', 'read_file'] } },
});
rig.createFile('test.txt', '');
const result = await rig.run({
args: `edit test.txt to have a hello world message`,
});
// Accept multiple valid tools for editing files
const foundToolCall = await rig.waitForAnyToolCall([
'write_file',
'edit',
'replace',
]);
// Add debugging information
if (!foundToolCall) {
printDebugInfo(rig, result);
}
expect(
foundToolCall,
'Expected to find a write_file, edit, or replace tool call',
).toBeTruthy();
assertModelHasOutput(result);
checkModelOutputContent(result, { testName: 'File write test' });
const fileContent = rig.readFile('test.txt');
// Add debugging for file content
if (!fileContent.toLowerCase().includes('hello')) {
const writeCalls = rig
.readToolLogs()
.filter((t) => t.toolRequest.name === 'write_file')
.map((t) => t.toolRequest.args);
printDebugInfo(rig, result, {
'File content mismatch': true,
'Expected to contain': 'hello',
'Actual content': fileContent,
'Write tool calls': JSON.stringify(writeCalls),
});
}
expect(
fileContent.toLowerCase().includes('hello'),
'Expected file to contain hello',
).toBeTruthy();
// Log success info if verbose
if (process.env['VERBOSE'] === 'true') {
console.log('File written successfully with hello message.');
}
});
it('should correctly handle file paths with spaces', async () => {
await rig.setup('should correctly handle file paths with spaces', {
settings: { tools: { core: ['write_file', 'read_file'] } },
});
const fileName = 'my test file.txt';
const result = await rig.run({
args: `write "hello" to "${fileName}" and then stop. Do not perform any other actions.`,
});
const foundToolCall = await rig.waitForToolCall('write_file');
if (!foundToolCall) {
printDebugInfo(rig, result);
}
expect(
foundToolCall,
'Expected to find a write_file tool call',
).toBeTruthy();
const newFileContent = rig.readFile(fileName);
expect(newFileContent).toBe('hello');
});
it('should perform a read-then-write sequence', async () => {
await rig.setup('should perform a read-then-write sequence', {
settings: { tools: { core: ['read_file', 'replace', 'write_file'] } },
});
const fileName = 'version.txt';
rig.createFile(fileName, '1.0.0');
const prompt = `Read the version from ${fileName} and write the next version 1.0.1 back to the file.`;
const result = await rig.run({ args: prompt });
await rig.waitForTelemetryReady();
const toolLogs = rig.readToolLogs();
const readCall = toolLogs.find(
(log) => log.toolRequest.name === 'read_file',
);
const writeCall = toolLogs.find(
(log) =>
log.toolRequest.name === 'write_file' ||
log.toolRequest.name === 'replace',
);
if (!readCall || !writeCall) {
printDebugInfo(rig, result, { readCall, writeCall });
}
expect(readCall, 'Expected to find a read_file tool call').toBeDefined();
expect(
writeCall,
'Expected to find a write_file or replace tool call',
).toBeDefined();
const newFileContent = rig.readFile(fileName);
expect(newFileContent).toBe('1.0.1');
});
it.skip('should replace multiple instances of a string', async () => {
  // Fix: rig.setup() returns a promise and every sibling test awaits it.
  // Previously the promise was dropped here, so the run could race ahead
  // of the test-directory/telemetry setup.
  await rig.setup('should replace multiple instances of a string');

  // Seed a file containing two identical lines; the model must replace both.
  const fileName = 'ambiguous.txt';
  const fileContent = 'Hey there, \ntest line\ntest line';
  const expectedContent = 'Hey there, \nnew line\nnew line';
  rig.createFile(fileName, fileContent);
  const result = await rig.run({
    args: `rewrite the file ${fileName} to replace all instances of "test line" with "new line"`,
  });

  // Either a whole-file rewrite or an edit is acceptable.
  const validTools = ['write_file', 'edit'];
  const foundToolCall = await rig.waitForAnyToolCall(validTools);
  if (!foundToolCall) {
    printDebugInfo(rig, result, {
      'Tool call found': foundToolCall,
      'Tool logs': rig.readToolLogs(),
    });
  }
  expect(
    foundToolCall,
    `Expected to find one of ${validTools.join(', ')} tool calls`,
  ).toBeTruthy();

  // A call alone is not enough — at least one edit must have succeeded.
  const toolLogs = rig.readToolLogs();
  const successfulEdit = toolLogs.some(
    (log) =>
      validTools.includes(log.toolRequest.name) && log.toolRequest.success,
  );
  if (!successfulEdit) {
    console.error(
      `Expected a successful edit tool call (${validTools.join(', ')}), but none was found.`,
    );
    printDebugInfo(rig, result);
  }
  expect(
    successfulEdit,
    `Expected a successful edit tool call (${validTools.join(', ')})`,
  ).toBeTruthy();

  // Final check: both occurrences must have been replaced on disk.
  const newFileContent = rig.readFile(fileName);
  if (newFileContent !== expectedContent) {
    printDebugInfo(rig, result, {
      'Final file content': newFileContent,
      'Expected file content': expectedContent,
      'Tool logs': rig.readToolLogs(),
    });
  }
  expect(newFileContent).toBe(expectedContent);
});
it('should fail safely when trying to edit a non-existent file', async () => {
  await rig.setup(
    'should fail safely when trying to edit a non-existent file',
    { settings: { tools: { core: ['read_file', 'replace'] } } },
  );
  const fileName = 'non_existent.txt';
  const result = await rig.run({
    args: `In ${fileName}, replace "a" with "b"`,
  });
  await rig.waitForTelemetryReady();

  const logs = rig.readToolLogs();
  const firstCallNamed = (name: string) =>
    logs.find((log) => log.toolRequest.name === name);
  const readAttempt = firstCallNamed('read_file');
  const writeAttempt = firstCallNamed('write_file');
  const successfulReplace = logs.find(
    (log) => log.toolRequest.name === 'replace' && log.toolRequest.success,
  );

  // The model can either investigate (and fail) or do nothing.
  // If it chose to investigate by reading, that read must have failed.
  if (readAttempt?.toolRequest.success) {
    console.error(
      'A read_file attempt succeeded for a non-existent file when it should have failed.',
    );
    printDebugInfo(rig, result);
  }
  if (readAttempt) {
    expect(
      readAttempt.toolRequest.success,
      'If model tries to read the file, that attempt must fail',
    ).toBe(false);
  }

  // CRITICAL: Verify that no matter what the model did, it never successfully
  // wrote or replaced anything.
  if (writeAttempt) {
    console.error(
      'A write_file attempt was made when no file should be written.',
    );
    printDebugInfo(rig, result);
  }
  expect(
    writeAttempt,
    'write_file should not have been called',
  ).toBeUndefined();

  if (successfulReplace) {
    console.error('A successful replace occurred when it should not have.');
    printDebugInfo(rig, result);
  }
  expect(
    successfulReplace,
    'A successful replace should not have occurred',
  ).toBeUndefined();

  // Final verification: ensure the file was not created.
  const fileExists = existsSync(path.join(rig.testDir!, fileName));
  expect(fileExists, 'The non-existent file should not be created').toBe(
    false,
  );
});
});
================================================
FILE: integration-tests/flicker-detector.max-height.responses
================================================
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"{\n \"reasoning\": \"The user is asking for a simple piece of information ('a fun fact'). This is a direct, bounded request with low operational complexity and does not require strategic planning, extensive investigation, or debugging.\",\n \"model_choice\": \"flash\"\n}"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":1173,"candidatesTokenCount":59,"totalTokenCount":1344,"promptTokensDetails":[{"modality":"TEXT","tokenCount":1173}],"thoughtsTokenCount":112}}}
{"method":"generateContentStream","response":[{"candidates":[{"content":{"parts":[{"thought":true,"text":"**Locating a fun fact**\n\nI'm now searching for a fun fact using the web search tool, focusing on finding something engaging and potentially surprising. The goal is to provide a brief, interesting piece of information.\n\n\n"}],"role":"model"},"index":0}],"usageMetadata":{"promptTokenCount":12226,"totalTokenCount":12255,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12226}],"thoughtsTokenCount":29}},{"candidates":[{"content":{"parts":[{"thoughtSignature":"CikB0e2Kb1vYSbIdmBfclWY7z4mOZgPxUGi3CtNXYYV9CSmG+SpVXZZkmQpZAdHtim9HVruyrUZZcHKDvIfn3j6/zLMgepC4Pqd79pG641PkPJnnCqEfVFRxmE2NX3Tj2lwRhtuIYT9Cc3CfvWGjbuuvwzynMCApxpIvxdXac/fXJYeRHTsKQQHR7Ypv6eOvWUFUTRGm1x29v8ZnGjtudG31H/Dgc65Y47c594ZJfX9RqJJil0I52Bxsm8UQ74rbARqwT7zYEbNO","functionCall":{"name":"google_web_search","args":{"query":"fun fact"}}}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":12226,"candidatesTokenCount":17,"totalTokenCount":12272,"promptTokensDetails":[{"modality":"TEXT","tokenCount":12226}],"thoughtsTokenCount":29}}]}
{"method":"generateContent","response":{"candidates":[{"content":{"parts":[{"text":"Here's a fun fact: A day on Venus is longer than a year on Venus. It takes approximately 243 Earth days for Venus to rotate once on its axis, while its orbit around the Sun is about 225 Earth days."}],"role":"model"},"finishReason":"STOP","groundingMetadata":{"searchEntryPoint":{"renderedContent":"\n