Repository: RhysSullivan/executor Branch: main Commit: 0309aa49e526 Files: 459 Total size: 13.6 MB Directory structure: gitextract_p4l6ztj_/ ├── .changeset/ │ ├── README.md │ ├── config.json │ ├── eleven-towns-brush.md │ ├── fiery-pianos-argue.md │ ├── nice-bananas-hammer.md │ ├── pre.json │ ├── rude-shrimps-teach.md │ └── two-vans-attend.md ├── .executor/ │ └── executor.jsonc ├── .github/ │ └── workflows/ │ ├── publish-executor-package.yml │ └── release.yml ├── .gitignore ├── .oxlintrc.jsonc ├── ARCHITECTURE.md ├── PLAN.md ├── README.md ├── TRACING.md ├── apps/ │ ├── docs/ │ │ ├── CHANGELOG.md │ │ ├── developer/ │ │ │ ├── adapters-and-formats.mdx │ │ │ ├── cli.mdx │ │ │ ├── codemode.mdx │ │ │ ├── core-model.mdx │ │ │ ├── credentials-and-auth.mdx │ │ │ ├── extending-executor.mdx │ │ │ ├── import-fidelity.mdx │ │ │ ├── mcp.mdx │ │ │ ├── overview.mdx │ │ │ ├── persistence-and-migrations.mdx │ │ │ └── tool-catalog-and-execution.mdx │ │ ├── docs.json │ │ ├── introduction.mdx │ │ └── package.json │ ├── executor/ │ │ ├── CHANGELOG.md │ │ ├── bin/ │ │ │ └── executor │ │ ├── package.json │ │ ├── src/ │ │ │ ├── cli/ │ │ │ │ ├── dev.ts │ │ │ │ ├── interaction-handling.test.ts │ │ │ │ ├── interaction-handling.ts │ │ │ │ ├── main.ts │ │ │ │ ├── pending-interaction-output.test.ts │ │ │ │ ├── pending-interaction-output.ts │ │ │ │ └── runtime-paths.ts │ │ │ ├── distribution/ │ │ │ │ ├── artifact.ts │ │ │ │ ├── distribution.test.ts │ │ │ │ ├── harness.ts │ │ │ │ ├── metadata.ts │ │ │ │ └── publish.ts │ │ │ ├── effect-errors.ts │ │ │ └── server/ │ │ │ ├── server.real-ingestion.test.ts │ │ │ └── server.test.ts │ │ └── tsconfig.json │ └── web/ │ ├── CHANGELOG.md │ ├── package.json │ ├── src/ │ │ ├── components/ │ │ │ ├── code-block.tsx │ │ │ ├── document-panel.tsx │ │ │ ├── icons.tsx │ │ │ ├── loadable.tsx │ │ │ ├── local-mcp-install-card.tsx │ │ │ ├── markdown.tsx │ │ │ ├── shell.tsx │ │ │ ├── source-favicon.tsx │ │ │ ├── source-not-found-state.tsx │ │ │ ├── 
source-recovery-state.tsx │ │ │ └── ui/ │ │ │ ├── badge.tsx │ │ │ └── button.tsx │ │ ├── dev.ts │ │ ├── frontend.tsx │ │ ├── globals.css │ │ ├── index.html │ │ ├── lib/ │ │ │ ├── schema-display.ts │ │ │ ├── shiki.ts │ │ │ ├── source-favicon.ts │ │ │ └── utils.ts │ │ ├── main.tsx │ │ ├── server.ts │ │ └── views/ │ │ ├── add-source.tsx │ │ ├── home.tsx │ │ ├── json-form.ts │ │ ├── mcp-transport-state.ts │ │ ├── secrets.tsx │ │ ├── source-detail.tsx │ │ ├── source-editor.tsx │ │ └── source-templates.ts │ ├── tsconfig.json │ └── vite.config.ts ├── compose.tracing.yaml ├── examples/ │ ├── mcp-elicitation-demo/ │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── index.ts │ │ │ ├── main.ts │ │ │ └── server.ts │ │ └── tsconfig.json │ └── serve-skills-via-mcp/ │ ├── CHANGELOG.md │ ├── README.md │ ├── package.json │ ├── skills/ │ │ ├── postgres-incident-triage/ │ │ │ ├── SKILL.md │ │ │ ├── references/ │ │ │ │ └── error-codes.md │ │ │ └── scripts/ │ │ │ └── blocked-session-query.sql │ │ └── release-notes-writer/ │ │ ├── SKILL.md │ │ ├── assets/ │ │ │ └── release-template.md │ │ ├── references/ │ │ │ └── style-guide.md │ │ └── scripts/ │ │ └── validate-headings.ts │ ├── src/ │ │ ├── catalog.ts │ │ ├── index.ts │ │ ├── main.ts │ │ ├── server.ts │ │ └── smoke.ts │ └── tsconfig.json ├── knip.json ├── opencode.json ├── package.json ├── packages/ │ ├── auth/ │ │ ├── mcp-oauth/ │ │ │ ├── package.json │ │ │ ├── src/ │ │ │ │ ├── effect-errors.ts │ │ │ │ └── index.ts │ │ │ └── tsconfig.json │ │ └── oauth2/ │ │ ├── package.json │ │ ├── src/ │ │ │ └── index.ts │ │ └── tsconfig.json │ ├── clients/ │ │ └── react/ │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── index.test.tsx │ │ │ └── index.ts │ │ ├── tsconfig.json │ │ └── vitest.config.ts │ ├── dev/ │ │ └── kitchen-sink/ │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── effect-errors.ts │ │ │ ├── index.ts │ │ │ ├── kitchen-sink.test.ts │ │ │ ├── playground.ts │ │ │ └── source-runtime.test.ts │ 
│ ├── tsconfig.json │ │ └── vitest.config.ts │ ├── hosts/ │ │ ├── ai-sdk/ │ │ │ ├── CHANGELOG.md │ │ │ ├── package.json │ │ │ ├── src/ │ │ │ │ ├── ai.ts │ │ │ │ ├── effect-errors.ts │ │ │ │ ├── example-codemode-inproc.ts │ │ │ │ ├── example-dynamic.ts │ │ │ │ ├── example-static.ts │ │ │ │ ├── example.ts │ │ │ │ └── index.ts │ │ │ └── tsconfig.json │ │ └── mcp/ │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── index.ts │ │ │ ├── paused-result.test.ts │ │ │ └── paused-result.ts │ │ └── tsconfig.json │ ├── kernel/ │ │ ├── core/ │ │ │ ├── CHANGELOG.md │ │ │ ├── package.json │ │ │ ├── src/ │ │ │ │ ├── discovery.ts │ │ │ │ ├── effect-errors.ts │ │ │ │ ├── http-request-placements.ts │ │ │ │ ├── index.test.ts │ │ │ │ ├── index.ts │ │ │ │ ├── isomorphic-hash.ts │ │ │ │ ├── json-schema.ts │ │ │ │ ├── schema-types.test.ts │ │ │ │ ├── schema-types.ts │ │ │ │ ├── system-tools.ts │ │ │ │ ├── tool-map.ts │ │ │ │ └── types.ts │ │ │ ├── tsconfig.json │ │ │ └── vitest.config.ts │ │ ├── ir/ │ │ │ ├── README.md │ │ │ ├── package.json │ │ │ ├── src/ │ │ │ │ ├── catalog.test.ts │ │ │ │ ├── catalog.ts │ │ │ │ ├── ids.ts │ │ │ │ ├── index.ts │ │ │ │ └── model.ts │ │ │ └── tsconfig.json │ │ ├── runtime-deno-subprocess/ │ │ │ ├── CHANGELOG.md │ │ │ ├── package.json │ │ │ ├── src/ │ │ │ │ ├── deno-subprocess-worker.mjs │ │ │ │ ├── deno-worker-process.ts │ │ │ │ ├── index.test.ts │ │ │ │ └── index.ts │ │ │ ├── tsconfig.json │ │ │ └── vitest.config.ts │ │ ├── runtime-quickjs/ │ │ │ ├── CHANGELOG.md │ │ │ ├── package.json │ │ │ ├── src/ │ │ │ │ ├── index.test.ts │ │ │ │ └── index.ts │ │ │ └── tsconfig.json │ │ └── runtime-ses/ │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── index.ts │ │ │ └── sandbox-worker.mjs │ │ └── tsconfig.json │ ├── platform/ │ │ ├── control-plane/ │ │ │ ├── CHANGELOG.md │ │ │ ├── package.json │ │ │ ├── src/ │ │ │ │ ├── api/ │ │ │ │ │ ├── api.ts │ │ │ │ │ ├── errors.ts │ │ │ │ │ ├── executions/ │ │ │ │ │ │ ├── api.ts │ │ │ │ │ │ └── http.ts 
│ │ │ │ │ ├── http.ts │ │ │ │ │ ├── index.ts │ │ │ │ │ ├── local/ │ │ │ │ │ │ ├── api.ts │ │ │ │ │ │ └── http.ts │ │ │ │ │ ├── local-context.ts │ │ │ │ │ ├── oauth/ │ │ │ │ │ │ ├── api.ts │ │ │ │ │ │ └── http.ts │ │ │ │ │ ├── payload-schemas.test.ts │ │ │ │ │ ├── policies/ │ │ │ │ │ │ ├── api.ts │ │ │ │ │ │ └── http.ts │ │ │ │ │ ├── sources/ │ │ │ │ │ │ ├── api.ts │ │ │ │ │ │ └── http.ts │ │ │ │ │ └── string-schemas.ts │ │ │ │ ├── client.ts │ │ │ │ ├── index.ts │ │ │ │ ├── runtime/ │ │ │ │ │ ├── __fixtures__/ │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ └── v1.2.3-google-calendar-workspace/ │ │ │ │ │ │ ├── .executor/ │ │ │ │ │ │ │ ├── artifacts/ │ │ │ │ │ │ │ │ └── sources/ │ │ │ │ │ │ │ │ ├── google-calendar/ │ │ │ │ │ │ │ │ │ └── documents/ │ │ │ │ │ │ │ │ │ └── doc_3c24a6731267de91.txt │ │ │ │ │ │ │ │ └── google-calendar.json │ │ │ │ │ │ │ ├── executor.jsonc │ │ │ │ │ │ │ └── state/ │ │ │ │ │ │ │ └── workspace-state.json │ │ │ │ │ │ └── fixture.json │ │ │ │ │ ├── auth/ │ │ │ │ │ │ ├── auth-artifacts.ts │ │ │ │ │ │ ├── auth-leases.ts │ │ │ │ │ │ ├── mcp-auth-provider.ts │ │ │ │ │ │ ├── mcp-oauth.ts │ │ │ │ │ │ ├── oauth-loopback.test.ts │ │ │ │ │ │ ├── oauth-loopback.ts │ │ │ │ │ │ ├── oauth2-pkce.ts │ │ │ │ │ │ ├── provider-grant-lifecycle.ts │ │ │ │ │ │ ├── source-auth-material.test.ts │ │ │ │ │ │ └── source-auth-material.ts │ │ │ │ │ ├── catalog/ │ │ │ │ │ │ ├── catalog-typescript.ts │ │ │ │ │ │ ├── prettier-format.ts │ │ │ │ │ │ ├── schema-type-signature.ts │ │ │ │ │ │ └── source/ │ │ │ │ │ │ ├── reconcile.ts │ │ │ │ │ │ ├── runtime.test.ts │ │ │ │ │ │ ├── runtime.ts │ │ │ │ │ │ ├── snapshot.test.ts │ │ │ │ │ │ ├── sync.ts │ │ │ │ │ │ ├── type-declarations.test.ts │ │ │ │ │ │ └── type-declarations.ts │ │ │ │ │ ├── control-plane-runtime.test.ts │ │ │ │ │ ├── effect-errors.ts │ │ │ │ │ ├── execution/ │ │ │ │ │ │ ├── http.test.ts │ │ │ │ │ │ ├── ir-execution.ts │ │ │ │ │ │ ├── live.test.ts │ │ │ │ │ │ ├── live.ts │ │ │ │ │ │ ├── mcp-resume.test.ts │ │ │ │ │ │ ├── 
runtime-config.test.ts │ │ │ │ │ │ ├── runtime.ts │ │ │ │ │ │ ├── service.ts │ │ │ │ │ │ ├── state.ts │ │ │ │ │ │ ├── test-http-client.ts │ │ │ │ │ │ └── workspace/ │ │ │ │ │ │ ├── authorization.ts │ │ │ │ │ │ ├── environment.ts │ │ │ │ │ │ ├── local.ts │ │ │ │ │ │ ├── source-catalog.ts │ │ │ │ │ │ └── tool-invoker.ts │ │ │ │ │ ├── fixtures/ │ │ │ │ │ │ ├── google-sheets-discovery.json │ │ │ │ │ │ ├── linear-introspection.json │ │ │ │ │ │ ├── neon-openapi.json │ │ │ │ │ │ └── vercel-openapi.json │ │ │ │ │ ├── index.ts │ │ │ │ │ ├── local/ │ │ │ │ │ │ ├── capture-release-workspace-fixture.ts │ │ │ │ │ │ ├── config-secrets.ts │ │ │ │ │ │ ├── config.test.ts │ │ │ │ │ │ ├── config.ts │ │ │ │ │ │ ├── control-plane-store.test.ts │ │ │ │ │ │ ├── control-plane-store.ts │ │ │ │ │ │ ├── errors.ts │ │ │ │ │ │ ├── installation.test.ts │ │ │ │ │ │ ├── installation.ts │ │ │ │ │ │ ├── operations.ts │ │ │ │ │ │ ├── release-upgrade-fixtures.ts │ │ │ │ │ │ ├── runtime-context.ts │ │ │ │ │ │ ├── secret-material-providers.ts │ │ │ │ │ │ ├── source-artifacts.test.ts │ │ │ │ │ │ ├── source-artifacts.ts │ │ │ │ │ │ ├── storage.ts │ │ │ │ │ │ ├── tools.ts │ │ │ │ │ │ ├── workspace-state.ts │ │ │ │ │ │ └── workspace-sync.ts │ │ │ │ │ ├── policy/ │ │ │ │ │ │ ├── invocation-policy-engine.test.ts │ │ │ │ │ │ ├── invocation-policy-engine.ts │ │ │ │ │ │ ├── operation-errors.ts │ │ │ │ │ │ ├── operations-shared.ts │ │ │ │ │ │ └── policies-operations.ts │ │ │ │ │ ├── sources/ │ │ │ │ │ │ ├── catalog-sync-result.ts │ │ │ │ │ │ ├── executor-tools.ts │ │ │ │ │ │ ├── graphql-tools.test.ts │ │ │ │ │ │ ├── graphql-tools.ts │ │ │ │ │ │ ├── slug.ts │ │ │ │ │ │ ├── source-adapter-fixture-matrix.test.ts │ │ │ │ │ │ ├── source-adapters/ │ │ │ │ │ │ │ ├── google-discovery.test.ts │ │ │ │ │ │ │ ├── google-discovery.ts │ │ │ │ │ │ │ ├── graphql.test.ts │ │ │ │ │ │ │ ├── graphql.ts │ │ │ │ │ │ │ ├── index.ts │ │ │ │ │ │ │ ├── internal.ts │ │ │ │ │ │ │ ├── mcp.test.ts │ │ │ │ │ │ │ ├── mcp.ts │ │ │ │ │ │ │ ├── 
openapi.ts │ │ │ │ │ │ │ ├── shared.ts │ │ │ │ │ │ │ └── types.ts │ │ │ │ │ │ ├── source-auth-service.ts │ │ │ │ │ │ ├── source-credential-interactions.ts │ │ │ │ │ │ ├── source-definitions.test.ts │ │ │ │ │ │ ├── source-definitions.ts │ │ │ │ │ │ ├── source-discovery.test.ts │ │ │ │ │ │ ├── source-discovery.ts │ │ │ │ │ │ ├── source-inspection.test.ts │ │ │ │ │ │ ├── source-inspection.ts │ │ │ │ │ │ ├── source-names.ts │ │ │ │ │ │ ├── source-store/ │ │ │ │ │ │ │ ├── auth.ts │ │ │ │ │ │ │ ├── config.ts │ │ │ │ │ │ │ ├── deps.ts │ │ │ │ │ │ │ ├── lifecycle.ts │ │ │ │ │ │ │ └── records.ts │ │ │ │ │ │ ├── source-store.test.ts │ │ │ │ │ │ ├── source-store.ts │ │ │ │ │ │ └── sources-operations.ts │ │ │ │ │ └── store.ts │ │ │ │ └── schema/ │ │ │ │ ├── common.ts │ │ │ │ ├── enums.ts │ │ │ │ ├── ids.ts │ │ │ │ ├── index.ts │ │ │ │ ├── models/ │ │ │ │ │ ├── auth/ │ │ │ │ │ │ ├── account.ts │ │ │ │ │ │ ├── index.ts │ │ │ │ │ │ └── principal.ts │ │ │ │ │ ├── auth-artifact.ts │ │ │ │ │ ├── auth-lease.ts │ │ │ │ │ ├── code-migration.ts │ │ │ │ │ ├── credential.ts │ │ │ │ │ ├── execution.ts │ │ │ │ │ ├── local-config.ts │ │ │ │ │ ├── local-installation.ts │ │ │ │ │ ├── policy.ts │ │ │ │ │ ├── provider-auth-grant.ts │ │ │ │ │ ├── secret-material.ts │ │ │ │ │ ├── source-auth-session.ts │ │ │ │ │ ├── source-catalog.ts │ │ │ │ │ ├── source-discovery.ts │ │ │ │ │ ├── source-inspection.ts │ │ │ │ │ ├── source-oauth-client.ts │ │ │ │ │ ├── source.ts │ │ │ │ │ └── workspace-oauth-client.ts │ │ │ │ └── schema.test.ts │ │ │ ├── tsconfig.json │ │ │ └── vitest.config.ts │ │ └── server/ │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── config.ts │ │ │ ├── effect-errors.ts │ │ │ ├── env.test.ts │ │ │ ├── env.ts │ │ │ ├── index.ts │ │ │ └── tracing.ts │ │ └── tsconfig.json │ └── sources/ │ ├── builtins/ │ │ ├── package.json │ │ ├── src/ │ │ │ └── index.ts │ │ └── tsconfig.json │ ├── core/ │ │ ├── package.json │ │ ├── src/ │ │ │ ├── catalog-fragment.ts │ │ │ ├── 
catalog-json-schema.ts │ │ │ ├── catalog-shared.ts │ │ │ ├── catalog-sync-result.ts │ │ │ ├── catalog-types.ts │ │ │ ├── catalog.ts │ │ │ ├── composition.ts │ │ │ ├── discovery-models.ts │ │ │ ├── discovery.ts │ │ │ ├── effect-errors.ts │ │ │ ├── index.ts │ │ │ ├── registry.ts │ │ │ ├── shared.ts │ │ │ ├── source-models.ts │ │ │ └── types.ts │ │ └── tsconfig.json │ ├── google-discovery/ │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── adapter.test.ts │ │ │ ├── adapter.ts │ │ │ ├── catalog.ts │ │ │ ├── discovery.ts │ │ │ ├── document.ts │ │ │ ├── index.ts │ │ │ ├── local-config.ts │ │ │ ├── tools.test.ts │ │ │ ├── tools.ts │ │ │ └── types.ts │ │ └── tsconfig.json │ ├── graphql/ │ │ ├── package.json │ │ ├── src/ │ │ │ ├── adapter.ts │ │ │ ├── catalog.ts │ │ │ ├── discovery.ts │ │ │ ├── graphql-tools.ts │ │ │ ├── index.ts │ │ │ ├── local-config.ts │ │ │ └── provider-data.ts │ │ └── tsconfig.json │ ├── mcp/ │ │ ├── CHANGELOG.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── adapter.ts │ │ │ ├── catalog.ts │ │ │ ├── connection-pool.ts │ │ │ ├── connection.ts │ │ │ ├── discovery.ts │ │ │ ├── elicitation-bridge.ts │ │ │ ├── elicitation-form.test.ts │ │ │ ├── elicitation.url.test.ts │ │ │ ├── index.ts │ │ │ ├── local-config.ts │ │ │ ├── manifest.ts │ │ │ ├── tools.test.ts │ │ │ └── tools.ts │ │ ├── tsconfig.json │ │ └── vitest.config.ts │ └── openapi/ │ ├── CHANGELOG.md │ ├── openapi-extractor-rs/ │ │ ├── .gitignore │ │ ├── Cargo.toml │ │ ├── build-wasm.sh │ │ └── src/ │ │ ├── lib.rs │ │ └── main.rs │ ├── package.json │ ├── src/ │ │ ├── adapter.ts │ │ ├── catalog.ts │ │ ├── definitions.ts │ │ ├── discovery.ts │ │ ├── document.test.ts │ │ ├── document.ts │ │ ├── extraction.real-specs.test.ts │ │ ├── extraction.ts │ │ ├── extractor-wasm.ts │ │ ├── http-serialization.ts │ │ ├── index.ts │ │ ├── local-config.ts │ │ ├── openapi-extractor-wasm/ │ │ │ ├── openapi_extractor.d.ts │ │ │ ├── openapi_extractor.js │ │ │ ├── openapi_extractor_bg.wasm │ │ │ └── 
openapi_extractor_bg.wasm.d.ts │ │ ├── real-spec-coverage.test.ts │ │ ├── schema-refs.ts │ │ ├── tool-presentation.test.ts │ │ ├── tool-presentation.ts │ │ ├── tools.test.ts │ │ ├── tools.ts │ │ └── types.ts │ ├── tsconfig.json │ └── vitest.config.ts ├── skills-lock.json ├── tools/ │ └── oxlint/ │ ├── README.md │ ├── plugin.mjs │ ├── rules/ │ │ ├── no-async-effect-vitest-tests.mjs │ │ ├── no-cross-workspace-relative-imports.mjs │ │ ├── no-direct-effect-tag-read.mjs │ │ ├── no-effect-run-in-effect-vitest-tests.mjs │ │ ├── no-node-fs-with-effect-imports.mjs │ │ ├── no-raw-effect-fail-errors.mjs │ │ ├── no-workspace-src-imports.mjs │ │ └── no-yield-effect-fail.mjs │ └── workspace-utils.mjs └── turbo.json ================================================ FILE CONTENTS ================================================ ================================================ FILE: .changeset/README.md ================================================ # Changesets Hello and welcome! This folder has been automatically generated by `@changesets/cli`, a build tool that works with multi-package repos, or single-package repos to help you version and publish your code. You can find the full documentation for it [in our repository](https://github.com/changesets/changesets). We have a quick list of common questions to get you started engaging with this project in [our documentation](https://github.com/changesets/changesets/blob/main/docs/common-questions.md). 
================================================ FILE: .changeset/config.json ================================================ { "$schema": "https://unpkg.com/@changesets/config@3.1.3/schema.json", "changelog": "@changesets/cli/changelog", "commit": false, "fixed": [], "linked": [], "access": "public", "baseBranch": "main", "updateInternalDependencies": "patch", "ignore": [] } ================================================ FILE: .changeset/eleven-towns-brush.md ================================================ --- "executor": patch --- Auto migrate sources on startup ================================================ FILE: .changeset/fiery-pianos-argue.md ================================================ --- "executor": patch --- Move execution to adapters rather than IR model ================================================ FILE: .changeset/nice-bananas-hammer.md ================================================ --- "executor": patch --- Fix legacy format parsing ================================================ FILE: .changeset/pre.json ================================================ { "mode": "pre", "tag": "beta", "initialVersions": { "@executor/docs": "0.0.0", "executor": "1.2.3", "@executor/mcp-elicitation-demo": "0.0.0", "@executor/web": "0.0.0", "@executor/serve-skills-via-mcp-demo": "0.0.0", "@executor/auth-mcp-oauth": "0.0.0", "@executor/auth-oauth2": "0.0.0", "@executor/react": "0.0.0", "@executor/kitchen-sink": "0.0.0", "@executor/ai-sdk-adapter": "0.0.0", "@executor/executor-mcp": "0.0.0", "@executor/codemode-core": "0.0.0", "@executor/ir": "0.0.0", "@executor/runtime-deno-subprocess": "0.0.0", "@executor/runtime-quickjs": "0.0.0", "@executor/runtime-ses": "0.0.0", "@executor/control-plane": "0.0.0", "@executor/server": "0.0.0", "@executor/source-builtins": "0.0.0", "@executor/source-core": "0.0.0", "@executor/source-google-discovery": "0.0.0", "@executor/source-graphql": "0.0.0", "@executor/source-mcp": "0.0.0", "@executor/source-openapi": "0.0.0" }, 
"changesets": [ "eleven-towns-brush", "fiery-pianos-argue", "nice-bananas-hammer", "rude-shrimps-teach", "two-vans-attend" ] } ================================================ FILE: .changeset/rude-shrimps-teach.md ================================================ --- "executor": patch --- Fix Google Discovery tool execution for sources stored with discovery document endpoints ================================================ FILE: .changeset/two-vans-attend.md ================================================ --- "executor": patch --- Fix build ================================================ FILE: .executor/executor.jsonc ================================================ { "sources": { "vercel": { "kind": "openapi", "name": "Vercel API", "connection": { "endpoint": "https://api.vercel.com/" }, "binding": { "specUrl": "https://openapi.vercel.sh/", "defaultHeaders": null } } } } ================================================ FILE: .github/workflows/publish-executor-package.yml ================================================ name: Publish Executor run-name: "${{ format('publish executor {0}', github.event_name == 'workflow_dispatch' && inputs.tag || github.ref_name) }}" on: push: tags: - "v*" workflow_dispatch: inputs: tag: description: Git tag to publish required: true type: string permissions: contents: write id-token: write concurrency: group: publish-executor-package-${{ github.ref }} cancel-in-progress: false jobs: publish: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 ref: ${{ github.event_name == 'workflow_dispatch' && format('refs/tags/{0}', inputs.tag) || github.ref }} - name: Setup Bun uses: oven-sh/setup-bun@v2 - name: Setup Node uses: actions/setup-node@v4 with: node-version: 24 registry-url: https://registry.npmjs.org - name: Update npm for trusted publishing run: | npm install -g npm@latest npm --version - name: Install dependencies run: bun install --frozen-lockfile - name: Run executor checks run: | bun 
run --cwd apps/executor typecheck bun run --cwd apps/executor test - name: Publish package and create release env: GH_TOKEN: ${{ github.token }} NPM_TOKEN: ${{ secrets.NPM_TOKEN }} RELEASE_TAG: ${{ github.event_name == 'workflow_dispatch' && inputs.tag || github.ref_name }} run: | export GITHUB_REF_TYPE=tag export GITHUB_REF_NAME="$RELEASE_TAG" export GITHUB_REF="refs/tags/$RELEASE_TAG" if [ -n "${NPM_TOKEN:-}" ]; then export NODE_AUTH_TOKEN="$NPM_TOKEN" fi bun run --cwd apps/executor release:publish ================================================ FILE: .github/workflows/release.yml ================================================ name: Release run-name: "prepare release" on: push: branches: - main permissions: actions: write contents: write id-token: write pull-requests: write concurrency: group: release-${{ github.ref }} cancel-in-progress: false jobs: release: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: Setup Bun uses: oven-sh/setup-bun@v2 - name: Setup Node uses: actions/setup-node@v4 with: node-version: 24 - name: Install dependencies run: bun install --frozen-lockfile - name: Create or update release pull request id: changesets uses: changesets/action@v1 with: version: bun run changeset:version commit: Version Packages title: Version Packages createGithubReleases: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Detect release version change if: steps.changesets.outputs.hasChangesets == 'false' id: detect_release run: | before="${{ github.event.before }}" if [ "$before" = "0000000000000000000000000000000000000000" ]; then before="$(git rev-list --max-count=1 HEAD^ 2>/dev/null || true)" fi version="$(node -e "console.log(JSON.parse(require('fs').readFileSync('apps/executor/package.json', 'utf8')).version)")" if [ -n "$before" ] && git cat-file -e "$before:apps/executor/package.json" 2>/dev/null; then previous_version="$(git show "$before:apps/executor/package.json" | node -e "let data = ''; 
process.stdin.setEncoding('utf8'); process.stdin.on('data', (chunk) => { data += chunk; }); process.stdin.on('end', () => { console.log(JSON.parse(data).version ?? ''); });")" else previous_version="" fi if [ -n "$previous_version" ] && [ "$previous_version" != "$version" ]; then echo "changed=true" >> "$GITHUB_OUTPUT" else echo "changed=false" >> "$GITHUB_OUTPUT" fi echo "version=$version" >> "$GITHUB_OUTPUT" - name: Create and push release tag if: steps.changesets.outputs.hasChangesets == 'false' && steps.detect_release.outputs.changed == 'true' run: | tag="v${{ steps.detect_release.outputs.version }}" if git ls-remote --exit-code --tags origin "refs/tags/$tag" >/dev/null 2>&1; then echo "Tag $tag already exists." exit 0 fi git config user.name "github-actions[bot]" git config user.email "41898282+github-actions[bot]@users.noreply.github.com" git tag "$tag" git push origin "$tag" - name: Trigger publish workflow if: steps.changesets.outputs.hasChangesets == 'false' && steps.detect_release.outputs.changed == 'true' env: GH_TOKEN: ${{ github.token }} run: | tag="v${{ steps.detect_release.outputs.version }}" gh workflow run publish-executor-package.yml --ref "$tag" -f tag="$tag" ================================================ FILE: .gitignore ================================================ # Environment variables .env .env.* !.env.example .env*.local # Vercel .vercel # Next.js .next/ # Turbo .turbo/ .turbo-prune-check/ # Executor .executor-v2/ .executor/* !.executor/executor.json !.executor/executor.jsonc .executor/artifacts/ .executor/state/ .executor/types/ # Cloudflare Workers local state **/.wrangler/ **/.dev.vars # Dependencies **/node_modules/ # TypeScript build info *.tsbuildinfo # Output / build artifacts out dist *.tgz # Rust / Cargo build artifacts **/target/ tools/openapi-extractor-rs/pkg-*/ # Code coverage coverage *.lcov # Logs logs *.log report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json # Caches .eslintcache .cache # IntelliJ based IDEs .idea # Finder (macOS) 
.DS_Store # Convex local backend state convex_local_backend.sqlite3 convex_local_storage/ # Local OpenAPI sources service state sources/data/*.sqlite sources/data/*.sqlite-shm sources/data/*.sqlite-wal # Composio toolkit scrape outputs sources/composio-scrape/data/ # Dev runner PID file .dev.pids # Misc *.har .chat-links.json .reference/ personal-notes/ .tmp/ executor/.env.executor-push ignored/ ================================================ FILE: .oxlintrc.jsonc ================================================ { "$schema": "./node_modules/oxlint/configuration_schema.json", // Keep the built-in default plugin set explicit so monorepo lint behavior // does not drift when oxlint changes its defaults. "plugins": ["typescript", "oxc", "unicorn"], "categories": { "correctness": "warn", }, "ignorePatterns": [ "**/node_modules/**", "**/.turbo/**", "**/dist/**", "**/coverage/**", "**/.next/**", "**/.mint/**", ], "settings": { "executorMonorepo": { "packageScopes": ["@executor/"], }, }, "jsPlugins": [ { "name": "executor-monorepo", "specifier": "./tools/oxlint/plugin.mjs", }, ], "rules": { // This repo uses Effect.gen heavily. The underlying generator syntax is an // implementation detail, so "no yield" is not a useful signal here. "require-yield": "off", "no-unused-vars": "warn", // Monorepo-specific rules. "executor-monorepo/no-async-effect-vitest-tests": "error", "executor-monorepo/no-cross-workspace-relative-imports": "error", "executor-monorepo/no-direct-effect-tag-read": "error", "executor-monorepo/no-effect-run-in-effect-vitest-tests": "error", "executor-monorepo/no-node-fs-with-effect-imports": "error", "executor-monorepo/no-raw-effect-fail-errors": "error", "executor-monorepo/no-yield-effect-fail": "error", "executor-monorepo/no-workspace-src-imports": "warn", }, "overrides": [ { // JSON Schema conditionals legitimately use a `then` key. 
"files": [ "packages/platform/control-plane/src/runtime/catalog/source/runtime.ts", ], "rules": { "unicorn/no-thenable": "off", }, }, { // This stack-integrity check intentionally prefers invariant failures in cleanup. "files": ["packages/sources/core/src/catalog-json-schema.ts"], "rules": { "no-unsafe-finally": "off", }, }, { // Generated wasm glue is vendored into the repo and not hand-maintained. "files": [ "packages/sources/openapi/src/openapi-extractor-wasm/openapi_extractor.js", ], "rules": { "no-unused-vars": "off", }, }, ], } ================================================ FILE: ARCHITECTURE.md ================================================ # Architecture This document explains the active v3 architecture at a high level. If `README.md` answers "what is this product and how do I use it?", this file answers "what are the major moving parts and how do they fit together?" ## One-line view `executor` is a local daemonized control plane that turns connected sources into a workspace tool catalog and then runs TypeScript against that catalog, pausing for user interaction whenever a tool or auth flow needs it. 
## System shape ```text +-----------------------+ | CLI / executor | | apps/executor | +-----------+-----------+ | | HTTP v +-------------+ +---------+---------+ +----------------+ | Browser UI |-->| local server |<--| MCP clients | | apps/web | | packages/platform | | via /mcp | | | | /server | | | +-------------+ +---------+---------+ +----------------+ | | provides runtime layer v +---------+---------+ | control plane | | packages/platform/ | | control-plane | +----+-----+----+----+ | | | persistence --+ | +-- source auth / discovery / inspection | +-- execution environment resolver | +-- live execution manager v +---------+---------+ | QuickJS sandboxed | | executor runtime | | packages/kernel/ | | runtime-quickjs | | default executor | +--------------------+ ``` ## Design goals reflected in the code The current architecture optimizes for a few specific ideas: - local-first operation with one daemon instead of many disconnected tools - one shared runtime for CLI, browser UI, and MCP access - schema-rich tool usage instead of raw HTTP from prompts - reusable source connections that become workspace-scoped tools - human-in-the-loop execution that can pause and resume cleanly - adapters for multiple source kinds without hardwiring product logic to one protocol ## Major components ### `apps/executor`: installed CLI and daemon manager This is the main user entrypoint. Responsibilities: - exposes commands such as `up`, `down`, `status`, `doctor`, `call`, and `resume` - ensures the local daemon is running before execution - submits TypeScript executions to the local control plane - handles paused interaction flows, including opening browser URLs when needed - includes a few dev-only seed helpers for demo sources Conceptually, the CLI is not the business logic. It is a thin user-facing shell over the local runtime. ### `packages/platform/server`: one local process for API, MCP, and UI This package hosts the actual local server. 
Responsibilities: - creates the control-plane runtime - mounts the control-plane HTTP API at `/v1` - mounts the `executor` MCP endpoint at `/mcp` - serves the web UI assets for normal browser routes - writes PID metadata for daemon lifecycle management This is an important architectural choice: the API and UI are served by the same local process, so the product behaves like one install rather than a pile of separate services. ### `packages/platform/control-plane`: product core This is the center of the system. It contains the runtime layer, persistence integration, and the business logic for: - local installation bootstrap - accounts, organizations, and workspaces - source discovery and connection - source auth and credential flows - source inspection and tool indexing - secret and credential handling - execution creation, resumption, and state tracking - policy-aware tool invocation If you want to understand the behavior of the product, this is the most important package. ### `packages/kernel/runtime-quickjs`: default code execution runtime This package provides the TypeScript execution environment used by the local product. At a high level it receives: - an executor implementation - a tool catalog - a tool invoker and runs user-authored code against that environment. The default runtime executes code inside a QuickJS WebAssembly sandbox so tool calls stay proxied through the control plane. The workspace can override that in `.executor/executor.jsonc` with `runtime: "quickjs" | "ses" | "deno"`. 
### Adapter packages Several packages exist to turn external systems into callable tools: - `packages/kernel/core`: shared tool abstractions, discovery, schemas, and system tools - `packages/sources/mcp`: MCP tool loading and invocation - `packages/sources/openapi`: OpenAPI extraction, manifests, and tool generation - `packages/hosts/mcp`: exposes the local runtime itself as an MCP server - `packages/clients/react`: React hooks and client state wrappers for the local UI These packages are what let the control plane treat multiple source kinds as one logical tool catalog. ## Runtime model ### Local installation bootstrap On first startup, the control plane provisions a local installation automatically. That bootstrap creates: - one local account - one personal organization - one default workspace - one local installation record that points at them This means the product can work out of the box as a local single-user system without requiring an external identity or tenant setup step. ### Single daemon, shared state All main entrypoints talk to the same local daemon: - CLI commands call the local HTTP API - the web app calls the same local HTTP API - MCP hosts talk to the `/mcp` handler exposed by that same process Because of that, these surfaces share: - the same workspace - the same connected sources - the same secrets and credentials - the same execution history and interaction state ## Persistence model The persistence layer is local-file-backed. 
Default behavior: - workspace config and state are stored in local files Optional behavior: - future cloud backends can plug in behind Effect service boundaries At a high level, the local control plane stores these domains: - installation identity: local installation identity for the workspace - source state: sources, auth sessions, source credential bindings - tool model: tool artifacts and related metadata extracted from sources - secret state: credentials and secret materials - execution state: executions and execution interactions - governance state: policies This is why `executor` can reconnect sources once, inspect them later, and run multiple executions over time without rebuilding everything from scratch on every prompt. ## Source lifecycle The source lifecycle is one of the defining architectural paths in the system. ### 1. Discovery The Add Source flow starts with URL discovery. The discovery service probes a URL and tries, in order, to determine whether it looks like: - an OpenAPI source - a GraphQL source - an MCP source - or an unknown endpoint Discovery also produces metadata such as: - inferred source kind - confidence level - probable auth style - optional spec URL - warnings - potential namespace and transport hints ### 2. Connection and auth Once a source is identified, the auth service drives the connection flow. Depending on the source, that may involve: - connecting immediately with no auth - asking for a bearer token or other credential material - creating an OAuth session and handing the user into a browser flow - persisting auth bindings back to the source The important architecture point is that source connection is not just a CRUD write. It is a runtime workflow that can branch into interactive auth. ### 3. Tool indexing After a source is connected, `executor` materializes a workspace-visible tool model. 
The exact extraction path depends on the source kind: - OpenAPI sources are transformed from the OpenAPI document into tool manifests and typed operations - GraphQL sources are introspected into callable query and mutation tools - MCP sources are represented through persisted tool artifacts and runtime invocation metadata This indexed representation is what powers search, inspection, and execution without re-parsing everything in the UI. ### 4. Inspection The source inspection service reconstructs a rich inspection bundle for the UI. That bundle includes: - source metadata - namespace and pipeline kind - tool summaries - tool detail payloads - raw document text when available - manifest and definitions JSON when available The UI uses this to show both a tree view of tools and a search-oriented discovery view. ## Execution architecture Execution is the other defining path in the system. ### 1. Create execution An execution begins when the CLI, API, or MCP bridge submits TypeScript code. The control plane creates an execution record with status such as: - `pending` - `running` - `waiting_for_interaction` - `completed` - `failed` ### 2. Resolve the workspace execution environment Before code runs, the runtime builds the execution environment for the current workspace. That environment combines: - built-in system tools from `codemode-core` - `executor` internal tools such as `executor.sources.add` - persisted tools generated from connected OpenAPI, GraphQL, and MCP sources The resolver returns three things: - the SES code executor - the workspace tool catalog - the tool invoker that actually dispatches calls This resolver is the composition point where the whole product becomes one callable tool surface. ### 3. Run TypeScript The code then runs inside the local SES sandbox runtime. The intended calling pattern is: 1. discover a tool by intent 2. inspect its schema if needed 3. 
call the selected `tools.*` path The runtime is deliberately opinionated here: the product is built around tool calls, not ad hoc `fetch` requests from user code. ### 4. Invoke tools through the control plane When code calls a tool, the control plane decides how to handle it. Possible paths include: - built-in system tool invocation - internal `executor` tool invocation - OpenAPI tool invocation with resolved auth headers - GraphQL tool invocation with resolved auth headers - MCP tool invocation through the MCP connector This is also the point where policy checks and source auth resolution happen. ### 5. Persist results and surface interactions The execution service updates persistent execution state as the run progresses. If a tool call or auth flow needs user interaction, the live execution manager: - creates an execution interaction record - marks the execution as `waiting_for_interaction` - stores enough payload to resume later - waits for a structured elicitation response Once the response arrives, the manager moves the execution back to `running` and the code continues. ## Human interaction model A major architectural feature of `executor` is that interactions are first-class runtime state. This is handled by the live execution manager and the execution interaction tables. That gives the product a clean pause/resume loop instead of forcing every host to improvise its own half-finished approval flow. Interaction types include: - URL-based flows, such as opening a secure credential page or OAuth URL - form-like elicitation where a host can provide a structured response This same idea is used both for direct CLI execution and for the MCP-facing `execute` and `resume` tools. ## API and MCP surfaces ### HTTP API The control-plane HTTP API is mounted under `/v1`. Major groups include: - local installation and secret management - OAuth - organizations and memberships - workspaces - sources - policies - executions The web UI is a client of this API. 
### MCP bridge `packages/hosts/mcp` exposes the local runtime as an MCP server. The bridge registers two main tools: - `execute`: run TypeScript against the local runtime - `resume`: continue a paused execution When the MCP host supports managed elicitation, the bridge can drive the interaction loop directly through MCP instead of requiring the caller to implement custom resume handling. ## Frontend architecture The web app lives in `apps/web` and is a React app built with Vite. At a high level: - routes are managed with TanStack Router - data access is wrapped by `@executor/react` - the UI is focused on sources, source inspection, and secrets - production assets are served by the local server - development runs Vite separately while still embedding the same backend behavior The frontend is intentionally thin. It is mostly a presentation layer over the control-plane API. ## Why the architecture is shaped this way The system is trying to solve a specific product problem: - give agents a better way to use tools than pasting giant manifests into prompts - keep auth, credentials, and user interactions in a durable runtime - let many protocols look like one workspace tool surface - make CLI, UI, and MCP hosts all operate on the same local state That is why the architecture keeps converging on the same central idea: `executor` is not just a CLI and not just an MCP server. It is a local runtime that owns source connection, tool indexing, execution, and interaction state. 
## Current boundaries A few practical boundaries are worth calling out: - the active implementation is local-first and single-daemon - the current web app is React/Vite, not the older architectures mentioned in planning notes - `legacy/` and `legacy2/` are historical context, not the active runtime - policy infrastructure exists in the core, but the central product loop today is source connection plus execution ## Read next - `README.md` for the product view and usage guidance - `apps/executor/src/cli/main.ts` for the CLI surface - `packages/platform/server/src/index.ts` for how the local server is assembled - `packages/platform/control-plane/src/runtime/` for the core runtime flows ================================================ FILE: PLAN.md ================================================ Executor: Project goal: Build an execution environment for agents to interact with APIs, MCPs, GraphQL, and other services via writing TypeScript. The way things work today is if people want to interact with services, they either add an mcp or they call it through a cli. for mcp servers when someone adds it to their agent, it blows up the context window of tokens. the agent isn't able to call it in an efficient way and it adds a lot of bloat so people have ended up using clis instead. the problem with the cli is people run things in dangerously allowable permissions. along with that you don't have typing and information about what clis are installed, what they can do, et cetera. by creating this typescript code execution environment we're able to handle both those problems. you can read the article code mode to understand deeper the benefits of this approach. the basic premise is that you give the agent the ability to search over what can be called from typescript and write code to call it. it can see the typings properly through typescript. let's look at some examples. let's say the agent wants to list the github issues for a repository. 
the agent's call is executed through an mcp call, and typescript code is parsed out of that call.
It's details on that, but we get them onto a web page in the browser to sign in. A similar thing would be if a source required an API key. It's the same concept if you open up a web page to securely input that. Current state: We are on v3 of this codebase which is a completely fresh start, the original codebase is in legacy/, the v2 is in legacy2/. Architecture I like: Database: Local-file-backed control plane for local use, with room for future hosted backends Server: - API The web dashboard calls the API via effect-atom. - MCP Executor is configurable via executor. A user can enter the prompt "Please add the https://mcp.notion.com/mcp to my workspace" We use MCP here instead of the API as the user has to perform actions in many cases. An example of this is OAuthing to an MCP server or opening a page to set a secret in. This prevents the secrets from being pasted over a chat Doing MCP here w/ MCP elicitation allows us to not give special treatment to the executor app while also giving a nice UX Ideally the MCP server shares a lot of logic between API Clients: - Web client Cloud product hosted at executor.sh, local one just on localhost This is a next.js app, so we will actually host the api/ in a route handler on it however it's important we implement the API in a standalone package and it just exposes a standard web request / response handler - CLI Connects to either the cloud api endpoint Standalone: - SDK Allows for people to use everything we've built in their own apps, including ingesting API specs / MCPs for calling Runtimes: - Deno - In process - Cloudflare worker runtime Other: - Avoid hardcoding strings in the core libraries, prefer programming to interfaces, this is because it allows people to build custom adapters without having to modify the core - Aim for composability of adapters. For example, the cloud product may eventually store secrets in a hosted backend while still supporting BYO 1Password. 
This does not need to be implemented today but is worth noting. - This is a fresh start so we can make whatever changes we need. Rough architecture is: Turborepo monorepo, effect-vitest for testing, bun for package manager / running apps (but we leverage Effect wherever possible rather than bun's apis i.e for the server), Next.js for web app References: https://blog.cloudflare.com/code-mode/ - concept we are implementing https://mcp.axiom.co/mcp - MCP server that requires auth https://modelcontextprotocol.io/specification/draft/client/elicitation - How we are going to be handling interactions from the user legacy/ original implementation legacy2 ================================================ FILE: README.md ================================================ # executor https://github.com/user-attachments/assets/11225f83-e848-42ba-99b2-a993bcc88dad `executor` is a local-first execution environment for AI agents. It gives an agent a TypeScript runtime, a discoverable tool catalog, and a single local place to connect external systems such as MCP servers, OpenAPI APIs, and GraphQL APIs. Instead of pasting large MCP manifests into every chat or giving an agent broad shell access, you run code inside `executor` and let it call typed `tools.*` functions. ## Community Join the Discord community: https://discord.gg/eF29HBHwM6 At runtime, `executor` behaves like one local product: - a CLI for starting the runtime and executing code - a local API server - a local web UI for connecting sources, inspecting tools, and managing secrets - an MCP endpoint for hosts that want to drive `executor` through MCP The current codebase lives in `apps/` and `packages/`. Older experiments stay in `legacy/` and `legacy2/`. ## Attribution - [Crystian](https://www.linkedin.com/in/crystian/) provided the npm package name `executor`. - The `codemode` concept in this project is inspired by Cloudflare's [Code Mode announcement](https://blog.cloudflare.com/code-mode/). 
## Why this exists `executor` is built around a simple idea: agents should work against a structured tool environment instead of guessing at raw HTTP calls, carrying huge MCP definitions in context, or running arbitrary local commands with broad permissions. In practice that means: - sources are connected once and turned into a reusable workspace tool catalog - the agent discovers tools by intent, inspects schemas, and then calls typed functions - secrets and OAuth flows stay in the local runtime and web UI instead of being pasted into chat - human interaction can pause an execution and resume it cleanly ## Mental model Think of `executor` as a local control plane for agent tool use. 1. You start a local `executor` daemon. 2. You connect sources such as an MCP server, an OpenAPI document, or a GraphQL endpoint. 3. `executor` indexes those sources into a workspace tool catalog. 4. An agent runs TypeScript against that catalog through `executor call` or through the MCP bridge. 5. If a tool needs credentials or user input, execution pauses, opens a local flow, and then resumes. ## What it does today ### Connect external tool sources `executor` currently supports these source types: - `mcp`: remote MCP servers, including transport selection for streamable HTTP or SSE - `openapi`: REST APIs described by an OpenAPI document - `graphql`: GraphQL endpoints that can be introspected into callable tools The add-source flow can: - probe a URL and infer what kind of source it is - infer likely authentication requirements - prompt for credentials when discovery or connection needs them - start OAuth when a source requires it - persist the source and its indexed tool metadata in the local workspace The web app also includes templates for common providers so you can start from real examples instead of filling every field by hand. ### Run agent code against tools The main CLI workflow is `executor call`. 
The runtime expects the agent to use the built-in discovery workflow: ```ts const matches = await tools.discover({ query: "github issues", limit: 5 }); const path = matches.bestPath; const detail = await tools.describe.tool({ path, includeSchemas: true }); return await tools.github.issues.list({ owner: "vercel", repo: "next.js", }); ``` A few important rules shape the execution model: - write TypeScript, not raw shell pipelines - use `tools.*`, not direct `fetch` - discover first when the exact tool path is not known - inspect schemas before calling complex tools ### Handle credentials and user interaction When a source or tool needs human input, `executor` can pause the execution and create an interaction record. That interaction may ask you to: - open a secure local credential page - complete an OAuth flow in the browser - respond to a structured elicitation from a tool host - resume a paused execution from the CLI This is the core human-in-the-loop behavior that lets `executor` keep secrets and approvals outside the agent's raw context. ### Inspect the connected tool model The web UI is not just a setup surface. It is also where you can inspect what `executor` learned from a source. For each source you can: - browse its tool tree - search for tools by intent - inspect input and output schemas - view generated manifests, definitions, and raw source documents when available - edit source settings and authentication details ## Quick start If you want to use this a package distribution, install it via npm: ```bash npm install -g executor executor up ``` Then either tell your agent to use the CLI or to open the web UI and copy the MCP CLI install command. Then you can run the CLI as `executor`. If you are working from this repository locally, the easiest path is: ```bash bun install bun dev ``` That starts the local runtime. The default base URL is: ```text http://127.0.0.1:8788 ``` From there: 1. Open the web UI in your browser. 2. Add a source from `/sources/add`. 
3. If needed, store credentials in `/secrets`. 4. Run TypeScript with `bun run executor call ...`. If you are using a packaged distribution, the command name is simply `executor` instead of `bun run executor`. ## Core CLI commands ```bash executor up executor down executor status --json executor doctor --json executor call --file script.ts executor resume --execution-id exec_123 ``` `executor call` accepts code in three ways: - inline as a positional argument - from `--file` - from standard input with `--stdin` Examples: ```bash executor call 'const matches = await tools.discover({ query: "repo details", limit: 1 }); return matches;' executor call --file script.ts cat script.ts | executor call --stdin executor call --no-open --file script.ts ``` If an execution pauses, resume it with: ```bash executor resume --execution-id exec_123 ``` ## Adding a source There are two main ways to add a source. ### In the web UI Use the Add Source flow to: - paste a URL - run discovery - review the inferred kind, namespace, transport, and auth - connect the source - complete credential or OAuth setup if required This is the easiest path for most users. ### From inside an execution The runtime also exposes `tools.executor.sources.add(...)`, which lets an agent add a source from code. Examples: ```ts return await tools.executor.sources.add({ endpoint: "https://example.com/mcp", name: "Example", namespace: "example", }); ``` ```ts return await tools.executor.sources.add({ kind: "openapi", endpoint: "https://api.github.com", specUrl: "https://raw.githubusercontent.com/github/rest-api-description/main/descriptions/api.github.com/api.github.com.json", name: "GitHub", namespace: "github", }); ``` For HTTP-style sources, `executor` can drive the credential flow for you. ## How execution works At a high level, every execution follows the same loop: 1. `executor` resolves the current local installation and workspace. 2. 
It builds a tool catalog from built-in tools plus all connected workspace sources. 3. It runs your TypeScript inside the configured sandbox runtime. QuickJS is the default, and `.executor/executor.jsonc` can set `"runtime": "quickjs" | "ses" | "deno"`. 4. Tool calls are dispatched through `executor` rather than directly from your code. 5. If a tool needs interaction, the run pauses and records a pending interaction. 6. Once the interaction is resolved, the execution continues and eventually completes or fails. Example: ```jsonc { "runtime": "ses", "sources": {} } ``` This gives you a stable surface for agent automation: - the agent sees a coherent catalog - connected sources become reusable namespace-based tools - auth stays attached to sources and secret material - the runtime can track execution state instead of losing it inside a one-shot prompt ## Web UI overview The React web app is served from the same local server as the API. Main screens: - `/`: list connected sources in the current local workspace - `/sources/add`: discover and connect new sources - `/sources/:sourceId`: inspect tools, search tools, and browse source artifacts - `/sources/:sourceId/edit`: edit source settings and auth - `/secrets`: create, update, and delete locally stored secrets The UI uses the same control-plane API as the CLI, so both surfaces are operating on the same local runtime state. ## Local-first runtime behavior By default `executor` runs as a single local daemon process. It serves: - `/v1` for the local control-plane API - `/mcp` for the `executor` MCP endpoint - the web UI for normal browser routes Default network location: - host: `127.0.0.1` - port: `8788` Default data locations are OS-aware: - Linux data: `~/.local/share/executor` - Linux runtime state: `~/.local/state/executor/run` - macOS: `~/Library/Application Support/Executor` - Windows: `%LOCALAPPDATA%\Executor` The server also maintains local PID and log files in its runtime directory. 
## Persistence and data `executor` persists the local control plane to local files. Persisted concepts include: - local installation identity - connected sources - indexed tool artifacts and related metadata - credentials and secret material bindings - source auth sessions - execution and interaction state - executions and execution interactions - policies On first start, `executor` provisions a local account, a personal organization, and a default workspace automatically. ## Security and trust model `executor` is designed to narrow how agents interact with external systems. Compared with direct shell or raw API usage, the model is intentionally more structured: - tool calls are routed through a controlled runtime - secrets are stored separately from prompt text - OAuth and credential capture happen through local flows - executions can pause for interaction instead of guessing or failing silently - source auth and tool metadata live with the workspace rather than inside each prompt This does not make the system magically risk-free, but it gives the runtime places to enforce policy, collect approvals, and keep sensitive material out of the agent's immediate context. 
## Repository layout If you are exploring the repo, these are the directories that matter most: - `apps/executor`: packaged CLI entrypoint and daemon lifecycle commands - `apps/web`: local React web UI - `packages/platform/server`: local HTTP server that serves API, MCP, and UI - `packages/platform/control-plane`: source management, secrets, persistence, execution, and inspection - `packages/kernel/runtime-deno-subprocess`: optional Deno subprocess runtime for TypeScript execution - `packages/kernel/runtime-quickjs`: default QuickJS sandbox runtime for TypeScript execution - `packages/kernel/runtime-ses`: optional SES sandbox runtime for TypeScript execution - `packages/hosts/mcp`: MCP bridge for `execute` and `resume` - `packages/kernel/core` plus `packages/sources/*`: core tool abstractions and first-party source integrations ## Releasing - Add a changeset in any PR that should release: `bun run changeset`. - Merge that PR to `main`. `.github/workflows/release.yml` opens or updates a `Version Packages` release PR for version bumps and changelog updates. - Merge the `Version Packages` PR. The release workflow pushes the matching git tag and dispatches `.github/workflows/publish-executor-package.yml`, which publishes to npm and creates the GitHub release. - Do not edit `apps/executor/package.json` by hand for normal releases. Changesets owns the version. - For a beta train, enter prerelease mode with `bun run release:beta:start`, commit `.changeset/pre.json`, and merge it. Release PRs will then use `-beta.x` versions until you exit with `bun run release:beta:stop`. - `bun run --cwd apps/executor release:publish` remains the publish implementation used by CI. - To build and pack the publish artifact locally without publishing, run `bun run --cwd apps/executor release:publish:dry-run`. - `.github/workflows/publish-executor-package.yml` can also be run manually with a tag input if a publish needs to be retried for an already-created version tag. 
- One-time npm setup: either configure npm trusted publishing for `RhysSullivan/executor` with the workflow file `.github/workflows/publish-executor-package.yml`, or add a GitHub Actions secret named `NPM_TOKEN` that can publish the `executor` package. - Stable releases use a normal semver like `1.2.3` and publish to npm under `latest`. - Beta releases use a prerelease semver like `1.3.0-beta.1` and publish to npm under `beta`. - When a release should become an upgrade test fixture, capture a real workspace snapshot with `bun run fixture:release:capture -- ...` and commit it under [`packages/platform/control-plane/src/runtime/__fixtures__`](./packages/platform/control-plane/src/runtime/__fixtures__/README.md). ## Project status This repository is explicitly on its third major architecture iteration. - `apps/` and `packages/` are the active implementation - `legacy/` is the original codebase - `legacy2/` is the second generation If you want the system view instead of the product view, read [`ARCHITECTURE.md`](./ARCHITECTURE.md). 
================================================ FILE: TRACING.md ================================================ # Local Tracing Start Jaeger locally: ```bash bun run trace:up ``` Run the web/server dev process with tracing enabled: ```bash EXECUTOR_TRACE_ENABLED=1 \ EXECUTOR_TRACE_SERVICE_NAME=executor-local \ EXECUTOR_TRACE_OTLP_ENDPOINT=http://127.0.0.1:4317 \ bun run dev ``` Then open Jaeger: - UI: `http://127.0.0.1:16686/search?service=executor-local` You can also query traces over HTTP: ```bash curl "http://127.0.0.1:16686/api/traces?service=executor-local&limit=20" ``` Useful operations to search for: - `source.connect.http` - `source.catalog.sync` - `graphql.syncCatalog` - `graphql.introspection.fetch` - `graphql.manifest.extract` - `graphql.definitions.compile` - `graphql.operations.build` - `graphql.snapshot.build` Stop Jaeger: ```bash bun run trace:down ``` ================================================ FILE: apps/docs/CHANGELOG.md ================================================ # @executor/docs ## null ================================================ FILE: apps/docs/developer/adapters-and-formats.mdx ================================================ --- title: "Adapters And Formats" description: "How executor supports multiple integration types without hardwiring the core to one protocol." --- Adapters are the main extensibility boundary in executor. They let executor support multiple integration styles without turning the control plane into a long chain of provider-specific branches. ## What an adapter does An adapter owns the format-specific or protocol-specific behavior for a source. That includes: - connect payload shape - binding config serialization and decoding - source validation - source config extraction - materialization - manifest parsing - operation description - invocation The control plane owns the generic lifecycle around that. 
## Why adapters exist Executor needs to support integrations that do not all look the same at the source boundary: - OpenAPI specs - GraphQL APIs - MCP servers - future Google Discovery imports - future Postman collections - future snippet bundles If the core storage model were shaped around one of those directly, every new integration type would cause another redesign. Adapters prevent that. ## Adapter vs format It is important not to confuse the adapter with the import format. Examples: - OpenAPI and GraphQL are different adapters even though both import HTTP-shaped API descriptions - MCP is its own adapter because it has distinct session, discovery, and resume semantics This lets executor share a thin catalog model without pretending every source has the same runtime behavior. ## What gets stored Adapters do **not** get to invent arbitrary top-level storage models. They compile into the same canonical layers: - recipe - revision - raw documents - schema bundles - operations That means the control plane can stay generic while adapters remain flexible. ## Why binding config is adapter-owned Adapter-owned binding config is one of the most important anti-churn decisions in the codebase. Instead of adding more and more provider columns to the source model, the adapter carries its own binding payload and version. That makes it much cheaper to evolve formats without reopening the core schema for every field addition. ## Why provider-specific detail still exists Executor does still store some provider-shaped detail in operation metadata and bundle/document kinds. That is intentional, but the direction is: - keep the generic IR thin - keep provider-specific detail in adapter code or typed provider metadata The goal is not to erase differences between formats. The goal is to stop those differences from dominating the core architecture. For the current fidelity tradeoffs and known lossy areas across adapters, see [Import Fidelity](/developer/import-fidelity). 
## What a good new adapter should do A good new adapter should: - compile into the canonical recipe/revision/operation model - use adapter-owned binding config - avoid requiring new generic tables or provider columns unless truly necessary - expose stable tool paths and search text - make auth needs explicit through the existing auth slot model If an integration needs a new primitive outside the source/tool model entirely, that is a sign it may not belong as just another adapter. ================================================ FILE: apps/docs/developer/cli.mdx ================================================ --- title: "CLI" description: "How the executor CLI fits into the system and why it stays thin." --- The CLI lives in `apps/executor`. Its job is to be the installed user-facing shell around the local daemon, not the place where product logic is implemented. ## Responsibilities The CLI is responsible for: - starting and stopping the local daemon - checking daemon health and installation state - submitting executions - resuming paused interactions - packaging and distribution concerns for the installed binary ## What it should not own The CLI should not own: - source persistence rules - auth workflows - tool discovery - execution state machines - tool invocation behavior Those belong in the control plane. ## Why this boundary matters This split is intentional. It means the browser UI, CLI, and local MCP entrypoint all talk to the same product core instead of each carrying their own business logic. That keeps behavior consistent across entrypoints and avoids reimplementing source, credential, and execution rules in multiple places. ## How it talks to the rest of the system At a high level, the CLI: 1. ensures the local daemon is available 2. calls the local HTTP API exposed by `packages/platform/server` 3. hands off real work to `packages/platform/control-plane` That makes the CLI a stable shell over a shared local runtime rather than a second product core. 
## Why this is flexible Because the CLI is thin, new product capabilities usually do not require redesigning the CLI architecture. Most new work lands in: - control-plane runtime logic - source adapters - codemode packages - server/API surfaces The CLI mostly needs new commands or command wiring on top of those primitives. ================================================ FILE: apps/docs/developer/codemode.mdx ================================================ --- title: "Code Mode" description: "The shared tool abstractions and adapter packages that connect sources to execution." --- The codemode packages are the bridge between imported integrations and executable tools. They provide the shared tool abstractions that both the control plane and runtime can depend on. ## Core role The codemode layer is where executor turns "some external thing" into a discoverable and invokable tool surface. That is why it exists as a separate layer instead of being buried directly inside each product surface. ## Main packages ### `packages/kernel/core` This package holds the common tool model: - tool definitions - tool catalogs - tool invokers - discovery helpers - schema-aware tool contracts - shared system-tool concepts This is the main abstraction boundary that other layers build on. ### `packages/sources/openapi` This package handles OpenAPI-specific extraction and tool generation. Its job is to understand OpenAPI and compile it into the shared codemode tool model, not to own workspace persistence or auth lifecycle. ### `packages/sources/mcp` This package handles MCP-specific tool discovery and invocation behavior and adapts remote MCP tools into the same shared tool model. ## Why this layer exists Without the codemode layer, the control plane would have to understand every source format and execution detail directly. Instead, executor keeps the core model broad and pushes format-specific transformation logic into focused packages. 
That is what makes the system extensible: - new adapters can target the same internal tool model - the runtime can execute against one logical tool surface - the same abstractions can be reused outside the control plane, including SDK-style consumers ## How it fits with the control plane The control plane owns: - bindings - credentials - revisions and materialization - tool indexing - policy and invocation lifecycle The codemode layer owns: - how external formats become tools - how those tools are described - how format-specific invocation helpers are implemented That boundary is what keeps the architecture adapter-based instead of turning the control plane into one large protocol-specific switch statement. ================================================ FILE: apps/docs/developer/core-model.mdx ================================================ --- title: "Core Model" description: "The main data and runtime concepts in executor today." --- The most important thing to understand about executor is that it does **not** model the system as one flat `source` row. The current model has a few separate concepts that each exist for a different reason. ## The primary concepts ### Source binding A source is the workspace-local installation record. It answers: - what is this integration called in this workspace? - is it enabled? - what namespace should its tools use? - what install-local config does it have? This is why a source carries local concerns like name, namespace, enabled state, binding config, and status. ### Recipe A recipe is the shareable integration identity. It answers: - what integration definition is this? - what adapter produced it? - what broad catalog kind does it belong to? A recipe is meant to converge across workspaces when the underlying integration definition is the same. ### Revision / materialization A recipe revision is the compiled result of an import. It answers: - what source config produced this compiled shape? 
That means the core model does not need permanent built-in fields for every provider-specific connection knob.
The shared catalog stays broad and stable, while connect/auth/invoke behavior is described through adapter strategies instead of one top-level runtime family enum. ## Why this is the right shape The current model is designed so that new support usually means: - add or evolve an adapter - compile into the same recipe/revision/operation layers and not: - redesign how source persistence fundamentally works That is the main goal of the current architecture. ================================================ FILE: apps/docs/developer/credentials-and-auth.mdx ================================================ --- title: "Credentials And Auth" description: "How executor models auth, interactive flows, and actor-scoped credential ownership." --- Auth in executor is not treated as a side detail on the source row. It is modeled as its own layer because auth has different ownership and lifecycle semantics from source bindings and recipe materialization. ## Auth ownership Credentials are scoped to: - workspace - source binding - actor - slot This is important because different users in the same workspace may connect different identities to the same source. ## Auth slots Executor distinguishes where auth is used. Important current slots: - `runtime` - `import` Why this matters: - some integrations use the same auth for both - some need auth only to fetch or generate the materialization - some may eventually need setup/bootstrap-specific auth This is why source auth is more than a single token field. ## Static credentials The common case today is still static auth backed by secret references. Examples: - bearer token - OAuth access token - OAuth refresh token The control plane stores secret refs, not raw secret values inline on the source or credential rows. ## Interactive auth sessions Interactive auth, such as OAuth flows, is modeled separately from stored credentials. 
That separation exists because: - a pending auth flow has state and lifecycle of its own - it may be tied to an interaction or execution - it is not the same thing as a resolved credential This is why executor has auth sessions and credentials as separate concepts. ## Shareable config vs private state Project config under `.executor/executor.jsonc` is intended to stay portable and committable. Private auth state does not belong in the workspace tree: - actor-scoped credentials - OAuth sessions - refresh leases - secret material That state lives in the local control-plane store under the user's home-scoped executor state instead of the project `.executor/state` directory. ## Why actor scope matters The old one-token-per-source assumption does not hold once: - multiple users share a workspace - each user connects their own Google account, GitHub account, or other external identity Actor-scoped ownership is the durable fix for that. ## The next auth primitive One important extension area is dynamic credentials. Static secret refs are not always enough for integrations that: - derive short-lived auth by calling login or refresh endpoints - need app-specific runtime headers - require token renewal logic The likely next auth-layer primitive is an auth provider or auth material resolver: - configured with secret refs and stable inputs - produces runtime auth material on demand - caches it with expiry metadata That is broader than sources and should be reusable by future source adapters, tool providers, and code bundles. ## Why auth is separated from recipes Recipes are shareable integration definitions. Credentials are private installation-specific state. That separation is important both for correctness and for shareability: - a recipe can be portable - a source binding can be local - credentials stay private This is the main reason auth is not modeled as part of the shareable recipe layer. 
================================================ FILE: apps/docs/developer/extending-executor.mdx ================================================ --- title: "Extending Executor" description: "How to add new source support without breaking the current architecture." --- The best way to extend executor is to respect the current boundaries instead of stretching the core model. ## First question: is it really a source? Not every new integration should be modeled as a source. Use the source model when the thing is: - a persistent external system - with install-local configuration - with auth state - that materializes into callable tools If the new thing is really: - a local tool library - a package bundle - a browser automation runtime then it may need a different top-level primitive instead of being shoved into the source model. ## If it is a source, add an adapter The happy path for a new integration is: 1. define the adapter key, catalog kind, and strategies 2. define the connect and binding config shape 3. materialize into documents, schema bundles, and operations 4. implement invocation 5. add tests against the canonical model The adapter should absorb provider-specific logic so the core does not have to. ## What a new adapter should not do Avoid these patterns: - adding new provider-specific columns to generic tables because one adapter wants them - storing expanded presentation output as canonical truth - bypassing the operation layer and invoking directly from raw documents - treating auth as provider-specific ad hoc fields on the source row Those are the patterns that lead back to churn. ## The standard for a good extension A good new adapter should make the core system feel boring. 
That means: - persistence still looks like recipe/revision/operations - auth still flows through the same credential/session primitives - discovery and inspection still work through the same catalog ideas - execution still delegates through the same tool and adapter boundaries If a new adapter needs deep new behavior in all of those places, that is usually a sign it may be a new primitive, not just a new adapter. ## What feels like the next natural extensions The current architecture is well-suited to future additions like: - Google Discovery imports - Postman collection imports - snippet bundles - richer auth material providers Those should mostly be adapter and auth-layer work, not another persistence redesign. ## Why this is the best setup The current setup is not trying to be abstract for its own sake. It is trying to keep the codebase in a state where: - core concepts stay stable - new support lands as extensions - source support does not require philosophy rewrites every few weeks That is the real reason the system is shaped around adapters, recipes, revisions, operations, and actor-scoped credentials. ================================================ FILE: apps/docs/developer/import-fidelity.mdx ================================================ --- title: "Import Fidelity" description: "What executor preserves, normalizes, and currently loses when importing external formats." --- Executor should normalize syntax, not meaning. That means the import pipeline can simplify surface differences between OpenAPI, GraphQL, Google Discovery, and MCP, but it should not silently erase semantics that change execution, trust, or user understanding. 
The practical shape is: - keep the raw source document or manifest - keep adapter-native metadata that does not fit the common model cleanly - promote execution-relevant semantics into first-class IR ## The fidelity buckets This audit uses four buckets: - `Raw source`: the original imported document or manifest - `Adapter/native`: typed provider data or native blobs attached during import - `IR`: first-class catalog symbols and capability metadata - `UI today`: what current inspection surfaces directly Current inspection is still narrow. It mostly shows projected schemas plus `tool.executable.native[0]`, not all preserved native blobs or capability-native metadata. ## OpenAPI | Source detail | Raw source | Adapter/native | IR | UI today | Notes | | --- | --- | --- | --- | --- | --- | | Operation auth requirements and security schemes | Yes | No | No | No | Operation `security` is not promoted; imported capabilities currently use `auth: none`. | | Exact response variants, status codes, and headers | Yes | Partial | No | Partial | Import chooses one preferred response, then projects one synthetic `2XX` response set. | | Parameter serialization (`style`, `explode`, `allowReserved`, content-based params) | Yes | Mostly no | No | No | The IR can model these, but the OpenAPI importer currently only takes parameter `schema`. | | Multiple request and response media types | Yes | Partial | Partial | Partial | Import picks a preferred content entry for typing; response docs keep content types only for the chosen response. | | `servers` defaults | Yes | No | No | No | The IR can represent scope server defaults, but OpenAPI import does not populate them. | | Rich schema semantics beyond the importer subset | Yes | Partial | Partial | Partial | Unsupported JSON Schema shapes fall back to `unknown` plus native preservation. 
| ## GraphQL | Source detail | Raw source | Adapter/native | IR | UI today | Notes | | --- | --- | --- | --- | --- | --- | | Subscription root fields as tools | Yes | Partial | No | No | The manifest records `subscriptionTypeName`, but tool generation only materializes query and mutation fields. | | Full field result shapes for generated field tools | Yes | Partial | Partial | Partial | Field-tool outputs are heuristic projections, not full result types. | | Union and deep nested selections | Yes | Partial | Partial | Partial | Unions degrade to `__typename`; nested selection stops after depth 2 in generated field tools. | | Arbitrary caller-selected field projections | Yes | Partial | No | No | Generated field tools are compiled with fixed selections; the raw `request` tool is the escape hatch. | | Deprecation reasons | Yes | Partial | Partial | No | GraphQL schemas emit `deprecated: true` plus `x-deprecationReason`, but the generic schema importer only keeps `deprecated` as first-class shape metadata. | GraphQL is a special case: the main loss is not the raw source being discarded, but the generated field-tool projection being intentionally narrower than the schema. That tradeoff is reasonable for discoverability, but only if the system continues to preserve and expose the raw request path. ## Google Discovery | Source detail | Raw source | Adapter/native | IR | UI today | Notes | | --- | --- | --- | --- | --- | --- | | OAuth scope descriptions | Yes | Partial | No | No | Document-level scope descriptions are extracted, but the IR security scheme rewrites scopes to `{ scope: scope }`. | | Rich media upload metadata | Yes | Partial | Partial | Partial | Upload support is collapsed to booleans such as `supportsMediaUpload` and `supportsMediaDownload`. 
| Non-tool MCP primitives such as prompts or resources | No | No | No | No | The adapter only discovers `listTools()`. Anything outside the tool list never enters import. |
Today inspection primarily exposes: - projected call and result schemas - capability summary text - the first executable native blob That means "preserved in native data" and "visible to a user" are still different states. ## What should be first-class in IR These semantics are worth promoting into the common model because they affect execution or trust: - auth and security requirements described by the source format - response and status variants - HTTP parameter serialization - upload, download, and streaming traits - GraphQL selection and subscription semantics - source-provided safety or idempotency hints By contrast, many provider-specific labels, service metadata fields, and raw source extras are fine to keep in adapter-native metadata as long as they remain inspectable. ## Recommended priority order ### P0 - OpenAPI auth and security requirements - OpenAPI response variants and status distinctions - Google Discovery scope descriptions - MCP discovery breadth: decide whether executor intends to import only tools or broader MCP surface area ### P1 - OpenAPI parameter serialization - GraphQL subscription modeling - GraphQL field-tool projection metadata so the fixed-selection behavior is explicit - Google Discovery service metadata surfacing - MCP tool metadata preservation beyond the current narrow manifest ### P2 - Better UI fallback to native/provider metadata - Richer diagnostics for intentionally lossy normalization - A fixture-backed audit matrix test per adapter ## The IR boundary The right boundary is: - `raw source` remains the source of truth - `adapter-native metadata` keeps source-specific details close to their format - `IR` stays unified, but it must be lossless for downstream semantics If executor keeps that line clear, a standardized representation is a strength. If executor lets semantically important details live only in hidden native blobs, the standardized representation becomes misleading. 
================================================ FILE: apps/docs/developer/mcp.mdx ================================================ --- title: "MCP" description: "How executor uses MCP both as an adapter and as an external interface." --- MCP shows up in executor in two different ways. ## 1. Executor can connect to MCP servers This is the source side of MCP. When a user connects an MCP endpoint, executor treats it as an adapter with MCP-specific connection, discovery, and invocation behavior. The control plane persists the binding and credential state, while MCP-specific execution behavior lives behind the adapter and codemode layers. This is why MCP is modeled as its own adapter instead of being flattened into a generic HTTP execution model. ## 2. Executor can expose itself as an MCP server This is the outward-facing side of MCP. `packages/hosts/mcp` and the `/mcp` route in `packages/platform/server` let external MCP clients talk to the local executor runtime. That means external MCP hosts can: - discover executor tools - trigger executions - resume paused interactions - work against the same local workspace state as the CLI and browser UI ## Why the distinction matters These two roles are related, but they are not the same thing: - inbound MCP means executor is acting as a client to a remote MCP server - outbound MCP means executor is acting as a server for external MCP clients Keeping that distinction clear avoids collapsing very different responsibilities into one vague "MCP integration" concept. 
## Where the logic lives Broadly: - control plane owns workspace state, bindings, credentials, and execution lifecycle - `packages/sources/mcp` owns MCP tool loading and invocation helpers - `packages/hosts/mcp` owns the local MCP server surface - `packages/platform/server` mounts the `/mcp` handler ## Why MCP stays flexible in this model Because MCP keeps its own adapter/runtime behavior, executor can support protocol-specific features such as sessions, elicitation, and resume without bending the generic imported-tool model to fit them. That is the main reason MCP is a first-class category in the architecture. ================================================ FILE: apps/docs/developer/overview.mdx ================================================ --- title: "Control Plane Overview" description: "How the control plane is structured today and how the major pieces fit together." --- Executor is built around one central idea: - turn connected systems into a durable, discoverable tool catalog - run TypeScript against that catalog inside a controlled runtime - keep connection state, auth, execution, and inspection in one local control plane ## One-line architecture Executor is a local control plane that: 1. stores connected integrations as sources 2. materializes those sources into callable tools 3. exposes those tools to the runtime, the UI, and MCP clients 4. resolves auth and policy at invocation time ## Major packages ### `apps/executor` The CLI and daemon manager. Responsibilities: - start and stop the local daemon - submit executions - resume paused interactions - seed and inspect sources in development flows ### `packages/platform/server` The local server shell around the control plane. Responsibilities: - mount the HTTP API at `/v1` - mount the local MCP server at `/mcp` - serve the web UI ### `packages/platform/control-plane` The product core. 
Responsibilities: - source connection and lifecycle - auth and credential state - persistence - tool discovery and inspection - execution management - policy-aware invocation ### Runtime and codemode packages These packages provide the execution and tool abstractions: - `packages/kernel/runtime-ses` - `packages/kernel/core` - `packages/sources/openapi` - `packages/sources/mcp` - `packages/clients/react` ## The mental model The current architecture is easiest to understand if you think of it as an adapter-based compiler pipeline: - adapters ingest external formats and protocols - the control plane stores a canonical internal representation - the runtime executes against that representation That is why executor has strong internal concepts like: - source bindings - recipes and revisions - materialized operations - schema bundles - actor-scoped credentials Those are not UI concepts. They are the durable internal model that lets executor support multiple integration styles without rewriting the core every time. ## What is intentionally centralized Executor intentionally centralizes a few things that many products split across multiple services: - local identity and workspace bootstrap - source connection state - secret material references - tool inspection data - execution history - interaction/resume state This is why the UI, CLI, and local MCP entrypoint can all behave like one system instead of separate tools bolted together. ## Why the architecture is shaped this way The current implementation optimizes for: - local-first operation - strong schema-aware tool usage - reusable connected integrations - minimal provider-specific logic in the control plane core - extensibility through adapters instead of one-off execution paths The rest of the developer docs explain how those goals show up in the current model. 
================================================ FILE: apps/docs/developer/persistence-and-migrations.mdx ================================================ --- title: "Persistence And Migrations" description: "How executor stores source state today and how migration logic is intended to work." --- Executor persistence is designed around durable runtime state, not just CRUD storage. That means the database stores: - installation and workspace identity - source bindings - recipes and revisions - raw documents - schema bundles - operations - credentials and auth sessions - executions and interactions ## Why the storage is layered The storage model is layered because these concerns have different reuse and ownership semantics. Examples: - raw documents are provenance - revisions are compiled materializations - operations are the hot-path query surface - source bindings are workspace-local - credentials are private and actor-scoped Trying to collapse those into one table or one document shape causes churn quickly. ## What is normalized vs serialized Executor intentionally uses both relational columns and serialized JSON. Normalized: - ownership - ids and references - search and lookup fields - status and visibility - auth/session ownership Serialized: - adapter-owned binding config - source config blobs - provider metadata - some session payloads This is deliberate. The rule is to normalize what the core system queries and enforces, and serialize what belongs to adapter-specific contracts. ## Migrations philosophy The intended migration model is: - SQL migrations move structural schema into the new shape - one-shot code migrations repair or rebuild data that SQL cannot derive sanely - steady-state runtime paths should not carry compatibility branches forever That last point matters. Compatibility logic belongs in upgrade code, not in normal reads and writes. 
## What is still acceptable in migration code One-shot rebuilds are still acceptable when: - the old data shape is structurally valid but not yet in the best compiled form - the rebuild can be derived from already-stored documents - the logic is ledgered and run once What is not acceptable: - provider-specific legacy fallbacks in steady-state runtime logic - carrying old columns indefinitely because one path still reads them ## Why this supports extensibility This persistence shape is meant to let new integrations land in one of two ways: - add a new adapter that compiles into the same source model - add a genuinely new top-level primitive if the thing is not really a source That is how executor avoids turning every new integration into another schema redesign. ================================================ FILE: apps/docs/developer/tool-catalog-and-execution.mdx ================================================ --- title: "Tool Catalog And Execution" description: "How source materialization becomes callable tools and how invocation works." --- Executor turns connected integrations into a workspace tool catalog. That catalog is the thing the runtime actually executes against. The execution sandbox is selected from `.executor/executor.jsonc`: ```jsonc { "runtime": "quickjs" } ``` Supported values are `quickjs` (default), `ses`, and `deno`. ## How tools are produced At a high level: 1. an adapter materializes a source 2. the materialization writes documents, schema bundles, and operations 3. the control plane loads those persisted operations into a workspace tool index 4. the runtime invokes one tool at a time through the control plane This means executor does not rebuild every integration from scratch on every execution. 
## Why operations exist as a separate layer The operation layer exists to support: - search - inspection - policy - fast tool lookup - tool description without loading huge raw documents every time Raw upstream documents are still kept, but they are not the best hot-path query surface. ## Schema transport Executor uses compact operation-root schemas plus shared schema bundles. This is better than storing expanded per-tool schema blobs because it avoids: - massive duplicate schema payloads - slow formatting and rendering - treating presentation output as canonical storage The result is: - smaller payloads - more reusable schema graphs - cleaner tool detail responses ## Tool catalog shape The tool catalog is built from: - persisted source tools - built-in executor tools That lets executor present one unified discovery and invocation surface even though the underlying tools come from different places. ## Invocation flow At a high level, invocation works like this: 1. resolve the persisted operation by tool path 2. run policy and approval checks 3. resolve auth material for the correct slot 4. load the manifest and schema bundle needed for the call 5. delegate actual invocation to the adapter This is the key point: - the control plane orchestrates - the adapter performs the protocol-specific call ## Why this is flexible This architecture is flexible because it keeps a stable middle: - the runtime only needs tools and invokers - the control plane only needs the canonical operation and auth model - adapters can vary widely at the source boundary without changing the runtime contract That is what makes it possible to add more source types without constantly rewriting execution logic. 
================================================ FILE: apps/docs/docs.json ================================================ { "$schema": "https://mintlify.com/docs.json", "theme": "mint", "name": "Executor", "description": "A local-first execution environment for AI agents.", "colors": { "primary": "#0D9373", "light": "#07C983", "dark": "#0D9373" }, "navigation": { "tabs": [ { "tab": "Product", "groups": [ { "group": "Introduction", "pages": [ "introduction" ] } ] }, { "tab": "Developer", "groups": [ { "group": "Control Plane", "pages": [ "developer/overview", "developer/core-model", "developer/adapters-and-formats", "developer/credentials-and-auth", "developer/tool-catalog-and-execution", "developer/persistence-and-migrations", "developer/extending-executor" ] }, { "group": "CLI", "pages": [ "developer/cli" ] }, { "group": "MCP", "pages": [ "developer/mcp" ] }, { "group": "Code Mode", "pages": [ "developer/codemode" ] } ] } ] }, "footer": { "socials": { "github": "https://github.com/anomalyco/executor", "discord": "https://discord.gg/eF29HBHwM6" } } } ================================================ FILE: apps/docs/introduction.mdx ================================================ --- title: "Introduction" description: "A local-first execution environment for AI agents." --- `executor` is a local-first execution environment for AI agents. It gives an agent a TypeScript runtime, a discoverable tool catalog, and a single local place to connect external systems such as MCP servers, OpenAPI APIs, and GraphQL APIs. This docs site is split into two documentation surfaces: - product-facing docs for how to install and use executor - developer-facing docs for how executor is built and how to extend it The product-facing section will come later. The current docs start with the developer-facing architecture and implementation model. 
If you are trying to understand the current system shape, start with: - [Control Plane](/developer/overview) - [CLI](/developer/cli) - [MCP](/developer/mcp) - [Code Mode](/developer/codemode) ================================================ FILE: apps/docs/package.json ================================================ { "name": "@executor/docs", "private": true, "type": "module", "scripts": { "dev": "bunx --bun mint dev --no-open --port 3337", "typecheck": "echo 'No typecheck for @executor/docs'", "validate": "bunx --bun mint validate" }, "version": "0.0.0" } ================================================ FILE: apps/executor/CHANGELOG.md ================================================ # executor ## 1.2.4-beta.4 ### Patch Changes - ec5e3a3: Fix Google Discovery tool execution for sources stored with discovery document endpoints ## 1.2.4-beta.3 ### Patch Changes - dc94998: Auto migrate sources on startup ## 1.2.4-beta.2 ### Patch Changes - f0a3802: Fix legacy format parsing ## 1.2.4-beta.1 ### Patch Changes - 5869ddb: Fix build ## 1.2.4-beta.0 ### Patch Changes - 74185a9: Move execution to adapters rather than IR model ## 1.2.3 ### Patch Changes - eda1217: Always request maximal scope for Google Apis ## 1.2.2 ### Patch Changes - 661ed29: Support selecting runtime ## 1.2.1 ### Patch Changes - 329cc41: fix migration - 86d4d4d: package the PGlite runtime assets in the published CLI bundle ## 1.2.0 ### Minor Changes - 7574535: add multiple sources at same time ### Patch Changes - a2ada62: Google workspace support, folder based config - @executor/codemode-core@null - @executor/control-plane@null - @executor/executor-mcp@null - @executor/server@null ## 1.2.0-beta.7 ### Minor Changes - 7574535: add multiple sources at same time ## 1.1.10-beta.6 ### Patch Changes - a2ada62: Google workspace support, folder based config ================================================ FILE: apps/executor/bin/executor ================================================ #!/usr/bin/env bash set 
-euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" exec bun "$SCRIPT_DIR/../src/cli/main.ts" "$@" ================================================ FILE: apps/executor/package.json ================================================ { "name": "executor", "version": "1.2.4-beta.4", "description": "Local AI executor with a CLI, local API server, and web UI.", "keywords": [ "executor", "ai", "agent", "cli", "automation", "local-first" ], "homepage": "https://github.com/RhysSullivan/executor", "bugs": { "url": "https://github.com/RhysSullivan/executor/issues" }, "repository": { "type": "git", "url": "git+https://github.com/RhysSullivan/executor.git" }, "license": "MIT", "private": true, "type": "module", "bin": { "executor": "bin/executor" }, "scripts": { "typecheck": "bunx tsc --noEmit -p tsconfig.json", "test": "vitest run", "test:watch": "vitest", "start": "bun src/cli/main.ts", "release:publish:dry-run": "bun src/distribution/publish.ts --dry-run", "release:publish": "bun src/distribution/publish.ts" }, "dependencies": { "@effect/cli": "catalog:", "@effect/platform": "catalog:", "@effect/platform-node": "catalog:", "@executor/codemode-core": "workspace:*", "@executor/control-plane": "workspace:*", "@executor/executor-mcp": "workspace:*", "@executor/server": "workspace:*", "effect": "catalog:" }, "devDependencies": { "@executor/mcp-elicitation-demo": "workspace:*", "@executor/runtime-ses": "workspace:*", "@effect/vitest": "catalog:", "@modelcontextprotocol/sdk": "^1.26.0", "@types/node": "catalog:", "bun-types": "catalog:", "vitest": "catalog:", "zod": "catalog:" } } ================================================ FILE: apps/executor/src/cli/dev.ts ================================================ import { type ControlPlaneClient, WorkspaceIdSchema, } from "@executor/control-plane"; import * as Effect from "effect/Effect"; const readBindingString = (binding: Record, key: string): string | null => typeof binding[key] === "string" ? 
String(binding[key]) : null; type SeedDemoMcpSourceInput = { client: ControlPlaneClient; workspaceId: string; endpoint: string; name: string; namespace: string; }; type SeedDemoMcpSourceResult = | { action: "noop"; sourceId: string; workspaceId: string; endpoint: string; } | { action: "updated" | "created"; sourceId: string; workspaceId: string; endpoint: string; }; type SeedGithubOpenApiSourceInput = { client: ControlPlaneClient; workspaceId: string; endpoint: string; specUrl: string; name: string; namespace: string; credentialEnvVar?: string; }; export const seedDemoMcpSourceInWorkspace = ( input: SeedDemoMcpSourceInput, ): Effect.Effect => Effect.gen(function* () { const workspaceId = WorkspaceIdSchema.make(input.workspaceId); const existing = yield* input.client.sources.list({ path: { workspaceId, }, }); const existingByName = existing.find( (source) => source.kind === "mcp" && source.name === input.name, ); const expected = { endpoint: input.endpoint, namespace: input.namespace, transport: "streamable-http" as const, }; if ( existingByName !== undefined && existingByName.endpoint === expected.endpoint && existingByName.namespace === expected.namespace && readBindingString(existingByName.binding, "transport") === expected.transport && existingByName.auth.kind === "none" ) { return { action: "noop", sourceId: existingByName.id, workspaceId: input.workspaceId, endpoint: existingByName.endpoint, }; } if (existingByName !== undefined) { const updated = yield* input.client.sources.update({ path: { workspaceId, sourceId: existingByName.id, }, payload: { endpoint: input.endpoint, status: "connected", enabled: true, namespace: input.namespace, binding: { transport: "streamable-http", queryParams: null, headers: null, }, auth: { kind: "none", }, }, }); return { action: "updated", sourceId: updated.id, workspaceId: input.workspaceId, endpoint: updated.endpoint, }; } const created = yield* input.client.sources.create({ path: { workspaceId, }, payload: { name: input.name, 
kind: "mcp", endpoint: input.endpoint, status: "connected", enabled: true, namespace: input.namespace, binding: { transport: "streamable-http", queryParams: null, headers: null, }, auth: { kind: "none", }, }, }); return { action: "created", sourceId: created.id, workspaceId: input.workspaceId, endpoint: created.endpoint, }; }); export const seedGithubOpenApiSourceInWorkspace = ( input: SeedGithubOpenApiSourceInput, ): Effect.Effect => Effect.gen(function* () { const workspaceId = WorkspaceIdSchema.make(input.workspaceId); const existing = yield* input.client.sources.list({ path: { workspaceId, }, }); const existingByName = existing.find( (source) => source.kind === "openapi" && source.name === input.name, ); const auth = { kind: "bearer" as const, headerName: "Authorization", prefix: "Bearer ", token: { providerId: "env", handle: input.credentialEnvVar ?? "GITHUB_TOKEN", }, }; if ( existingByName !== undefined && existingByName.endpoint === input.endpoint && existingByName.namespace === input.namespace && readBindingString(existingByName.binding, "specUrl") === input.specUrl && JSON.stringify(existingByName.binding.defaultHeaders ?? 
null) === JSON.stringify(null) && JSON.stringify(existingByName.auth) === JSON.stringify(auth) ) { return { action: "noop", sourceId: existingByName.id, workspaceId: input.workspaceId, endpoint: existingByName.endpoint, }; } if (existingByName !== undefined) { const updated = yield* input.client.sources.update({ path: { workspaceId, sourceId: existingByName.id, }, payload: { endpoint: input.endpoint, status: "connected", enabled: true, namespace: input.namespace, binding: { specUrl: input.specUrl, defaultHeaders: null, }, auth, }, }); return { action: "updated", sourceId: updated.id, workspaceId: input.workspaceId, endpoint: updated.endpoint, }; } const created = yield* input.client.sources.create({ path: { workspaceId, }, payload: { name: input.name, kind: "openapi", endpoint: input.endpoint, status: "connected", enabled: true, namespace: input.namespace, binding: { specUrl: input.specUrl, defaultHeaders: null, }, auth, }, }); return { action: "created", sourceId: created.id, workspaceId: input.workspaceId, endpoint: created.endpoint, }; }); ================================================ FILE: apps/executor/src/cli/interaction-handling.test.ts ================================================ import { describe, expect, it } from "@effect/vitest"; import { decideInteractionHandling } from "./interaction-handling"; describe("interaction-handling", () => { it("keeps URL interactions distinct even when the terminal is non-interactive", () => { expect(decideInteractionHandling({ parsed: { mode: "url", message: "Connect Axiom", url: "https://mcp.axiom.co/authorize", }, isInteractiveTerminal: false, })).toBe("url_paused"); }); it("waits on URL interactions in an interactive terminal", () => { expect(decideInteractionHandling({ parsed: { mode: "url", message: "Connect Axiom", url: "https://mcp.axiom.co/authorize", }, isInteractiveTerminal: true, })).toBe("url_interactive"); }); it("falls back to form pause handling for non-interactive prompts", () => { 
expect(decideInteractionHandling({ parsed: { mode: "form", message: "Approve tool call", }, isInteractiveTerminal: false, })).toBe("form_paused"); }); }); ================================================ FILE: apps/executor/src/cli/interaction-handling.ts ================================================ import type { ParsedInteractionPayload } from "./pending-interaction-output"; export type InteractionHandling = | "url_interactive" | "url_paused" | "form_interactive" | "form_paused"; export const decideInteractionHandling = (input: { parsed: ParsedInteractionPayload | null; isInteractiveTerminal: boolean; }): InteractionHandling => { if (input.parsed?.mode === "url") { return input.isInteractiveTerminal ? "url_interactive" : "url_paused"; } return input.isInteractiveTerminal ? "form_interactive" : "form_paused"; }; ================================================ FILE: apps/executor/src/cli/main.ts ================================================ import { spawn } from "node:child_process"; import { createRequire } from "node:module"; import { dirname } from "node:path"; import { createInterface } from "node:readline/promises"; import { FileSystem } from "@effect/platform"; import { Args, Command, Options } from "@effect/cli"; import { NodeFileSystem, NodePath, NodeRuntime, } from "@effect/platform-node"; import { EXECUTOR_SOURCES_ADD_HELP_LINES, ExecutionIdSchema, RuntimeExecutionResolverService, createControlPlaneClient, createControlPlaneRuntime, type ControlPlaneClient, type ExecutionEnvelope, type ExecutionInteraction, type ControlPlaneRuntime, } from "@executor/control-plane"; import type { ToolCatalog } from "@executor/codemode-core"; import * as Effect from "effect/Effect"; import * as Schema from "effect/Schema"; import * as Option from "effect/Option"; import * as Cause from "effect/Cause"; import { DEFAULT_SERVER_BASE_URL, DEFAULT_SERVER_HOST, DEFAULT_LOCAL_DATA_DIR, DEFAULT_SERVER_LOG_FILE, DEFAULT_SERVER_PID_FILE, DEFAULT_SERVER_PORT, 
SERVER_POLL_INTERVAL_MS, SERVER_START_TIMEOUT_MS, runLocalExecutorServer, } from "@executor/server"; import { seedDemoMcpSourceInWorkspace, seedGithubOpenApiSourceInWorkspace, } from "./dev"; import { resolveRuntimeWebAssetsDir, resolveSelfCommand, } from "./runtime-paths"; import { buildPausedExecutionOutput, parseInteractionPayload, } from "./pending-interaction-output"; import { decideInteractionHandling } from "./interaction-handling"; import { executorAppEffectError, type LocalServerReachabilityTimeoutError, localServerReachabilityTimeoutError, } from "../effect-errors"; const toError = (cause: unknown): Error => cause instanceof Error ? cause : new Error(String(cause)); const sleep = (ms: number) => Effect.promise(() => new Promise((resolve) => setTimeout(resolve, ms))); const openUrlInBrowser = (url: string): Effect.Effect => Effect.sync(() => { const cmd = process.platform === "darwin" ? ["open", url] : process.platform === "win32" ? ["cmd", "/c", "start", "", url] : ["xdg-open", url]; try { const child = spawn(cmd[0]!, cmd.slice(1), { detached: true, stdio: "ignore", }); child.on("error", () => undefined); child.unref(); } catch { // Best-effort browser launch only; always leave the URL in stdout. 
} }).pipe(Effect.catchAll(() => Effect.void)); const promptLine = (prompt: string): Effect.Effect => Effect.tryPromise({ try: async () => { const rl = createInterface({ input: process.stdin, output: process.stdout, }); try { return await rl.question(prompt); } finally { rl.close(); } }, catch: toError, }); const readStdin = (): Effect.Effect => Effect.tryPromise({ try: async () => { let contents = ""; process.stdin.setEncoding("utf8"); for await (const chunk of process.stdin) { contents += chunk; } return contents; }, catch: toError, }); const readCode = (input: { code?: string; file?: string; stdin?: boolean; }): Effect.Effect => Effect.gen(function* () { if (input.code && input.code.trim().length > 0) { return input.code; } if (input.file && input.file.trim().length > 0) { const fs = yield* FileSystem.FileSystem; const contents = yield* fs.readFileString(input.file!, "utf8").pipe( Effect.mapError(toError), ); if (contents.trim().length > 0) { return contents; } } const shouldReadStdin = input.stdin === true || !process.stdin.isTTY; if (shouldReadStdin) { const contents = yield* readStdin(); if (contents.trim().length > 0) { return contents; } } return yield* executorAppEffectError("cli/main", "Provide code as a positional argument, use --file, or pipe code over stdin."); }); const getBootstrapClient = (baseUrl: string = DEFAULT_SERVER_BASE_URL) => createControlPlaneClient({ baseUrl }); const decodeExecutionId = Schema.decodeUnknown(ExecutionIdSchema); const require = createRequire(import.meta.url); const CLI_NAME = "executor"; const CLI_VERSION = (() => { const candidatePaths = [ "../package.json", "../../package.json", ]; for (const candidatePath of candidatePaths) { try { const metadata = require(candidatePath) as { version?: string }; if (metadata.version && metadata.version.trim().length > 0) { return metadata.version; } } catch { // Fall through to the default version below. 
} } return "0.0.0-local"; })(); const HELP_TOKENS = ["--help", "-h", "help"] as const; const isHelpToken = (value: string | undefined): boolean => value !== undefined && HELP_TOKENS.includes(value as (typeof HELP_TOKENS)[number]); const normalizeCliArgs = (rawArgs: readonly string[]): string[] => { return rawArgs[0] === "run" ? ["call", ...rawArgs.slice(1)] : [...rawArgs]; }; const getCliArgs = (): string[] => normalizeCliArgs(process.argv.slice(2)); const toEffectCliArgv = (args: readonly string[]): string[] => [ process.execPath || CLI_NAME, CLI_NAME, ...args, ]; const buildWorkflowText = (namespaces: readonly string[] = []): string => [ "Execute TypeScript in sandbox; call tools via discovery workflow.", ...(namespaces.length > 0 ? [ "Available namespaces:", ...namespaces.map((namespace) => `- ${namespace}`), ] : []), "Workflow:", '1) const matches = await tools.discover({ query: "", limit: 12 });', "2) const details = await tools.describe.tool({ path, includeSchemas: true });", "3) Call selected tools.(input).", '4) To connect a source, call tools.executor.sources.add(...) for MCP, OpenAPI, or GraphQL APIs.', ...EXECUTOR_SOURCES_ADD_HELP_LINES, "5) If execution pauses for interaction, resume it with `executor resume --execution-id ...`.", "Do not use fetch; use tools.* only.", ].join("\n"); const DEFAULT_RUN_WORKFLOW = buildWorkflowText(); const indentBlock = (value: string, prefix: string = " "): string => value .split("\n") .map((line) => (line.length > 0 ? 
`${prefix}${line}` : "")) .join("\n"); const formatCauseMessage = (cause: Cause.Cause): string => { const failure = Option.getOrUndefined(Cause.failureOption(cause)); if (failure instanceof Error && failure.message.length > 0) { return failure.message; } if (typeof failure === "string" && failure.length > 0) { return failure; } const defect = Option.getOrUndefined(Cause.dieOption(cause)); if (defect instanceof Error && defect.message.length > 0) { return defect.message; } if (typeof defect === "string" && defect.length > 0) { return defect; } return Cause.pretty(cause).split("\n").find((line) => line.trim().length > 0) ?? "unknown error"; }; const formatCatalogUnavailableMessage = (cause: Cause.Cause): string => { const message = formatCauseMessage(cause); return message === "Error: An error has occurred" ? "Current workspace catalog unavailable." : `Current workspace catalog unavailable: ${message}`; }; const closeRuntime = (runtime: ControlPlaneRuntime) => Effect.tryPromise({ try: () => runtime.close(), catch: toError, }).pipe(Effect.catchAll(() => Effect.void)); const buildRunWorkflowText = ( catalog?: ToolCatalog, ): Effect.Effect => { if (!catalog) { return Effect.succeed(DEFAULT_RUN_WORKFLOW); } return catalog.listNamespaces({ limit: 200 }).pipe( Effect.map((namespaces) => buildWorkflowText( namespaces.length > 0 ? namespaces.map((namespace) => namespace.displayName ?? 
namespace.namespace) : ["none discovered yet"], ) ), Effect.mapError(toError), ); }; const loadRunWorkflowText = (): Effect.Effect => Effect.acquireUseRelease( createControlPlaneRuntime({ localDataDir: DEFAULT_LOCAL_DATA_DIR, }).pipe(Effect.mapError(toError)), (runtime) => Effect.gen(function* () { const environment = yield* Effect.gen(function* () { const resolveExecutionEnvironment = yield* RuntimeExecutionResolverService; return yield* resolveExecutionEnvironment({ workspaceId: runtime.localInstallation.workspaceId, accountId: runtime.localInstallation.accountId, executionId: ExecutionIdSchema.make("exec_help"), }); }).pipe( Effect.provide(runtime.runtimeLayer), Effect.mapError(toError), ); return yield* buildRunWorkflowText(environment.catalog); }), closeRuntime, ).pipe( Effect.catchAllCause((cause) => Effect.succeed( [ DEFAULT_RUN_WORKFLOW, "", formatCatalogUnavailableMessage(cause), ].join("\n"), ) ), ); const printRootHelp = (workflow: string) => Effect.sync(() => { console.log([ `${CLI_NAME} ${CLI_VERSION}`, "", "USAGE", "", " executor call [code] [--file text] [--stdin] [--base-url text] [--no-open]", " executor resume --execution-id text [--base-url text] [--no-open]", "", "CALL WORKFLOW", "", indentBlock(workflow), "", "COMMANDS", "", " call", " Execute code against the local executor server.", " resume", " Resume a paused execution.", "", "TIP", "", " Run `executor call --help` for more examples.", ].join("\n")); }); const printCallHelp = (workflow: string) => Effect.sync(() => { console.log([ "executor call", "", "USAGE", "", " executor call [code] [--file text] [--stdin] [--base-url text] [--no-open]", "", "DESCRIPTION", "", " Execute code against the local executor server.", "", "WORKFLOW", "", indentBlock(workflow), "", "OPTIONS", "", " [code]", " Inline code to execute.", " --file text", " Read code from a file.", " --stdin", " Read code from stdin.", " --base-url text", " Override the executor server base URL.", " --no-open", " Print interaction 
URLs without opening a browser.", "", "EXAMPLES", "", ' executor call \'const matches = await tools.discover({ query: "github issues", limit: 5 }); return matches;\'', ' executor call \'const matches = await tools.discover({ query: "repo details", limit: 1 }); const path = matches.bestPath; return await tools.describe.tool({ path, includeSchemas: true });\'', ' executor call \'return await tools.executor.sources.add({ endpoint: "https://example.com/mcp", name: "Example", namespace: "example" });\'', ' executor call \'return await tools.executor.sources.add({ kind: "openapi", endpoint: "https://api.github.com", specUrl: "https://raw.githubusercontent.com/github/rest-api-description/main/descriptions/api.github.com/api.github.com.json", name: "GitHub", namespace: "github" });\'', " cat script.ts | executor call --stdin", " executor call --file script.ts", " executor call --no-open --file script.ts", " executor resume --execution-id exec_123", ].join("\n")); }); const helpOverride = (): Effect.Effect | null => { const args = getCliArgs(); if (args.length === 0 || (args.length === 1 && isHelpToken(args[0]))) { return loadRunWorkflowText().pipe(Effect.flatMap(printRootHelp)); } if (args[0] === "call" && args.length === 2 && isHelpToken(args[1])) { return loadRunWorkflowText().pipe(Effect.flatMap(printCallHelp)); } return null; }; const getLocalAuthedClient = (baseUrl: string = DEFAULT_SERVER_BASE_URL) => Effect.gen(function* () { const bootstrapClient = yield* getBootstrapClient(baseUrl); const installation = yield* bootstrapClient.local.installation({}); const client = yield* createControlPlaneClient({ baseUrl, accountId: installation.accountId, }); return { installation, client, } as const; }); const isServerReachable = (baseUrl: string) => getBootstrapClient(baseUrl).pipe( Effect.flatMap((client) => client.local.installation({})), Effect.as(true), Effect.catchAll(() => Effect.succeed(false)), ); const getDefaultServerOptions = (port: number = DEFAULT_SERVER_PORT) => 
{ const assetsDir = resolveRuntimeWebAssetsDir(); return { host: DEFAULT_SERVER_HOST, port, localDataDir: DEFAULT_LOCAL_DATA_DIR, pidFile: DEFAULT_SERVER_PID_FILE, ui: assetsDir ? { assetsDir } : undefined, }; }; const startServerInBackground = (port: number) => Effect.scoped( Effect.gen(function* () { const fs = yield* FileSystem.FileSystem; const command = resolveSelfCommand(["__local-server", "--port", String(port)]); yield* fs.makeDirectory(dirname(DEFAULT_SERVER_LOG_FILE), { recursive: true, }).pipe(Effect.mapError(toError)); const logHandle = yield* fs.open(DEFAULT_SERVER_LOG_FILE, { flag: "a", }).pipe(Effect.mapError(toError)); yield* Effect.try({ try: () => { const fd = Number(logHandle.fd); const child = spawn(command[0]!, command.slice(1), { detached: true, stdio: ["ignore", fd, fd], }); child.unref(); }, catch: toError, }); }), ); type LocalServerPidRecord = { pid?: number; port?: number; host?: string; baseUrl?: string; startedAt?: number; logFile?: string; }; const readPidRecord = (): Effect.Effect< LocalServerPidRecord | null, never, FileSystem.FileSystem > => Effect.gen(function* () { const fs = yield* FileSystem.FileSystem; const contents = yield* fs.readFileString(DEFAULT_SERVER_PID_FILE, "utf8").pipe( Effect.catchAll(() => Effect.succeed(null)), ); if (contents === null) { return null; } return JSON.parse(contents) as LocalServerPidRecord; }).pipe(Effect.catchAll(() => Effect.succeed(null))); const isPidRunning = (pid: number): boolean => { try { process.kill(pid, 0); return true; } catch (error) { return error instanceof Error && "code" in error && error.code === "EPERM"; } }; const readServerLogTail = ( logFile: string = DEFAULT_SERVER_LOG_FILE, maxLines: number = 40, maxChars: number = 6000, ): Effect.Effect => Effect.gen(function* () { const fs = yield* FileSystem.FileSystem; const contents = yield* fs.readFileString(logFile, "utf8").pipe( Effect.catchAll(() => Effect.succeed(null)), ); if (contents === null) { return null; } const lines = 
contents.split(/\r?\n/u).filter((line) => line.length > 0); const tail = lines.slice(-maxLines).join("\n"); return tail.length > maxChars ? tail.slice(-maxChars) : tail; }); const failReachabilityTimeout = (input: { baseUrl: string; expected: boolean; logFile?: string; }): Effect.Effect => Effect.gen(function* () { const logFile = input.logFile ?? DEFAULT_SERVER_LOG_FILE; const logTail = yield* readServerLogTail(logFile); return yield* localServerReachabilityTimeoutError({ baseUrl: input.baseUrl, expected: input.expected, logFile, logTail, }); }); const waitForReachability = (baseUrl: string, expected: boolean) => Effect.gen(function* () { const startedAt = Date.now(); while (Date.now() - startedAt < SERVER_START_TIMEOUT_MS) { const reachable = yield* isServerReachable(baseUrl); if (reachable === expected) { return; } yield* sleep(SERVER_POLL_INTERVAL_MS); } return yield* failReachabilityTimeout({ baseUrl, expected }); }); type LocalServerStatus = { baseUrl: string; reachable: boolean; pidFile: string; pid: number | null; pidRunning: boolean; logFile: string; localDataDir: string; webAssetsDir: string | null; installation: { accountId: string; workspaceId: string; } | null; denoVersion: string | null; }; const renderDenoSandboxDetail = (denoVersion: string | null): string => denoVersion !== null ? `deno ${denoVersion}` : "deno not found (run `executor sandbox` to install)"; const getServerStatus = ( baseUrl: string, ): Effect.Effect => Effect.gen(function* () { const pidRecord = yield* readPidRecord(); const reachable = yield* isServerReachable(baseUrl); const installation = reachable ? yield* getBootstrapClient(baseUrl).pipe( Effect.flatMap((client) => client.local.installation({})), Effect.catchAll(() => Effect.succeed(null)), ) : null; const pid = typeof pidRecord?.pid === "number" ? pidRecord.pid : null; const pidRunning = pid !== null ? isPidRunning(pid) : false; const logFile = pidRecord?.logFile ?? 
DEFAULT_SERVER_LOG_FILE; const denoVersion = yield* getDenoVersion(); return { baseUrl, reachable, pidFile: DEFAULT_SERVER_PID_FILE, pid, pidRunning, logFile, localDataDir: DEFAULT_LOCAL_DATA_DIR, webAssetsDir: resolveRuntimeWebAssetsDir(), installation, denoVersion, } satisfies LocalServerStatus; }); const renderStatus = (status: LocalServerStatus): string => [ `baseUrl: ${status.baseUrl}`, `reachable: ${status.reachable ? "yes" : "no"}`, `pid: ${status.pid ?? "none"}`, `pidRunning: ${status.pidRunning ? "yes" : "no"}`, `pidFile: ${status.pidFile}`, `logFile: ${status.logFile}`, `localDataDir: ${status.localDataDir}`, `webAssetsDir: ${status.webAssetsDir ?? "missing"}`, `workspaceId: ${status.installation?.workspaceId ?? "unavailable"}`, `denoSandbox: ${renderDenoSandboxDetail(status.denoVersion)}`, ].join("\n"); const getDoctorReport = (baseUrl: string) => getServerStatus(baseUrl).pipe( Effect.map((status) => { const checks = { serverReachable: { ok: status.reachable, detail: status.reachable ? `reachable at ${status.baseUrl}` : `not reachable at ${status.baseUrl}`, }, pidFile: { ok: status.pid !== null, detail: status.pid !== null ? `pid ${status.pid}` : `missing pid file at ${status.pidFile}`, }, process: { ok: status.pidRunning, detail: status.pidRunning ? `pid ${status.pid}` : "no live daemon process recorded", }, database: { ok: status.localDataDir.length > 0, detail: status.localDataDir, }, webAssets: { ok: status.webAssetsDir !== null, detail: status.webAssetsDir ?? "missing bundled web assets", }, installation: { ok: status.installation !== null, detail: status.installation ? 
`workspace ${status.installation.workspaceId}` : "local installation unavailable", }, denoSandbox: { ok: status.denoVersion !== null, detail: renderDenoSandboxDetail(status.denoVersion), }, } as const; return { ok: Object.values(checks).every((check) => check.ok), status, checks, }; }), ); const printJson = (value: unknown) => Effect.sync(() => { console.log(JSON.stringify(value, null, 2)); }); const printText = (value: string) => Effect.sync(() => { console.log(value); }); const stopServer = (baseUrl: string) => Effect.gen(function* () { const fs = yield* FileSystem.FileSystem; const removePidFile = fs.remove(DEFAULT_SERVER_PID_FILE, { force: true, }).pipe(Effect.ignore); const pidRecord = yield* readPidRecord(); const pid = typeof pidRecord?.pid === "number" ? pidRecord.pid : null; if (pid === null) { yield* removePidFile; return false; } if (!isPidRunning(pid)) { yield* removePidFile; return false; } yield* Effect.sync(() => { process.kill(pid, "SIGTERM"); }); yield* waitForReachability(baseUrl, false).pipe( Effect.catchAll(() => removePidFile.pipe( Effect.ignore, Effect.zipRight(Effect.fail(executorAppEffectError("cli/main", `Timed out stopping local executor server pid ${pid}`))), ), ), ); return true; }); const ensureServer = (baseUrl: string = DEFAULT_SERVER_BASE_URL) => Effect.gen(function* () { const reachable = yield* isServerReachable(baseUrl); if (reachable) { return; } const url = new URL(baseUrl); const port = Number(url.port || DEFAULT_SERVER_PORT); yield* startServerInBackground(port); yield* waitForReachability(baseUrl, true); }); const isRecord = (value: unknown): value is Record => typeof value === "object" && value !== null && !Array.isArray(value); type PromptField = { name: string; label: string; description?: string; type: string; required: boolean; enumValues?: readonly unknown[]; }; const getPromptFields = (requestedSchema: Record | undefined): PromptField[] => { if (!requestedSchema || !isRecord(requestedSchema.properties)) { return []; } 
    // Collect required field names from the schema's `required` array;
    // non-string entries are silently ignored.
    const required = new Set(
      Array.isArray(requestedSchema.required)
        ? requestedSchema.required.filter((value): value is string => typeof value === "string")
        : [],
    );
    return Object.entries(requestedSchema.properties).flatMap(([name, property]) => {
      // Skip properties that are not object-shaped schema entries.
      if (!isRecord(property)) {
        return [];
      }
      return [{
        name,
        // Prefer a non-blank schema `title` for the label, else the name.
        label: typeof property.title === "string" && property.title.trim().length > 0
          ? property.title.trim()
          : name,
        description: typeof property.description === "string" && property.description.trim().length > 0
          ? property.description.trim()
          : undefined,
        // A missing or non-string `type` is treated as a free-form string.
        type: typeof property.type === "string" ? property.type : "string",
        required: required.has(name),
        enumValues: Array.isArray(property.enum) ? property.enum : undefined,
      }];
    });
  };

// Parse one raw terminal answer against a prompt field's declared type.
// Returns either the coerced value or a user-facing validation message.
// The checks run in order: enum gate, boolean, number/integer, JSON for
// object/array, then passthrough for everything else (including "string").
const parsePromptValue = (field: PromptField, raw: string):
  | { ok: true; value: unknown }
  | { ok: false; message: string } => {
  // Enum gate: the answer must match one of the enum values (compared as
  // strings); a matching answer still flows through the coercion below.
  if (field.enumValues && field.enumValues.length > 0) {
    const normalized = field.enumValues.map((value) => String(value));
    if (!normalized.includes(raw)) {
      return {
        ok: false,
        message: `Enter one of: ${normalized.join(", ")}`,
      };
    }
  }
  if (field.type === "boolean") {
    const normalized = raw.trim().toLowerCase();
    if (["y", "yes", "true"].includes(normalized)) {
      return { ok: true, value: true };
    }
    if (["n", "no", "false"].includes(normalized)) {
      return { ok: true, value: false };
    }
    return { ok: false, message: "Enter yes or no" };
  }
  if (field.type === "number" || field.type === "integer") {
    const value = Number(raw);
    // Number(raw) yields NaN for non-numeric input; Infinity is also rejected.
    if (!Number.isFinite(value)) {
      return { ok: false, message: "Enter a number" };
    }
    if (field.type === "integer" && !Number.isInteger(value)) {
      return { ok: false, message: "Enter an integer" };
    }
    return { ok: true, value };
  }
  // Structured types are accepted as raw JSON text.
  if (field.type === "object" || field.type === "array") {
    try {
      return { ok: true, value: JSON.parse(raw) };
    } catch {
      return { ok: false, message: "Enter valid JSON" };
    }
  }
  return { ok: true, value: raw };
};

// Prompt the user for each field of a structured (form) interaction.
const promptStructuredInteraction = (parsed: { message: string; requestedSchema?
Record; }) => Effect.gen(function* () { const fields = getPromptFields(parsed.requestedSchema); if (fields.length === 0) { return null as Record | null; } yield* Effect.sync(() => { process.stdout.write(`${parsed.message}\n`); }); const content: Record = {}; for (const field of fields) { if (field.description) { yield* Effect.sync(() => { process.stdout.write(`${field.description}\n`); }); } while (true) { const raw = yield* promptLine( `${field.label}${field.required ? "" : " (optional)"}: `, ); const trimmed = raw.trim(); if (trimmed.length === 0) { if (field.required) { return null; } break; } const parsedValue = parsePromptValue(field, trimmed); if (parsedValue.ok) { content[field.name] = parsedValue.value; break; } yield* Effect.sync(() => { process.stdout.write(`${parsedValue.message}\n`); }); } } return content; }); const printUrlInteraction = (input: { message: string; url: string | null; shouldOpen: boolean; }) => Effect.gen(function* () { yield* Effect.sync(() => { process.stdout.write(`${input.message}\n${input.url ?? ""}\n`); }); if (input.shouldOpen && input.url) { yield* openUrlInBrowser(input.url); } }); const executionInteractionMode = (): "live_form" | "detach" => process.stdin.isTTY && process.stdout.isTTY ? "live_form" : "detach"; const promptInteraction = (input: { interaction: ExecutionInteraction; shouldOpenUrls: boolean; }) => Effect.gen(function* () { const parsed = parseInteractionPayload(input.interaction); if (!process.stdin.isTTY || !process.stdout.isTTY || parsed === null) { return null; } if (parsed.mode === "url") { yield* printUrlInteraction({ message: parsed.message, url: parsed.url ?? 
null, shouldOpen: input.shouldOpenUrls, }); return null; } const structured = yield* promptStructuredInteraction(parsed); if (structured !== null) { return JSON.stringify({ action: "accept", content: structured, }); } const line = yield* promptLine(`${parsed.message} [y/N] `); const normalized = line.trim().toLowerCase(); if (normalized.length === 0) { return null; } if (normalized !== "y" && normalized !== "yes" && normalized !== "n" && normalized !== "no") { return null; } const accepted = normalized === "y" || normalized === "yes"; return JSON.stringify({ action: accepted ? "accept" : "decline", content: { approve: accepted, }, }); }); const waitForExecutionProgress = (input: { client: ControlPlaneClient; workspaceId: ExecutionEnvelope["execution"]["workspaceId"]; executionId: ExecutionEnvelope["execution"]["id"]; pendingInteractionId: ExecutionInteraction["id"]; }) => Effect.gen(function* () { while (true) { yield* sleep(SERVER_POLL_INTERVAL_MS); const next = yield* input.client.executions.get({ path: { workspaceId: input.workspaceId, executionId: input.executionId, }, }); if ( next.execution.status !== "waiting_for_interaction" || next.pendingInteraction === null || next.pendingInteraction.id !== input.pendingInteractionId ) { return next; } } }); const printExecution = (envelope: ExecutionEnvelope) => Effect.sync(() => { const execution = envelope.execution; if (execution.status === "completed") { if (execution.resultJson) { console.log(execution.resultJson); } else { console.log("completed"); } return; } if (execution.status === "failed") { console.error(execution.errorText ?? 
"Execution failed"); process.exitCode = 1; return; } if (execution.status === "waiting_for_interaction" && envelope.pendingInteraction !== null) { return; } console.log(JSON.stringify({ id: execution.id, status: execution.status, })); }); const seedDemoMcpSource = (input: { baseUrl: string; endpoint: string; name: string; namespace: string; }) => Effect.gen(function* () { yield* ensureServer(input.baseUrl); const { installation, client } = yield* getLocalAuthedClient(input.baseUrl); const result = yield* seedDemoMcpSourceInWorkspace({ client, workspaceId: installation.workspaceId, endpoint: input.endpoint, name: input.name, namespace: input.namespace, }); yield* Effect.sync(() => { console.log(JSON.stringify(result)); }); }); const seedGithubOpenApiSource = (input: { baseUrl: string; endpoint: string; specUrl: string; name: string; namespace: string; credentialEnvVar?: string; }) => Effect.gen(function* () { yield* ensureServer(input.baseUrl); const { installation, client } = yield* getLocalAuthedClient(input.baseUrl); const result = yield* seedGithubOpenApiSourceInWorkspace({ client, workspaceId: installation.workspaceId, endpoint: input.endpoint, specUrl: input.specUrl, name: input.name, namespace: input.namespace, credentialEnvVar: input.credentialEnvVar, }); yield* Effect.sync(() => { console.log(JSON.stringify(result)); }); }); const driveExecution = (input: { client: ControlPlaneClient; workspaceId: ExecutionEnvelope["execution"]["workspaceId"]; envelope: ExecutionEnvelope; baseUrl: string; shouldOpenUrls: boolean; }) => Effect.gen(function* () { let current = input.envelope; while (current.execution.status === "waiting_for_interaction") { const pending = current.pendingInteraction; if (pending === null) { return current; } const parsed = parseInteractionPayload(pending); const handling = decideInteractionHandling({ parsed, isInteractiveTerminal: process.stdin.isTTY && process.stdout.isTTY, }); if (handling === "url_interactive" && parsed?.mode === "url") { 
yield* printUrlInteraction({ message: parsed.message, url: parsed.url ?? null, shouldOpen: input.shouldOpenUrls, }); current = yield* waitForExecutionProgress({ client: input.client, workspaceId: input.workspaceId, executionId: current.execution.id, pendingInteractionId: pending.id, }); continue; } if (handling === "url_paused") { if (input.shouldOpenUrls && parsed?.mode === "url" && parsed.url) { yield* openUrlInBrowser(parsed.url); } const paused = buildPausedExecutionOutput({ executionId: current.execution.id, interaction: pending, baseUrl: input.baseUrl, shouldOpenUrls: input.shouldOpenUrls, cliName: CLI_NAME, }); yield* Effect.sync(() => { console.log(JSON.stringify(paused)); process.exitCode = 20; }); return current; } if (handling === "form_paused") { const paused = buildPausedExecutionOutput({ executionId: current.execution.id, interaction: pending, baseUrl: input.baseUrl, shouldOpenUrls: input.shouldOpenUrls, cliName: CLI_NAME, }); yield* Effect.sync(() => { console.log(JSON.stringify(paused)); process.exitCode = 20; }); return current; } const responseJson = yield* promptInteraction({ interaction: pending, shouldOpenUrls: input.shouldOpenUrls, }); if (responseJson === null) { const paused = buildPausedExecutionOutput({ executionId: current.execution.id, interaction: pending, baseUrl: input.baseUrl, shouldOpenUrls: input.shouldOpenUrls, cliName: CLI_NAME, }); yield* Effect.sync(() => { console.log(JSON.stringify(paused)); process.exitCode = 20; }); return current; } current = yield* input.client.executions.resume({ path: { workspaceId: input.workspaceId, executionId: current.execution.id, }, payload: { responseJson, interactionMode: executionInteractionMode(), }, }); } return current; }); const serverStartCommand = Command.make( "start", { port: Options.integer("port").pipe(Options.withDefault(DEFAULT_SERVER_PORT)), }, ({ port }) => runLocalExecutorServer(getDefaultServerOptions(port)), ).pipe(Command.withDescription("Start the local executor server")); 
const serverCommand = Command.make("server").pipe( Command.withSubcommands([serverStartCommand] as const), Command.withDescription("Local server commands"), ); const upCommand = Command.make( "up", { baseUrl: Options.text("base-url").pipe(Options.withDefault(DEFAULT_SERVER_BASE_URL)), }, ({ baseUrl }) => ensureServer(baseUrl).pipe( Effect.zipRight(getServerStatus(baseUrl)), Effect.flatMap((status) => printText(renderStatus(status))), ), ).pipe(Command.withDescription("Ensure the local executor server is running")); const downCommand = Command.make( "down", { baseUrl: Options.text("base-url").pipe(Options.withDefault(DEFAULT_SERVER_BASE_URL)), }, ({ baseUrl }) => stopServer(baseUrl).pipe( Effect.flatMap((stopped) => printText(stopped ? "Stopped local executor server." : "Local executor server is not running."), ), ), ).pipe(Command.withDescription("Stop the local executor server")); const statusCommand = Command.make( "status", { baseUrl: Options.text("base-url").pipe(Options.withDefault(DEFAULT_SERVER_BASE_URL)), json: Options.boolean("json").pipe(Options.withDefault(false)), }, ({ baseUrl, json }) => getServerStatus(baseUrl).pipe( Effect.flatMap((status) => json ? printJson(status) : printText(renderStatus(status))), ), ).pipe(Command.withDescription("Show local executor server status")); const doctorCommand = Command.make( "doctor", { baseUrl: Options.text("base-url").pipe(Options.withDefault(DEFAULT_SERVER_BASE_URL)), json: Options.boolean("json").pipe(Options.withDefault(false)), }, ({ baseUrl, json }) => getDoctorReport(baseUrl).pipe( Effect.flatMap((report) => json ? printJson(report) : printText([ `ok: ${report.ok ? "yes" : "no"}`, ...Object.entries(report.checks).map(([name, check]) => `${name}: ${check.ok ? 
"ok" : "fail"} - ${check.detail}`), ].join("\n"))), ), ).pipe(Command.withDescription("Check local executor install and daemon health")); const getDenoVersion = (): Effect.Effect => Effect.gen(function* () { const fs = yield* FileSystem.FileSystem; const configuredDenoExecutable = process.env.DENO_BIN?.trim(); const bundledDenoExecutable = process.env.HOME?.trim() ? `${process.env.HOME.trim()}/.deno/bin/deno` : null; const bundledDenoExists = bundledDenoExecutable === null ? false : yield* fs.exists(bundledDenoExecutable).pipe( Effect.catchAll(() => Effect.succeed(false)), ); const denoExecutable = configuredDenoExecutable || (bundledDenoExists ? bundledDenoExecutable : null) || "deno"; return yield* Effect.tryPromise({ try: () => new Promise((resolveVersion) => { const child = spawn(denoExecutable, ["--version"], { stdio: ["ignore", "pipe", "ignore"], timeout: 5000, }); let stdout = ""; child.stdout?.setEncoding("utf8"); child.stdout?.on("data", (chunk: string) => { stdout += chunk; }); child.once("error", () => resolveVersion(null)); child.once("close", (code) => { if (code !== 0) { resolveVersion(null); return; } const match = /deno\s+(\S+)/i.exec(stdout); resolveVersion(match ? 
match[1] : null); }); }), catch: () => null, }).pipe(Effect.catchAll(() => Effect.succeed(null))); }).pipe(Effect.catchAll(() => Effect.succeed(null))); const sandboxCommand = Command.make( "sandbox", {}, () => Effect.gen(function* () { const version = yield* getDenoVersion(); if (version !== null) { yield* printText(`Deno sandbox is ready (deno ${version}).`); return; } yield* printText( [ "Deno is not installed.", "", "The executor sandbox requires Deno to run code in a secure, isolated subprocess.", "", "Install Deno:", " curl -fsSL https://deno.land/install.sh | sh", "", "Or see: https://docs.deno.com/runtime/getting_started/installation/", ].join("\n"), ); process.exitCode = 1; }), ).pipe(Command.withDescription("Check whether the Deno sandbox runtime is available")); const callCommand = Command.make( "call", { code: Args.text({ name: "code" }).pipe( Args.withDescription("Inline code to execute."), Args.optional, ), file: Options.text("file").pipe(Options.optional), stdin: Options.boolean("stdin").pipe(Options.withDefault(false)), baseUrl: Options.text("base-url").pipe(Options.withDefault(DEFAULT_SERVER_BASE_URL)), noOpen: Options.boolean("no-open").pipe(Options.withDefault(false)), }, ({ code, file, stdin, baseUrl, noOpen }) => Effect.gen(function* () { const resolvedCode = yield* readCode({ code: Option.getOrUndefined(code), file: Option.getOrUndefined(file), stdin, }); yield* ensureServer(baseUrl); const { installation, client } = yield* getLocalAuthedClient(baseUrl); const created = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: resolvedCode, interactionMode: executionInteractionMode(), }, }); const settled = yield* driveExecution({ client, workspaceId: installation.workspaceId, envelope: created, baseUrl, shouldOpenUrls: !noOpen, }); yield* printExecution(settled); }), ).pipe(Command.withDescription("Execute code against the local executor server")); const resumeCommand = Command.make( "resume", { 
executionId: Options.text("execution-id"), baseUrl: Options.text("base-url").pipe(Options.withDefault(DEFAULT_SERVER_BASE_URL)), noOpen: Options.boolean("no-open").pipe(Options.withDefault(false)), }, ({ executionId, baseUrl, noOpen }) => Effect.gen(function* () { yield* ensureServer(baseUrl); const { installation, client } = yield* getLocalAuthedClient(baseUrl); const decodedExecutionId = yield* decodeExecutionId(executionId).pipe( Effect.mapError((cause) => toError(cause)), ); const execution = yield* client.executions.get({ path: { workspaceId: installation.workspaceId, executionId: decodedExecutionId, }, }); const settled = yield* driveExecution({ client, workspaceId: installation.workspaceId, envelope: execution, baseUrl, shouldOpenUrls: !noOpen, }); yield* printExecution(settled); }), ).pipe(Command.withDescription("Resume a paused execution")); const devSeedMcpDemoCommand = Command.make( "seed-mcp-demo", { baseUrl: Options.text("base-url").pipe(Options.withDefault(DEFAULT_SERVER_BASE_URL)), endpoint: Options.text("endpoint").pipe( Options.withDefault("http://127.0.0.1:58506/mcp"), ), name: Options.text("name").pipe(Options.withDefault("Demo")), namespace: Options.text("namespace").pipe(Options.withDefault("demo")), }, ({ baseUrl, endpoint, name, namespace }) => seedDemoMcpSource({ baseUrl, endpoint, name, namespace, }), ).pipe( Command.withDescription( "Seed the localhost MCP elicitation demo source into the default workspace", ), ); const devSeedGithubCommand = Command.make( "seed-github", { baseUrl: Options.text("base-url").pipe(Options.withDefault(DEFAULT_SERVER_BASE_URL)), endpoint: Options.text("endpoint").pipe( Options.withDefault("https://api.github.com"), ), specUrl: Options.text("spec-url").pipe( Options.withDefault( "https://raw.githubusercontent.com/github/rest-api-description/main/descriptions/api.github.com/api.github.com.json", ), ), name: Options.text("name").pipe(Options.withDefault("GitHub")), namespace: 
Options.text("namespace").pipe(Options.withDefault("github")), credentialEnvVar: Options.text("credential-env-var").pipe( Options.withDefault("GITHUB_TOKEN"), ), }, ({ baseUrl, endpoint, specUrl, name, namespace, credentialEnvVar }) => seedGithubOpenApiSource({ baseUrl, endpoint, specUrl, name, namespace, credentialEnvVar, }), ).pipe( Command.withDescription( "Seed a GitHub OpenAPI source into the default workspace", ), ); const devCommand = Command.make("dev").pipe( Command.withSubcommands([devSeedMcpDemoCommand, devSeedGithubCommand] as const), Command.withDescription("Development helpers"), ); const root = Command.make("executor").pipe( Command.withSubcommands([ serverCommand, upCommand, downCommand, statusCommand, doctorCommand, sandboxCommand, callCommand, resumeCommand, devCommand, ] as const), Command.withDescription("Executor local CLI"), ); const runCli = Command.run(root, { name: CLI_NAME, version: CLI_VERSION, executable: CLI_NAME, }); const hiddenServer = (): Effect.Effect | null => { const args = getCliArgs(); if (args[0] !== "__local-server") { return null; } const portFlagIndex = args.findIndex((arg) => arg === "--port"); const port = portFlagIndex >= 0 ? Number(args[portFlagIndex + 1]) : DEFAULT_SERVER_PORT; return runLocalExecutorServer({ ...getDefaultServerOptions( Number.isInteger(port) && port > 0 ? port : DEFAULT_SERVER_PORT, ), }); }; const program = (hiddenServer() ?? helpOverride() ?? runCli(toEffectCliArgv(getCliArgs())).pipe(Effect.mapError(toError))) .pipe(Effect.provide(NodeFileSystem.layer)) .pipe(Effect.provide(NodePath.layer)) .pipe( Effect.catchAllCause((cause) => Effect.sync(() => { console.error(Cause.pretty(cause)); process.exitCode = 1; }), ), ); // Effect CLI's environment does not fully narrow at the process boundary. 
NodeRuntime.runMain(program as Effect.Effect); ================================================ FILE: apps/executor/src/cli/pending-interaction-output.test.ts ================================================ import { describe, expect, it } from "@effect/vitest"; import type { ExecutionInteraction } from "@executor/control-plane"; import { buildPausedExecutionOutput, parseInteractionPayload, } from "./pending-interaction-output"; const makeInteraction = (patch: Partial = {}): ExecutionInteraction => ({ id: "exec_123:tool_execution_gate:call_1" as never, executionId: "exec_123" as never, status: "pending", kind: "form", purpose: "tool_execution_gate", payloadJson: JSON.stringify({ elicitation: { mode: "form", message: "Allow DELETE /v2/domains/{domain}/records/{recordId}?", requestedSchema: { type: "object", properties: { approve: { type: "boolean", description: "Whether to approve the tool call", }, }, required: ["approve"], additionalProperties: false, }, }, }), responseJson: null, responsePrivateJson: null, createdAt: 1, updatedAt: 1, ...patch, }); describe("pending-interaction-output", () => { it("parses requested schema from interaction payload", () => { const parsed = parseInteractionPayload(makeInteraction()); expect(parsed).not.toBeNull(); expect(parsed?.mode).toBe("form"); expect(parsed?.requestedSchema).toEqual({ type: "object", properties: { approve: { type: "boolean", description: "Whether to approve the tool call", }, }, required: ["approve"], additionalProperties: false, }); }); it("builds a structured paused execution instruction with full resume command", () => { const output = buildPausedExecutionOutput({ executionId: "exec_123", interaction: makeInteraction(), baseUrl: "http://127.0.0.1:8788", shouldOpenUrls: false, }); expect(output.resumeCommand).toBe( "executor resume --execution-id exec_123 --base-url http://127.0.0.1:8788 --no-open", ); expect(output.interaction.requestedSchema).toEqual({ type: "object", properties: { approve: { type: 
"boolean", description: "Whether to approve the tool call", }, }, required: ["approve"], additionalProperties: false, }); expect(output.instruction).toContain("requires approval"); expect(output.instruction).toContain("Allow DELETE /v2/domains/{domain}/records/{recordId}?"); expect(output.instruction).toContain(output.resumeCommand); expect(output.instruction).toContain("interaction.requestedSchema"); }); it("describes URL interactions with the next command to run", () => { const output = buildPausedExecutionOutput({ executionId: "exec_456", interaction: makeInteraction({ id: "exec_456:source_connect_oauth2:call_1" as never, executionId: "exec_456" as never, kind: "url", purpose: "source_connect_oauth2", payloadJson: JSON.stringify({ elicitation: { mode: "url", message: "Finish connecting Vercel", url: "https://vercel.com/oauth/start", }, }), }), baseUrl: "http://127.0.0.1:8788", shouldOpenUrls: true, }); expect(output.interaction.mode).toBe("url"); expect(output.interaction.url).toBe("https://vercel.com/oauth/start"); expect(output.instruction).toContain("https://vercel.com/oauth/start"); expect(output.instruction).toContain( "executor resume --execution-id exec_456 --base-url http://127.0.0.1:8788", ); }); }); ================================================ FILE: apps/executor/src/cli/pending-interaction-output.ts ================================================ import type { ExecutionInteraction } from "@executor/control-plane"; const isRecord = (value: unknown): value is Record => typeof value === "object" && value !== null && !Array.isArray(value); export type ParsedInteractionPayload = { message: string; mode: "form" | "url"; url?: string; requestedSchema?: Record; }; export const parseInteractionPayload = ( interaction: ExecutionInteraction, ): ParsedInteractionPayload | null => { try { const parsed = JSON.parse(interaction.payloadJson) as { elicitation?: { message?: string; mode?: "form" | "url"; url?: string; requestedSchema?: Record; }; }; if 
(!parsed.elicitation || typeof parsed.elicitation.message !== "string") { return null; } return { message: parsed.elicitation.message, mode: parsed.elicitation.mode === "url" ? "url" : "form", url: parsed.elicitation.url, requestedSchema: isRecord(parsed.elicitation.requestedSchema) ? parsed.elicitation.requestedSchema : undefined, }; } catch { return null; } }; const shellEscape = (value: string): string => { if (/^[A-Za-z0-9_./:=+-]+$/.test(value)) { return value; } return `'${value.replace(/'/g, `'"'"'`)}'`; }; const renderCommand = (argv: readonly string[]): string => argv.map(shellEscape).join(" "); const describePauseReason = (interaction: ExecutionInteraction): string => { switch (interaction.purpose) { case "tool_execution_gate": return "this tool call requires approval before executor can continue"; case "source_connect_oauth2": return "executor needs browser authentication to finish connecting the source"; case "source_connect_secret": return "executor needs credentials to finish connecting the source"; default: return "executor needs additional input before it can continue"; } }; const buildInstruction = (input: { interaction: ExecutionInteraction; parsed: ParsedInteractionPayload | null; resumeCommand: string; }): string => { const reason = describePauseReason(input.interaction); const prompt = input.parsed?.message ?? "Interaction required"; if (input.parsed?.mode === "url") { const url = input.parsed.url?.trim(); return url && url.length > 0 ? `Execution paused because ${reason}. Open ${url} and complete the requested flow for "${prompt}", then run ${input.resumeCommand} to continue if it does not resume automatically.` : `Execution paused because ${reason}. Run ${input.resumeCommand} to continue and complete the requested flow for "${prompt}".`; } if (input.parsed?.requestedSchema) { return `Execution paused because ${reason}. The interaction prompt is "${prompt}". 
Run ${input.resumeCommand} in an interactive terminal and respond with input matching interaction.requestedSchema.`; } return `Execution paused because ${reason}. The interaction prompt is "${prompt}". Run ${input.resumeCommand} in an interactive terminal to continue.`; }; export const buildPausedExecutionOutput = (input: { executionId: string; interaction: ExecutionInteraction; baseUrl: string; shouldOpenUrls: boolean; cliName?: string; }) => { const cliName = input.cliName ?? "executor"; const argv = [cliName, "resume", "--execution-id", input.executionId, "--base-url", input.baseUrl]; if (!input.shouldOpenUrls) { argv.push("--no-open"); } const command = renderCommand(argv); const parsed = parseInteractionPayload(input.interaction); return { id: input.executionId, status: "waiting_for_interaction" as const, interactionId: input.interaction.id, message: parsed?.message ?? "Interaction required", resumeCommand: command, interaction: { id: input.interaction.id, purpose: input.interaction.purpose, mode: parsed?.mode ?? (input.interaction.kind === "url" ? "url" : "form"), message: parsed?.message ?? "Interaction required", url: parsed?.url ?? null, requestedSchema: parsed?.requestedSchema ?? null, }, resume: { command, argv, }, instruction: buildInstruction({ interaction: input.interaction, parsed, resumeCommand: command, }), }; }; ================================================ FILE: apps/executor/src/cli/runtime-paths.ts ================================================ import { existsSync } from "node:fs"; import { dirname, extname, resolve } from "node:path"; import { fileURLToPath } from "node:url"; import { EXECUTOR_WEB_ASSETS_DIR_ENV, } from "@executor/server"; const sourceDir = dirname(fileURLToPath(import.meta.url)); const trim = (value: string | undefined): string | undefined => { const candidate = value?.trim(); return candidate && candidate.length > 0 ? 
candidate : undefined; }; const resolveIfExists = (value: string | undefined): string | null => { const candidate = trim(value); if (!candidate) { return null; } const resolved = resolve(candidate); return existsSync(resolved) ? resolved : null; }; const getSourceEntrypoint = (): string | null => { const candidate = trim(process.argv[1]); if (!candidate) { return null; } const resolved = resolve(candidate); const extension = extname(resolved).toLowerCase(); return [".ts", ".tsx", ".js", ".mjs", ".cjs"].includes(extension) ? resolved : null; }; const resolveBundledNodeLauncher = (): string | null => { const candidate = resolve(sourceDir, "executor.js"); return existsSync(candidate) ? candidate : null; }; const resolveRuntimeResourcesRoot = (): string | null => { const compiledCandidate = resolve(dirname(process.execPath), "../resources"); if (existsSync(compiledCandidate)) { return compiledCandidate; } const bundledCandidateFromModule = resolve(sourceDir, "../resources"); if (existsSync(bundledCandidateFromModule)) { return bundledCandidateFromModule; } const sourceEntrypoint = getSourceEntrypoint(); if (sourceEntrypoint) { const bundledCandidate = resolve(dirname(sourceEntrypoint), "../resources"); if (existsSync(bundledCandidate)) { return bundledCandidate; } } return null; }; export const resolveSelfCommand = (args: readonly string[]): readonly string[] => { const bundledLauncher = resolveBundledNodeLauncher(); if (bundledLauncher !== null) { return [process.execPath, bundledLauncher, ...args]; } const sourceEntrypoint = getSourceEntrypoint(); return sourceEntrypoint === null ? [process.execPath, ...args] : [process.execPath, sourceEntrypoint, ...args]; }; export const resolveRuntimeWebAssetsDir = (): string | null => { const explicit = resolveIfExists(process.env[EXECUTOR_WEB_ASSETS_DIR_ENV]); if (explicit) { return explicit; } const resourcesRoot = resolveRuntimeResourcesRoot(); const bundled = resourcesRoot ? 
resolveIfExists(resolve(resourcesRoot, "web")) : null; if (bundled) { return bundled; } return resolveIfExists(resolve(sourceDir, "../../../web/dist")); }; ================================================ FILE: apps/executor/src/distribution/artifact.ts ================================================ import { spawn } from "node:child_process"; import { existsSync } from "node:fs"; import { chmod, cp, mkdir, rm, writeFile } from "node:fs/promises"; import { createRequire } from "node:module"; import { dirname, join, resolve } from "node:path"; import { readDistributionPackageMetadata, repoRoot } from "./metadata"; const defaultOutputDir = resolve(repoRoot, "apps/executor/dist/npm"); export type BuildDistributionPackageOptions = { outputDir?: string; packageName?: string; packageVersion?: string; buildWeb?: boolean; }; export type DistributionPackageArtifact = { packageDir: string; launcherPath: string; bundlePath: string; resourcesDir: string; }; type CommandInput = { command: string; args: ReadonlyArray; cwd: string; }; const runCommand = async (input: CommandInput): Promise => { const child = spawn(input.command, [...input.args], { cwd: input.cwd, env: process.env, stdio: ["ignore", "pipe", "pipe"], }); let stdout = ""; let stderr = ""; child.stdout?.setEncoding("utf8"); child.stdout?.on("data", (chunk) => { stdout += chunk; }); child.stderr?.setEncoding("utf8"); child.stderr?.on("data", (chunk) => { stderr += chunk; }); const exitCode = await new Promise((resolveExitCode, reject) => { child.once("error", reject); child.once("close", (code) => { resolveExitCode(code ?? -1); }); }); if (exitCode === 0) { return; } throw new Error( [ `${input.command} ${input.args.join(" ")} exited with code ${exitCode}`, stdout.trim().length > 0 ? `stdout:\n${stdout.trim()}` : null, stderr.trim().length > 0 ? 
`stderr:\n${stderr.trim()}` : null, ] .filter((part) => part !== null) .join("\n\n"), ); }; const resolveQuickJsWasmPath = (): string => { const requireFromQuickJsRuntime = createRequire( join(repoRoot, "packages/kernel/runtime-quickjs/package.json"), ); const quickJsPackagePath = requireFromQuickJsRuntime.resolve( "quickjs-emscripten/package.json", ); const wasmPath = resolve( dirname(quickJsPackagePath), "../@jitl/quickjs-wasmfile-release-sync/dist/emscripten-module.wasm", ); if (!existsSync(wasmPath)) { throw new Error(`Unable to locate QuickJS wasm asset at ${wasmPath}`); } return wasmPath; }; const createPackageJson = (input: { packageName: string; packageVersion: string; description: string; keywords: ReadonlyArray; homepage?: string; bugs?: { url?: string; }; repository?: { type?: string; url?: string; }; license?: string; }) => { const packageJson = { name: input.packageName, version: input.packageVersion, description: input.description, keywords: input.keywords, homepage: input.homepage, bugs: input.bugs, repository: input.repository, license: input.license ?? "MIT", type: "module", private: false, bin: { executor: "bin/executor.js", }, files: [ "bin", "resources", "README.md", "package.json", ], engines: { node: ">=20", }, }; return JSON.stringify(packageJson, null, 2) + "\n"; }; const createLauncherSource = () => [ "#!/usr/bin/env node", 'import "./executor.mjs";', "", ].join("\n"); export const buildDistributionPackage = async ( options: BuildDistributionPackageOptions = {}, ): Promise => { const defaults = await readDistributionPackageMetadata(); const packageDir = resolve(options.outputDir ?? 
defaultOutputDir); const binDir = join(packageDir, "bin"); const resourcesDir = join(packageDir, "resources"); const webDir = join(resourcesDir, "web"); const bundlePath = join(binDir, "executor.mjs"); const launcherPath = join(binDir, "executor.js"); const quickJsWasmPath = resolveQuickJsWasmPath(); const webDistDir = join(repoRoot, "apps/web/dist"); const readmePath = join(repoRoot, "README.md"); const packageName = options.packageName ?? defaults.name; const packageVersion = options.packageVersion ?? defaults.version; await rm(packageDir, { recursive: true, force: true }); await mkdir(binDir, { recursive: true }); await mkdir(resourcesDir, { recursive: true }); if ((options.buildWeb ?? true) || !existsSync(webDistDir)) { await runCommand({ command: "bun", args: ["run", "build"], cwd: join(repoRoot, "apps/web"), }); } if (!existsSync(webDistDir)) { throw new Error(`Missing built web assets at ${webDistDir}`); } await runCommand({ command: "bun", args: [ "build", "./apps/executor/src/cli/main.ts", "--target", "node", "--outfile", bundlePath, ], cwd: repoRoot, }); await cp(webDistDir, webDir, { recursive: true }); await cp(quickJsWasmPath, join(binDir, "emscripten-module.wasm")); await mkdir(join(binDir, "openapi-extractor-wasm"), { recursive: true }); await cp( join(repoRoot, "packages/sources/openapi/src/openapi-extractor-wasm/openapi_extractor_bg.wasm"), join(binDir, "openapi-extractor-wasm/openapi_extractor_bg.wasm"), ); await cp( join(repoRoot, "packages/kernel/runtime-deno-subprocess/src/deno-subprocess-worker.mjs"), join(binDir, "deno-subprocess-worker.mjs"), ); await runCommand({ command: "bun", args: [ "build", "./packages/kernel/runtime-ses/src/sandbox-worker.mjs", "--target", "node", "--outfile", join(binDir, "sandbox-worker.mjs"), ], cwd: repoRoot, }); await writeFile(join(packageDir, "package.json"), createPackageJson({ packageName, packageVersion, description: defaults.description, keywords: defaults.keywords, homepage: defaults.homepage, bugs: 
defaults.bugs, repository: defaults.repository, license: defaults.license, })); await cp(readmePath, join(packageDir, "README.md")); await writeFile(launcherPath, createLauncherSource()); await chmod(launcherPath, 0o755); return { packageDir, launcherPath, bundlePath, resourcesDir, }; }; ================================================ FILE: apps/executor/src/distribution/distribution.test.ts ================================================ import { describe, expect, it } from "@effect/vitest"; import * as Effect from "effect/Effect"; import { DistributionHarness, LocalDistributionHarnessLive, } from "./harness"; describe("distribution flow", () => { const verifyInstallFlow = ( runCommand: ( args: ReadonlyArray, options?: { readonly okExitCodes?: ReadonlyArray; }, ) => Effect.Effect<{ stdout: string; stderr: string }, Error, R>, ) => Effect.gen(function* () { const harness = yield* DistributionHarness; yield* harness.writeProjectConfig(`{ "runtime": "ses", // local workspace config "sources": {}, } `); const initialDoctor = yield* runCommand([ "doctor", "--json", "--base-url", harness.baseUrl, ]); const initialDoctorJson = JSON.parse(initialDoctor.stdout) as { ok: boolean; checks: Record; }; expect(initialDoctorJson.ok).toBe(false); expect(initialDoctorJson.checks.webAssets?.ok).toBe(true); expect(initialDoctorJson.checks.database?.ok).toBe(true); yield* runCommand(["up", "--base-url", harness.baseUrl]); const statusResult = yield* runCommand([ "status", "--json", "--base-url", harness.baseUrl, ]); const status = JSON.parse(statusResult.stdout) as { reachable: boolean; pidRunning: boolean; installation: { workspaceId: string; accountId: string } | null; }; expect(status.reachable).toBe(true); expect(status.pidRunning).toBe(true); expect(status.installation).not.toBeNull(); const html = yield* harness.fetchText("/"); expect(html.status).toBe(200); expect(html.contentType).toContain("text/html"); expect(html.body).toContain("
"); const installationResponse = yield* harness.fetchText("/v1/local/installation"); expect(installationResponse.status).toBe(200); const installation = JSON.parse(installationResponse.body) as { workspaceId: string; accountId: string; }; const sesCall = yield* runCommand( [ "call", 'await fetch("https://example.com"); return 1;', "--base-url", harness.baseUrl, ], { okExitCodes: [1] }, ); expect(sesCall.stderr).toContain("fetch is disabled in SES executor"); yield* runCommand(["down", "--base-url", harness.baseUrl]); yield* runCommand(["up", "--base-url", harness.baseUrl]); const installationAfterRestartResponse = yield* harness.fetchText("/v1/local/installation"); expect(installationAfterRestartResponse.status).toBe(200); const installationAfterRestart = JSON.parse( installationAfterRestartResponse.body, ) as { workspaceId: string; accountId: string; }; expect(installationAfterRestart.workspaceId).toBe(installation.workspaceId); expect(installationAfterRestart.accountId).toBe(installation.accountId); yield* runCommand(["down", "--base-url", harness.baseUrl]); }); it.live("boots a staged package artifact in a fresh home", () => verifyInstallFlow((args, options) => Effect.flatMap(DistributionHarness, (harness) => harness.run(args, options)) ) .pipe(Effect.provide(LocalDistributionHarnessLive)), 240_000); it.live("boots an npm-installed package in a fresh home", () => verifyInstallFlow((args, options) => Effect.flatMap(DistributionHarness, (harness) => harness.runInstalled(args, options)) ).pipe(Effect.provide(LocalDistributionHarnessLive)), 240_000); }); ================================================ FILE: apps/executor/src/distribution/harness.ts ================================================ import { spawn } from "node:child_process"; import { createServer } from "node:net"; import { tmpdir } from "node:os"; import { dirname, join } from "node:path"; import { FileSystem } from "@effect/platform"; import { NodeFileSystem } from "@effect/platform-node"; import * 
as Context from "effect/Context"; import * as Effect from "effect/Effect"; import * as Layer from "effect/Layer"; import { buildDistributionPackage } from "./artifact"; import { executorAppEffectError } from "../effect-errors"; const toError = (cause: unknown): Error => cause instanceof Error ? cause : new Error(String(cause)); type CommandResult = { readonly stdout: string; readonly stderr: string; readonly exitCode: number; }; export class DistributionHarness extends Context.Tag( "@executor/apps/executor/distribution/DistributionHarness", )< DistributionHarness, { readonly packageDir: string; readonly launcherPath: string; readonly tarballPath: string; readonly executorHome: string; readonly baseUrl: string; readonly writeProjectConfig: ( contents: string, ) => Effect.Effect; readonly run: ( args: ReadonlyArray, options?: { readonly okExitCodes?: ReadonlyArray; }, ) => Effect.Effect; readonly runInstalled: ( args: ReadonlyArray, options?: { readonly okExitCodes?: ReadonlyArray; }, ) => Effect.Effect; readonly fetchText: ( pathname: string, ) => Effect.Effect<{ readonly status: number; readonly body: string; readonly contentType: string | null; }, Error, never>; } >() {} const runCommand = (input: { readonly command: string; readonly args: ReadonlyArray; readonly cwd: string; readonly env?: NodeJS.ProcessEnv; readonly okExitCodes?: ReadonlyArray; }): Effect.Effect => Effect.async((resume) => { const child = spawn(input.command, [...input.args], { cwd: input.cwd, env: input.env, stdio: ["ignore", "pipe", "pipe"], }); let stdout = ""; let stderr = ""; child.stdout?.setEncoding("utf8"); child.stdout?.on("data", (chunk) => { stdout += chunk; }); child.stderr?.setEncoding("utf8"); child.stderr?.on("data", (chunk) => { stderr += chunk; }); child.once("error", (error) => { resume(Effect.fail(error)); }); child.once("close", (code) => { const exitCode = code ?? 
-1; const result = { stdout: stdout.trim(), stderr: stderr.trim(), exitCode, } satisfies CommandResult; const okExitCodes = input.okExitCodes ?? [0]; if (okExitCodes.includes(exitCode)) { resume(Effect.succeed(result)); return; } resume(Effect.fail(executorAppEffectError("distribution/harness", [ `${input.command} ${input.args.join(" ")} exited with code ${exitCode}`, stdout.length > 0 ? `stdout:\n${stdout.trim()}` : null, stderr.length > 0 ? `stderr:\n${stderr.trim()}` : null, ].filter((part) => part !== null).join("\n\n"), ))); }); return Effect.sync(() => { child.kill("SIGTERM"); }); }); const allocatePort = (): Effect.Effect => Effect.tryPromise({ try: async () => { return await new Promise((resolvePort, reject) => { const server = createServer(); server.once("error", reject); server.listen(0, "127.0.0.1", () => { const address = server.address(); if (!address || typeof address === "string") { server.close(() => reject(new Error("Failed to allocate test port"))); return; } server.close((error) => { if (error) { reject(error); return; } resolvePort(address.port); }); }); }); }, catch: (cause) => cause instanceof Error ? cause : new Error(String(cause)), }); const buildPackage = (packageDir: string) => Effect.tryPromise({ try: () => buildDistributionPackage({ outputDir: packageDir, buildWeb: false, }), catch: (cause) => cause instanceof Error ? 
cause : new Error(String(cause)), }); const packPackage = (packageDir: string, outputDir: string) => runCommand({ command: "npm", args: ["pack", packageDir], cwd: outputDir, }).pipe( Effect.flatMap((result) => { const tarballName = result.stdout .split(/\s+/) .map((part) => part.trim()) .filter((part) => part.length > 0) .at(-1); if (!tarballName) { return Effect.fail( executorAppEffectError("distribution/harness", `Unable to determine tarball name from npm pack output: ${result.stdout}`), ); } return Effect.succeed(join(outputDir, tarballName)); }), ); export const LocalDistributionHarnessLive = Layer.scoped( DistributionHarness, Effect.gen(function* () { const fs = yield* FileSystem.FileSystem; const tempRoot = yield* Effect.acquireRelease( fs.makeTempDirectory({ directory: tmpdir(), prefix: "executor-distribution-", }).pipe(Effect.mapError(toError)), (path) => fs.remove(path, { recursive: true, force: true }).pipe( Effect.mapError(toError), Effect.orDie, ), ); const packageDir = join(tempRoot, "package"); const prefixDir = join(tempRoot, "prefix"); const homeDir = join(tempRoot, "home"); const executorHome = join(homeDir, ".executor"); const stagedWorkspaceRoot = packageDir; const installedWorkspaceRoot = tempRoot; const baseUrl = `http://127.0.0.1:${yield* allocatePort()}`; yield* Effect.all([ fs.makeDirectory(prefixDir, { recursive: true }), fs.makeDirectory(homeDir, { recursive: true }), fs.makeDirectory(executorHome, { recursive: true }), ]).pipe(Effect.mapError(toError)); const artifact = yield* buildPackage(packageDir); const tarballPath = yield* packPackage(packageDir, tempRoot); const env = { ...process.env, HOME: homeDir, EXECUTOR_HOME: executorHome, } satisfies NodeJS.ProcessEnv; const installedEnv = { ...env, NPM_CONFIG_PREFIX: prefixDir, PATH: `${join(prefixDir, "bin")}:${process.env.PATH ?? 
""}`, } satisfies NodeJS.ProcessEnv; yield* runCommand({ command: "npm", args: ["install", "-g", tarballPath], cwd: tempRoot, env: installedEnv, }); const run = ( args: ReadonlyArray, options?: { readonly okExitCodes?: ReadonlyArray }, ) => runCommand({ command: "node", args: [artifact.launcherPath, ...args], cwd: dirname(artifact.launcherPath), env, okExitCodes: options?.okExitCodes, }); const runInstalled = ( args: ReadonlyArray, options?: { readonly okExitCodes?: ReadonlyArray }, ) => runCommand({ command: "executor", args, cwd: tempRoot, env: installedEnv, okExitCodes: options?.okExitCodes, }); const fetchText = (pathname: string) => Effect.tryPromise({ try: async () => { const response = await fetch(new URL(pathname, baseUrl)); return { status: response.status, body: await response.text(), contentType: response.headers.get("content-type"), }; }, catch: (cause) => cause instanceof Error ? cause : new Error(String(cause)), }); const writeProjectConfig = (contents: string) => Effect.forEach( [stagedWorkspaceRoot, installedWorkspaceRoot], (workspaceRoot) => Effect.gen(function* () { const configDir = join(workspaceRoot, ".executor"); yield* fs.makeDirectory(configDir, { recursive: true }).pipe( Effect.mapError(toError), ); yield* fs.writeFileString( join(configDir, "executor.jsonc"), contents, ).pipe(Effect.mapError(toError)); }), { discard: true }, ); return DistributionHarness.of({ packageDir, launcherPath: artifact.launcherPath, tarballPath, executorHome, baseUrl, writeProjectConfig, run, runInstalled, fetchText, }); }).pipe(Effect.provide(NodeFileSystem.layer)), ); ================================================ FILE: apps/executor/src/distribution/metadata.ts ================================================ import { readFile } from "node:fs/promises"; import { dirname, resolve } from "node:path"; import { fileURLToPath } from "node:url"; export const repoRoot = resolve(dirname(fileURLToPath(import.meta.url)), "../../../.."); const packageMetadataPath = 
resolve(repoRoot, "apps/executor/package.json");

// Shape of the fields read from apps/executor/package.json.
// NOTE(review): generic type arguments (e.g. ReadonlyArray<...>, Promise<...>)
// appear to have been stripped by the extraction tool; confirm against the
// original source before editing.
export type DistributionPackageMetadata = {
  name: string;
  version: string;
  description: string;
  keywords: ReadonlyArray;
  homepage?: string;
  bugs?: { url?: string; };
  repository?: { type?: string; url?: string; };
  license?: string;
};

// Reads apps/executor/package.json and returns its metadata with defaults
// filled in for every missing field, so the publish pipeline always gets a
// complete record.
export const readDistributionPackageMetadata = async (): Promise => {
  const contents = await readFile(packageMetadataPath, "utf8");
  const metadata = JSON.parse(contents) as Partial;
  return {
    name: metadata.name ?? "executor",
    version: metadata.version ?? "0.0.0-local",
    description: metadata.description ?? "Local AI executor with a CLI, local API server, and web UI.",
    // Keep only string entries from keywords; otherwise use the canned list.
    keywords: Array.isArray(metadata.keywords) ? metadata.keywords.filter((value): value is string => typeof value === "string") : ["executor", "ai", "agent", "cli"],
    homepage: metadata.homepage,
    bugs: metadata.bugs,
    repository: metadata.repository,
    license: metadata.license ?? "MIT",
  };
};

================================================ FILE: apps/executor/src/distribution/publish.ts ================================================

import { spawn } from "node:child_process";
import { mkdir, rm } from "node:fs/promises";
import { join, resolve } from "node:path";
import { buildDistributionPackage } from "./artifact";
import { readDistributionPackageMetadata, repoRoot } from "./metadata";

// Staging directory for packed release tarballs.
const defaultReleaseDir = resolve(repoRoot, "apps/executor/dist/release");

// Semver 2.0.0 pattern: core x.y.z plus optional prerelease and build metadata.
const semverPattern = /^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[A-Za-z-][0-9A-Za-z-]*)(?:\.(?:0|[1-9]\d*|\d*[A-Za-z-][0-9A-Za-z-]*))*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?$/;

type PublishCliOptions = { dryRun: boolean; };
type CommandInput = { command: string; args: ReadonlyArray; cwd: string; };
type CommandOutput = { stdout: string; stderr: string; };
// One entry of `npm pack --json` output.
type PackResult = { filename?: string; };

// Parses CLI flags; only --dry-run is recognized, anything else throws.
const parseArgs = (argv: ReadonlyArray): PublishCliOptions => {
  const options: PublishCliOptions = {
    dryRun: false,
  };
  for (const arg of argv) {
    if
(arg === "--dry-run") {
      options.dryRun = true;
      continue;
    }
    throw new Error(`Unknown argument: ${arg}`);
  }
  return options;
};

// Spawns a command with inherited environment, captures stdout/stderr, and
// throws with both streams attached when the process exits non-zero or fails
// to spawn. Returns trimmed output on success.
const runCommand = async (input: CommandInput): Promise => {
  const child = spawn(input.command, [...input.args], {
    cwd: input.cwd,
    env: process.env,
    stdio: ["ignore", "pipe", "pipe"],
  });
  let stdout = "";
  let stderr = "";
  child.stdout?.setEncoding("utf8");
  child.stdout?.on("data", (chunk) => { stdout += chunk; });
  child.stderr?.setEncoding("utf8");
  child.stderr?.on("data", (chunk) => { stderr += chunk; });
  const exitCode = await new Promise((resolveExitCode, reject) => {
    child.once("error", reject);
    // `close` can report a null code (process killed by signal); normalize to -1.
    child.once("close", (code) => { resolveExitCode(code ?? -1); });
  });
  if (exitCode !== 0) {
    throw new Error(
      [
        `${input.command} ${input.args.join(" ")} exited with code ${exitCode}`,
        stdout.trim().length > 0 ? `stdout:\n${stdout.trim()}` : null,
        stderr.trim().length > 0 ? `stderr:\n${stderr.trim()}` : null,
      ]
        .filter((part) => part !== null)
        .join("\n\n"),
    );
  }
  return {
    stdout: stdout.trim(),
    stderr: stderr.trim(),
  };
};

// Resolves the git tag from GitHub Actions environment variables, preferring
// GITHUB_REF_NAME when GITHUB_REF_TYPE is "tag" and falling back to parsing
// a refs/tags/ prefix off GITHUB_REF. Undefined outside a tag build.
const resolveTagFromEnvironment = (): string | undefined => {
  const refName = process.env.GITHUB_REF_NAME?.trim();
  if (process.env.GITHUB_REF_TYPE === "tag" && refName) {
    return refName;
  }
  const ref = process.env.GITHUB_REF?.trim();
  if (ref?.startsWith("refs/tags/")) {
    return ref.slice("refs/tags/".length);
  }
  return undefined;
};

// Returns the repository slug from GH_REPO or GITHUB_REPOSITORY; throws when
// neither is set, since `gh release create` needs an explicit --repo.
const resolveGitHubRepository = (): string => {
  const repository = process.env.GH_REPO?.trim() || process.env.GITHUB_REPOSITORY?.trim();
  if (!repository) {
    throw new Error("Set GH_REPO or GITHUB_REPOSITORY before creating a GitHub release.");
  }
  return repository;
};

// Throws unless `version` matches the semver pattern above.
const validateVersion = (version: string): void => {
  if (!semverPattern.test(version)) {
    throw new Error(`apps/executor/package.json version is not valid semver: ${version}`);
  }
};

// Any prerelease version (contains "-") publishes under the "beta" dist-tag.
const resolveChannel = (version: string): "latest" | "beta" =>
  version.includes("-") ?
"beta" : "latest";

// Runs `npm pack --json` against the built package directory and returns the
// absolute path of the tarball it produced inside `releaseDir`.
const packDistributionPackage = async (releaseDir: string): Promise => {
  const output = await runCommand({
    command: "npm",
    args: ["pack", "./apps/executor/dist/npm", "--pack-destination", releaseDir, "--json"],
    cwd: repoRoot,
  });
  // `npm pack --json` prints an array of results; we pack one package.
  const [result] = JSON.parse(output.stdout) as ReadonlyArray;
  const filename = result?.filename;
  if (!filename) {
    throw new Error(`npm pack did not report an output filename. stdout:\n${output.stdout}`);
  }
  return join(releaseDir, filename);
};

// Publishes ./dist/npm under the given dist-tag with public access; adds npm
// provenance attestation when running inside GitHub Actions.
const publishDistributionPackage = async (channel: "latest" | "beta"): Promise => {
  const args = ["publish", "./dist/npm", "--access", "public", "--tag", channel];
  if (process.env.GITHUB_ACTIONS === "true") {
    args.push("--provenance");
  }
  await runCommand({
    command: "npm",
    args,
    cwd: resolve(repoRoot, "apps/executor"),
  });
};

// Creates a GitHub release for the tag via the `gh` CLI, attaching the packed
// tarball. Beta releases are marked prerelease; latest releases are marked
// --latest. Requires GH_TOKEN and an existing, verified tag.
const createGitHubRelease = async (input: { tag: string; channel: "latest" | "beta"; assetPath: string; }): Promise => {
  if (!process.env.GH_TOKEN?.trim()) {
    throw new Error("GH_TOKEN is required to create a GitHub release.");
  }
  const args = [
    "release",
    "create",
    input.tag,
    input.assetPath,
    "--repo",
    resolveGitHubRepository(),
    "--title",
    input.tag,
    "--generate-notes",
    "--verify-tag",
  ];
  if (input.channel === "beta") {
    args.push("--prerelease");
  } else {
    args.push("--latest");
  }
  await runCommand({
    command: "gh",
    args,
    cwd: repoRoot,
  });
};

// Publish pipeline entry point: validate the version and any CI tag, rebuild
// the release staging dir, build + pack the package, then (unless --dry-run)
// publish to npm and cut the GitHub release.
const main = async () => {
  const options = parseArgs(process.argv.slice(2));
  const metadata = await readDistributionPackageMetadata();
  const version = metadata.version;
  const tag = `v${version}`;
  const refTag = resolveTagFromEnvironment();
  validateVersion(version);
  // Refuse to publish when the CI tag disagrees with package.json.
  if (refTag && refTag !== tag) {
    throw new Error(`GitHub tag ${refTag} does not match apps/executor/package.json version ${version}`);
  }
  const channel = resolveChannel(version);
  await rm(defaultReleaseDir, { recursive: true, force: true });
  await mkdir(defaultReleaseDir, { recursive: true });
  await buildDistributionPackage();
  const archivePath = await
packDistributionPackage(defaultReleaseDir);
  process.stdout.write(`Prepared executor@${version} for ${channel}\n`);
  process.stdout.write(`${archivePath}\n`);
  // Dry runs stop after building and packing, before any publish side effects.
  if (options.dryRun) {
    return;
  }
  await publishDistributionPackage(channel);
  await createGitHubRelease({
    tag,
    channel,
    assetPath: archivePath,
  });
};

await main();

================================================ FILE: apps/executor/src/effect-errors.ts ================================================

import * as Data from "effect/Data";

// Tagged error for failures raised by app-level modules; `module` names the
// emitting subsystem (e.g. "distribution/harness" as seen in harness.ts).
export class ExecutorAppEffectError extends Data.TaggedError(
  "ExecutorAppEffectError",
)<{ readonly module: string; readonly message: string; }> {}

// Convenience constructor for ExecutorAppEffectError.
export const executorAppEffectError = (
  module: string,
  message: string,
) => new ExecutorAppEffectError({ module, message });

// Raised when waiting for the local server to become reachable (start) or
// unreachable (shutdown) times out; carries the daemon log file path and an
// optional tail of recent log output for diagnostics.
export class LocalServerReachabilityTimeoutError extends Data.TaggedError(
  "LocalServerReachabilityTimeoutError",
)<{ readonly baseUrl: string; readonly action: "start" | "shutdown"; readonly logFile: string; readonly logTail: string | null; readonly message: string; }> {}

// Builds the timeout error; `expected` true means we were waiting for the
// server to start, false means waiting for shutdown. The message embeds the
// captured log tail when one is available, otherwise just the log path.
export const localServerReachabilityTimeoutError = (input: { baseUrl: string; expected: boolean; logFile: string; logTail: string | null; }) =>
  new LocalServerReachabilityTimeoutError({
    baseUrl: input.baseUrl,
    action: input.expected ? "start" : "shutdown",
    logFile: input.logFile,
    logTail: input.logTail,
    message: input.logTail === null
      ? `Timed out waiting for local executor server ${input.expected ? "start" : "shutdown"} at ${input.baseUrl}\n\nDaemon log: ${input.logFile}`
      : `Timed out waiting for local executor server ${input.expected ?
"start" : "shutdown"} at ${input.baseUrl}\n\nRecent daemon log (${input.logFile}):\n${input.logTail}`, }); ================================================ FILE: apps/executor/src/server/server.real-ingestion.test.ts ================================================ import { FileSystem } from "@effect/platform"; import { NodeFileSystem } from "@effect/platform-node"; import { describe, expect, it } from "@effect/vitest"; import * as Effect from "effect/Effect"; import { createControlPlaneClient, type SourceDiscoveryKind, } from "@executor/control-plane"; import { createLocalExecutorServer } from "@executor/server"; const REAL_VERCEL_SPEC_URL = "https://openapi.vercel.sh"; const REAL_VERCEL_API_ENDPOINT = "https://api.vercel.com"; const REAL_LINEAR_GRAPHQL_ENDPOINT = "https://api.linear.app/graphql"; const REAL_DEEPWIKI_MCP_ENDPOINT = "https://mcp.deepwiki.com/mcp"; const makeTempWorkspaceRoot = () => FileSystem.FileSystem.pipe( Effect.flatMap((fs) => fs.makeTempDirectory({ prefix: "executor-real-ingestion-" })), Effect.provide(NodeFileSystem.layer), ); const createIsolatedLocalExecutorServer = () => Effect.gen(function* () { const workspaceRoot = yield* makeTempWorkspaceRoot(); return yield* createLocalExecutorServer({ port: 0, localDataDir: ":memory:", workspaceRoot, }); }); const createApiClientHarness = () => Effect.gen(function* () { const server = yield* createIsolatedLocalExecutorServer(); const bootstrapClient = yield* createControlPlaneClient({ baseUrl: server.baseUrl, }); const installation = yield* bootstrapClient.local.installation({}); const client = yield* createControlPlaneClient({ baseUrl: server.baseUrl, accountId: installation.accountId, }); return { installation, client, }; }); const canonicalUrl = (value: string): string => new URL(value).toString(); const expectDiscoveredKind = ( result: T, expectedKind: SourceDiscoveryKind, ) => { expect(result.detectedKind).toBe(expectedKind); }; describe("local-executor-server real ingestion", () => { 
it.live("discovers and ingests the real Vercel OpenAPI source through the API client", () => Effect.scoped(Effect.gen(function* () { const { installation, client } = yield* createApiClientHarness(); const discovered = yield* client.sources.discover({ payload: { url: REAL_VERCEL_SPEC_URL, }, }); expectDiscoveredKind(discovered, "openapi"); expect(canonicalUrl(discovered.endpoint)).toBe(canonicalUrl(REAL_VERCEL_API_ENDPOINT)); expect(canonicalUrl(discovered.specUrl ?? "")).toBe(canonicalUrl(REAL_VERCEL_SPEC_URL)); const connected = yield* client.sources.connect({ path: { workspaceId: installation.workspaceId, }, payload: { kind: "openapi", name: "Vercel", namespace: "vercel", endpoint: REAL_VERCEL_API_ENDPOINT, specUrl: REAL_VERCEL_SPEC_URL, auth: { kind: "none", }, }, }); expect(connected.kind).toBe("connected"); if (connected.kind !== "connected") { throw new Error(`Expected connected result, received ${connected.kind}`); } const inspection = yield* client.sources.inspection({ path: { workspaceId: installation.workspaceId, sourceId: connected.source.id, }, }); expect(inspection.namespace).toBe("vercel"); expect(inspection.toolCount).toBeGreaterThan(250); const discoveredTools = yield* client.sources.inspectionDiscover({ path: { workspaceId: installation.workspaceId, sourceId: connected.source.id, }, payload: { query: "list user events", limit: 5, }, }); expect(discoveredTools.bestPath).toBeTruthy(); expect(discoveredTools.total).toBeGreaterThan(0); })), 15_000, ); it.live("discovers and ingests the real Linear GraphQL source through the API client", () => Effect.scoped(Effect.gen(function* () { const { installation, client } = yield* createApiClientHarness(); const discovered = yield* client.sources.discover({ payload: { url: REAL_LINEAR_GRAPHQL_ENDPOINT, }, }); expectDiscoveredKind(discovered, "graphql"); expect(canonicalUrl(discovered.endpoint)).toBe(canonicalUrl(REAL_LINEAR_GRAPHQL_ENDPOINT)); expect(discovered.specUrl).toBeNull(); const connected = yield* 
client.sources.connect({ path: { workspaceId: installation.workspaceId, }, payload: { kind: "graphql", name: "Linear", namespace: "linear", endpoint: REAL_LINEAR_GRAPHQL_ENDPOINT, auth: { kind: "none", }, }, }); expect(connected.kind).toBe("connected"); if (connected.kind !== "connected") { throw new Error(`Expected connected result, received ${connected.kind}`); } expect(connected.source.status).toBe("connected"); expect(connected.source.namespace).toBe("linear"); const storedSource = yield* client.sources.get({ path: { workspaceId: installation.workspaceId, sourceId: connected.source.id, }, }); expect(storedSource.kind).toBe("graphql"); expect(storedSource.status).toBe("connected"); expect(storedSource.endpoint).toBe(REAL_LINEAR_GRAPHQL_ENDPOINT); })), 15_000, ); it.live("discovers and ingests a real public MCP source through the API client", () => Effect.scoped(Effect.gen(function* () { const { installation, client } = yield* createApiClientHarness(); const discovered = yield* client.sources.discover({ payload: { url: REAL_DEEPWIKI_MCP_ENDPOINT, }, }); expectDiscoveredKind(discovered, "mcp"); expect(canonicalUrl(discovered.endpoint)).toBe(canonicalUrl(REAL_DEEPWIKI_MCP_ENDPOINT)); const connected = yield* client.sources.connect({ path: { workspaceId: installation.workspaceId, }, payload: { kind: "mcp", name: "DeepWiki", namespace: "deepwiki", endpoint: REAL_DEEPWIKI_MCP_ENDPOINT, }, }); expect(connected.kind).toBe("connected"); if (connected.kind !== "connected") { throw new Error(`Expected connected result, received ${connected.kind}`); } const inspection = yield* client.sources.inspection({ path: { workspaceId: installation.workspaceId, sourceId: connected.source.id, }, }); expect(inspection.namespace).toBe("deepwiki"); expect(inspection.toolCount).toBeGreaterThan(0); const discoveredTools = yield* client.sources.inspectionDiscover({ path: { workspaceId: installation.workspaceId, sourceId: connected.source.id, }, payload: { query: "repository", limit: 5, }, 
}); expect(discoveredTools.bestPath).toBeTruthy(); expect(discoveredTools.total).toBeGreaterThan(0); })), 15_000, ); }); ================================================ FILE: apps/executor/src/server/server.test.ts ================================================ import { createServer, type IncomingMessage, type ServerResponse } from "node:http"; import { randomUUID } from "node:crypto"; import { HttpApi, HttpApiEndpoint, HttpApiGroup, HttpApiSchema, OpenApi, FileSystem, } from "@effect/platform"; import { NodeFileSystem } from "@effect/platform-node"; import { describe, expect, it } from "@effect/vitest"; import { assertTrue } from "@effect/vitest/utils"; import * as Effect from "effect/Effect"; import * as Schema from "effect/Schema"; import { startMcpElicitationDemoServer } from "@executor/mcp-elicitation-demo"; import { makeToolInvokerFromTools, toTool } from "@executor/codemode-core"; import { createControlPlaneClient, controlPlaneOpenApiSpec, buildLocalSourceArtifact, catalogSyncResultFromMcpManifest, deriveLocalInstallation, type ResolveExecutionEnvironment, resolveLocalWorkspaceContext, SourceIdSchema, writeLocalSourceArtifact, writeProjectLocalExecutorConfig, } from "@executor/control-plane"; import { makeSesExecutor } from "@executor/runtime-ses"; import { Client } from "@modelcontextprotocol/sdk/client/index.js"; import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js"; import { createMcpExpressApp } from "@modelcontextprotocol/sdk/server/express.js"; import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js"; import { ElicitRequestSchema } from "@modelcontextprotocol/sdk/types.js"; import { seedDemoMcpSourceInWorkspace, seedGithubOpenApiSourceInWorkspace, } from "../cli/dev"; import { createLocalExecutorServer } from "@executor/server"; const executionResolver: ResolveExecutionEnvironment = () => 
Effect.succeed({ executor: makeSesExecutor(), toolInvoker: makeToolInvokerFromTools({ tools: { "math.add": { description: "Add two numbers", inputSchema: Schema.standardSchemaV1( Schema.Struct({ a: Schema.optional(Schema.Number), b: Schema.optional(Schema.Number), }), ), execute: ({ a, b }) => ({ sum: (a ?? 0) + (b ?? 0) }), }, }, }), }); const makeTempWorkspaceRoot = () => FileSystem.FileSystem.pipe( Effect.flatMap((fs) => fs.makeTempDirectoryScoped({ prefix: "executor-server-test-" })), Effect.provide(NodeFileSystem.layer), ); const createIsolatedLocalExecutorServer = ( options: Parameters[0] = {}, ) => Effect.gen(function* () { const workspaceRoot = yield* makeTempWorkspaceRoot(); const server = yield* createLocalExecutorServer({ ...options, workspaceRoot, }); return { ...server, workspaceRoot, }; }); const writeConfiguredLocalMcpSource = (input: { workspaceRoot: string; sourceId: string; endpoint: string; name?: string; namespace?: string; }) => Effect.gen(function* () { const sourceId = SourceIdSchema.make(input.sourceId); const context = yield* resolveLocalWorkspaceContext({ workspaceRoot: input.workspaceRoot, }); const installation = deriveLocalInstallation(context); yield* writeProjectLocalExecutorConfig({ context, config: { sources: { [input.sourceId]: { kind: "mcp", name: input.name ?? "Demo", namespace: input.namespace ?? input.sourceId, connection: { endpoint: input.endpoint, }, binding: { transport: "streamable-http", }, }, }, }, }); const source = { id: sourceId, workspaceId: installation.workspaceId, name: input.name ?? "Demo", kind: "mcp" as const, endpoint: input.endpoint, status: "connected" as const, enabled: true, namespace: input.namespace ?? 
input.sourceId, bindingVersion: 1, binding: { transport: "streamable-http", queryParams: null, headers: null, }, importAuthPolicy: "reuse_runtime" as const, importAuth: { kind: "none" as const }, auth: { kind: "none" as const }, sourceHash: null, lastError: null, createdAt: Date.now(), updatedAt: Date.now(), }; const syncResult = catalogSyncResultFromMcpManifest({ source, endpoint: input.endpoint, manifest: { version: 2, server: null, tools: [{ toolId: "gated_echo", toolName: "gated_echo", description: "Asks for approval before echoing a value", inputSchema: { type: "object", properties: { value: { type: "string", }, }, required: ["value"], additionalProperties: false, }, }], }, }); yield* writeLocalSourceArtifact({ context, sourceId, artifact: buildLocalSourceArtifact({ source, syncResult, }), }); }).pipe(Effect.provide(NodeFileSystem.layer)); const makeServer = createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", executionResolver, }); const createApiClientHarness = () => Effect.gen(function* () { const server = yield* createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", }); const bootstrapClient = yield* createControlPlaneClient({ baseUrl: server.baseUrl, }); const installation = yield* bootstrapClient.local.installation({}); const client = yield* createControlPlaneClient({ baseUrl: server.baseUrl, accountId: installation.accountId, }); return { server, bootstrapClient, installation, client, }; }); const gatedExecutionResolver: ResolveExecutionEnvironment = ({ onElicitation }) => Effect.succeed({ executor: makeSesExecutor(), toolInvoker: makeToolInvokerFromTools({ tools: { "demo.gated_echo": toTool({ tool: { description: "Asks for approval before echoing a value", inputSchema: Schema.standardSchemaV1( Schema.Struct({ value: Schema.String, approve: Schema.optional(Schema.Boolean), }), ), execute: ({ value, approve }: { value: string; approve?: boolean }) => approve === true ? 
`approved:${value}` : `denied:${value}`, }, metadata: { sourceKey: "demo", elicitation: { mode: "form", message: "Approve gated echo?", requestedSchema: { type: "object", properties: { approve: { type: "boolean", title: "Approve", }, }, required: ["approve"], }, }, }, }), }, onElicitation, }), }); type ExecutorMcpClient = { client: Client; close: () => Promise; }; type ElicitationHandler = NonNullable[1]>; const makeExecutorMcpClient = (input: { baseUrl: string; capabilities?: Record; onElicitation?: ElicitationHandler; }) => Effect.acquireRelease( Effect.promise(async () => { const client = new Client( { name: "executor-mcp-test-client", version: "1.0.0" }, { capabilities: input.capabilities ?? {} }, ); if (input.onElicitation) { client.setRequestHandler(ElicitRequestSchema, input.onElicitation); } const transport = new StreamableHTTPClientTransport(new URL(`${input.baseUrl}/mcp`)); await client.connect(transport); return { client, close: async () => { await client.close(); }, }; }), ({ close }) => Effect.promise(() => close()).pipe(Effect.orDie), ); const ownerParam = HttpApiSchema.param("owner", Schema.String); const repoParam = HttpApiSchema.param("repo", Schema.String); class ExecutorDemoReposApi extends HttpApiGroup.make("repos") .add( HttpApiEndpoint.get("getRepo")`/repos/${ownerParam}/${repoParam}` .addSuccess( Schema.Struct({ full_name: Schema.String, private: Schema.Boolean, }), ), ) {} class ExecutorDemoApi extends HttpApi.make("executorDemo").add(ExecutorDemoReposApi) {} const executorDemoOpenApiSpec = OpenApi.fromApi(ExecutorDemoApi); const startOpenApiDemoServer = async () => { const seenAuthHeaders: Array = []; const handler = (req: IncomingMessage, res: ServerResponse) => { if (req.url === "/openapi.json") { res.statusCode = 200; res.setHeader("content-type", "application/json"); res.end(JSON.stringify(executorDemoOpenApiSpec)); return; } const match = req.url?.match(/^\/repos\/([^/]+)\/([^/]+)$/); if (req.method === "GET" && match) { 
seenAuthHeaders.push( typeof req.headers.authorization === "string" ? req.headers.authorization : null, ); res.statusCode = 200; res.setHeader("content-type", "application/json"); res.end( JSON.stringify({ full_name: `${decodeURIComponent(match[1] ?? "")}/${decodeURIComponent(match[2] ?? "")}`, private: false, }), ); return; } res.statusCode = 404; res.end("not found"); }; const server = createServer(handler); await new Promise((resolve, reject) => { server.once("error", reject); server.listen(0, "127.0.0.1", () => resolve()); }); const address = server.address(); if (!address || typeof address === "string") { throw new Error("Failed to bind OpenAPI demo server"); } return { baseUrl: `http://127.0.0.1:${address.port}`, specUrl: `http://127.0.0.1:${address.port}/openapi.json`, seenAuthHeaders, close: () => new Promise((resolve, reject) => { server.close((error) => (error ? reject(error) : resolve())); }), }; }; const startMutatingOpenApiDemoServer = async () => { const createdBodies: Array> = []; const openApiDocument = JSON.stringify({ openapi: "3.0.3", info: { title: "Executor DNS Demo API", version: "1.0.0", }, paths: { "/records": { post: { operationId: "records.createRecord", tags: ["records"], summary: "Create a DNS record", requestBody: { required: true, content: { "application/json": { schema: { type: "object", properties: { type: { type: "string" }, name: { type: "string" }, value: { type: "string" }, }, required: ["type", "value"], }, }, }, }, responses: { 200: { description: "ok", }, }, }, }, }, }); const handler = (req: IncomingMessage, res: ServerResponse) => { if (req.url === "/openapi.json") { res.statusCode = 200; res.setHeader("content-type", "application/json"); res.end(openApiDocument); return; } if (req.method === "POST" && req.url === "/records") { const chunks: Array = []; req.on("data", (chunk) => { chunks.push(Buffer.isBuffer(chunk) ? 
chunk : Buffer.from(chunk)); }); req.on("end", () => { const raw = Buffer.concat(chunks).toString("utf8"); const parsed = JSON.parse(raw) as Record; createdBodies.push(parsed); res.statusCode = 200; res.setHeader("content-type", "application/json"); res.end(JSON.stringify({ ok: true, id: `rec_${createdBodies.length}`, record: parsed, })); }); return; } res.statusCode = 404; res.end("not found"); }; const server = createServer(handler); await new Promise((resolve, reject) => { server.once("error", reject); server.listen(0, "127.0.0.1", () => resolve()); }); const address = server.address(); if (!address || typeof address === "string") { throw new Error("Failed to bind mutating OpenAPI demo server"); } return { baseUrl: `http://127.0.0.1:${address.port}`, specUrl: `http://127.0.0.1:${address.port}/openapi.json`, createdBodies, close: () => new Promise((resolve, reject) => { server.close((error) => (error ? reject(error) : resolve())); }), }; }; const DEMO_OAUTH_ACCESS_TOKEN = "demo-access-token"; const DEMO_OAUTH_REFRESH_TOKEN = "demo-refresh-token"; const startOAuthProtectedMcpServer = async () => { const host = "127.0.0.1"; const app = createMcpExpressApp({ host }); const transports: Record = {}; const servers: Record = {}; const createAuthorizedServer = () => { const server = new McpServer( { name: "executor-oauth-mcp-demo", version: "1.0.0", }, { capabilities: { tools: {}, }, }, ); server.registerTool( "whoami", { description: "Return the active identity for the OAuth demo source.", inputSchema: {}, }, async () => ({ content: [{ type: "text", text: "oauth-demo" }], }), ); return server; }; const listener = await new Promise((resolve, reject) => { const server = app.listen(0, host); const onListening = () => { server.off("error", onError); resolve(server); }; const onError = (error: Error) => { server.off("listening", onListening); reject(error); }; server.once("listening", onListening); server.once("error", onError); }); const address = listener.address(); if 
(!address || typeof address === "string") { throw new Error("Failed to bind OAuth MCP demo server"); } const baseUrl = `http://${host}:${address.port}`; const endpoint = `${baseUrl}/mcp`; const resourceMetadataUrl = `${baseUrl}/.well-known/oauth-protected-resource/mcp`; const requireBearerAuth = (req: IncomingMessage, res: ServerResponse) => { const authorization = req.headers.authorization; if (authorization === `Bearer ${DEMO_OAUTH_ACCESS_TOKEN}`) { return true; } res.statusCode = 401; res.setHeader( "WWW-Authenticate", `Bearer resource_metadata="${resourceMetadataUrl}"`, ); res.setHeader("content-type", "application/json"); res.end( JSON.stringify({ error: "unauthorized", }), ); return false; }; app.get("/.well-known/oauth-protected-resource/mcp", (_req: any, res: any) => { res.status(200).json({ resource: endpoint, authorization_servers: [baseUrl], scopes_supported: ["openid", "offline_access"], bearer_methods_supported: ["header"], }); }); app.get("/.well-known/oauth-authorization-server", (_req: any, res: any) => { res.status(200).json({ issuer: baseUrl, authorization_endpoint: `${baseUrl}/authorize`, token_endpoint: `${baseUrl}/token`, registration_endpoint: `${baseUrl}/register`, response_types_supported: ["code"], grant_types_supported: ["authorization_code", "refresh_token"], code_challenge_methods_supported: ["S256"], token_endpoint_auth_methods_supported: ["none"], }); }); app.post("/register", (req: any, res: any) => { res.status(201).json({ ...req.body, client_id: `client_${randomUUID()}`, client_id_issued_at: Math.floor(Date.now() / 1000), }); }); app.get("/authorize", (req: any, res: any) => { const redirectUri = new URL(String(req.query.redirect_uri)); redirectUri.searchParams.set("code", "demo-code"); if (typeof req.query.state === "string") { redirectUri.searchParams.set("state", req.query.state); } res.redirect(302, redirectUri.toString()); }); app.post("/token", (_req: any, res: any) => { res.status(200).json({ access_token: 
DEMO_OAUTH_ACCESS_TOKEN, token_type: "Bearer", expires_in: 3600, refresh_token: DEMO_OAUTH_REFRESH_TOKEN, scope: "openid offline_access", }); }); app.post("/mcp", async (req: any, res: any) => { if (!requireBearerAuth(req, res)) { return; } const sessionIdHeader = req.headers["mcp-session-id"]; const sessionId = typeof sessionIdHeader === "string" ? sessionIdHeader : Array.isArray(sessionIdHeader) ? sessionIdHeader[0] : undefined; try { let transport: StreamableHTTPServerTransport; if (sessionId && transports[sessionId]) { transport = transports[sessionId]; } else { transport = new StreamableHTTPServerTransport({ sessionIdGenerator: () => randomUUID(), onsessioninitialized: (newSessionId) => { transports[newSessionId] = transport; }, }); transport.onclose = () => { const closedSessionId = transport.sessionId; if (closedSessionId && transports[closedSessionId]) { delete transports[closedSessionId]; } if (closedSessionId && servers[closedSessionId]) { void servers[closedSessionId].close().catch(() => undefined); delete servers[closedSessionId]; } }; const server = createAuthorizedServer(); await server.connect(transport); const newSessionId = transport.sessionId; if (newSessionId) { servers[newSessionId] = server; } } await transport.handleRequest(req, res, req.body); } catch (error) { if (!res.headersSent) { res.status(500).json({ jsonrpc: "2.0", error: { code: -32603, message: error instanceof Error ? error.message : "Internal server error", }, id: null, }); } } }); app.get("/mcp", async (req: any, res: any) => { if (!requireBearerAuth(req, res)) { return; } const sessionIdHeader = req.headers["mcp-session-id"]; const sessionId = typeof sessionIdHeader === "string" ? sessionIdHeader : Array.isArray(sessionIdHeader) ? 
sessionIdHeader[0] : undefined; if (!sessionId || !transports[sessionId]) { res.status(400).send("Invalid or missing session ID"); return; } await transports[sessionId].handleRequest(req, res); }); app.delete("/mcp", async (req: any, res: any) => { if (!requireBearerAuth(req, res)) { return; } const sessionIdHeader = req.headers["mcp-session-id"]; const sessionId = typeof sessionIdHeader === "string" ? sessionIdHeader : Array.isArray(sessionIdHeader) ? sessionIdHeader[0] : undefined; if (!sessionId || !transports[sessionId]) { res.status(400).send("Invalid or missing session ID"); return; } const transport = transports[sessionId]; await transport.handleRequest(req, res, req.body); await transport.close(); delete transports[sessionId]; if (servers[sessionId]) { await servers[sessionId].close().catch(() => undefined); delete servers[sessionId]; } }); return { baseUrl, endpoint, close: async () => { for (const transport of Object.values(transports)) { await transport.close().catch(() => undefined); } for (const server of Object.values(servers)) { await server.close().catch(() => undefined); } await new Promise((resolve, reject) => { listener.close((error) => (error ? 
reject(error) : resolve())); }); }, }; }; const extractUrlInteractionUrl = (payloadJson: string): string => { const parsed = JSON.parse(payloadJson) as { elicitation?: { url?: string; }; }; const url = parsed.elicitation?.url; if (typeof url !== "string" || url.trim().length === 0) { throw new Error("Missing URL elicitation in payloadJson"); } return url; }; describe("local-executor-server", () => { it.scoped("serves the control-plane OpenAPI spec at /v1/openapi.json", () => Effect.gen(function* () { const server = yield* makeServer; const response = yield* Effect.promise(() => fetch(`${server.baseUrl}/v1/openapi.json`, { headers: { accept: "application/json", }, }), ); const spec = yield* Effect.promise(() => response.json()); expect(response.status).toBe(200); expect(response.headers.get("content-type")).toContain("application/json"); expect(spec).toEqual(controlPlaneOpenApiSpec); }), ); it.scoped("serves the control-plane API and executes code", () => Effect.gen(function* () { const server = yield* makeServer; const bootstrapClient = yield* createControlPlaneClient({ baseUrl: server.baseUrl, }); const installation = yield* bootstrapClient.local.installation({}); const client = yield* createControlPlaneClient({ baseUrl: server.baseUrl, accountId: installation.accountId, }); const execution = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: "return await tools.math.add({ a: 20, b: 22 });", }, }); expect(execution.execution.status).toBe("completed"); expect(execution.execution.resultJson).toBe(JSON.stringify({ sum: 42 })); }), 15_000, ); it.scoped("includes completed MCP return values in text content", () => Effect.gen(function* () { const server = yield* makeServer; const mcp = yield* makeExecutorMcpClient({ baseUrl: server.baseUrl, }); const executed = yield* Effect.promise( () => mcp.client.callTool({ name: "execute", arguments: { code: "return await tools.math.add({ a: 20, b: 22 });", }, }) as Promise<{ 
content?: Array<{ type?: string; text?: string; }>; structuredContent?: { status?: string; result?: unknown; }; }>, ); expect(executed.structuredContent?.status).toBe("completed"); expect(executed.structuredContent?.result).toEqual({ sum: 42 }); expect(executed.content?.find((item) => item.type === "text")?.text).toContain("Result:"); expect(executed.content?.find((item) => item.type === "text")?.text).toContain('"sum": 42'); }), ); it.scoped("serves only execute over MCP when elicitation is supported", () => Effect.gen(function* () { const server = yield* createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", executionResolver: gatedExecutionResolver, }); const mcp = yield* makeExecutorMcpClient({ baseUrl: server.baseUrl, capabilities: { elicitation: { form: {}, url: {}, }, }, onElicitation: async () => ({ action: "accept", content: { approve: true, }, }), }); const listed = yield* Effect.promise( () => mcp.client.listTools() as Promise<{ tools: Array<{ name: string; description?: string }> }>, ); expect(listed.tools.map((tool) => tool.name)).toEqual(["execute"]); expect(listed.tools[0]?.description).toContain("Workflow:"); expect(listed.tools[0]?.description).toContain("tools.discover"); expect(listed.tools[0]?.description).toContain("tools.executor.sources.add"); const executed = yield* Effect.promise( () => mcp.client.callTool({ name: "execute", arguments: { code: 'return await tools.demo.gated_echo({ value: "from-mcp" });', }, }) as Promise<{ content?: Array<{ type?: string; text?: string; }>; structuredContent?: { status?: string; result?: unknown; }; }>, ); expect(executed.structuredContent?.status).toBe("completed"); expect(executed.structuredContent?.result).toBe("approved:from-mcp"); expect(executed.content?.find((item) => item.type === "text")?.text).toContain("approved:from-mcp"); }), ); it.scoped("serves execute and resume over MCP when elicitation is unavailable", () => Effect.gen(function* () { const server = yield* 
createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", executionResolver: gatedExecutionResolver, }); const mcp = yield* makeExecutorMcpClient({ baseUrl: server.baseUrl, }); const listed = yield* Effect.promise( () => mcp.client.listTools() as Promise<{ tools: Array<{ name: string; description?: string }> }>, ); expect(listed.tools.map((tool) => tool.name).sort()).toEqual(["execute", "resume"]); expect(listed.tools.find((tool) => tool.name === "execute")?.description).toContain("Workflow:"); expect(listed.tools.find((tool) => tool.name === "execute")?.description).toContain("tools.discover"); const executed = yield* Effect.promise( () => mcp.client.callTool({ name: "execute", arguments: { code: 'return await tools.demo.gated_echo({ value: "manual-mcp" });', }, }) as Promise<{ structuredContent?: { status?: string; interaction?: { message?: string; }; resumePayload?: { executionId?: string; }; }; }>, ); expect(executed.structuredContent?.status).toBe("waiting_for_interaction"); expect(executed.structuredContent?.interaction?.message).toContain("Approve gated echo"); expect(executed.structuredContent?.resumePayload?.executionId).toBeTruthy(); const resumed = yield* Effect.promise( () => mcp.client.callTool({ name: "resume", arguments: { resumePayload: executed.structuredContent?.resumePayload, response: { action: "accept", content: { approve: true, }, }, }, }) as Promise<{ content?: Array<{ type?: string; text?: string; }>; structuredContent?: { status?: string; result?: unknown; }; }>, ); expect(resumed.structuredContent?.status).toBe("completed"); expect(resumed.structuredContent?.result).toBe("approved:manual-mcp"); expect(resumed.content?.find((item) => item.type === "text")?.text).toContain("approved:manual-mcp"); }), 60_000, ); it.scoped("loads MCP sources from control-plane state and resumes elicitation", () => Effect.gen(function* () { const demoServer = yield* Effect.acquireRelease( Effect.promise(() => startMcpElicitationDemoServer()), 
(server) => Effect.promise(() => server.close()).pipe(Effect.orDie), ); const server = yield* createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", }); const bootstrapClient = yield* createControlPlaneClient({ baseUrl: server.baseUrl, }); const installation = yield* bootstrapClient.local.installation({}); const client = yield* createControlPlaneClient({ baseUrl: server.baseUrl, accountId: installation.accountId, }); yield* client.sources.create({ path: { workspaceId: installation.workspaceId, }, payload: { name: "Demo", kind: "mcp", endpoint: demoServer.endpoint, status: "connected", enabled: true, namespace: "demo", binding: { transport: "streamable-http", queryParams: null, headers: null, }, auth: { kind: "none", }, }, }); const created = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: 'return await tools.demo.gated_echo({ value: "from-daemon" });', interactionMode: "live_form", }, }); expect(created.execution.status).toBe("waiting_for_interaction"); expect(created.pendingInteraction).not.toBeNull(); if (created.pendingInteraction !== null) { expect(created.pendingInteraction.kind).toBe("form"); expect(created.pendingInteraction.payloadJson).toContain("Allow gated_echo?"); } const approved = yield* client.executions.resume({ path: { workspaceId: installation.workspaceId, executionId: created.execution.id, }, payload: { interactionMode: "live_form", responseJson: JSON.stringify({ action: "accept", content: { approve: true, }, }), }, }); expect(approved.execution.status).toBe("waiting_for_interaction"); expect(approved.pendingInteraction).not.toBeNull(); if (approved.pendingInteraction !== null) { expect(approved.pendingInteraction.kind).toBe("form"); expect(approved.pendingInteraction.payloadJson).toContain("Approve gated echo for from-daemon?"); } const resumed = yield* client.executions.resume({ path: { workspaceId: installation.workspaceId, executionId: created.execution.id, }, payload: { 
interactionMode: "live_form", responseJson: JSON.stringify({ action: "accept", content: { approve: true, }, }), }, }); expect(resumed.execution.status).toBe("completed"); expect(resumed.pendingInteraction).toBeNull(); expect(resumed.execution.resultJson).toContain("approved:from-daemon"); }), 15_000, ); it.scoped("can run the same MCP elicitation flow more than once without interaction id collisions", () => Effect.gen(function* () { const demoServer = yield* Effect.acquireRelease( Effect.promise(() => startMcpElicitationDemoServer()), (server) => Effect.promise(() => server.close()).pipe(Effect.orDie), ); const server = yield* createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", }); const bootstrapClient = yield* createControlPlaneClient({ baseUrl: server.baseUrl, }); const installation = yield* bootstrapClient.local.installation({}); const client = yield* createControlPlaneClient({ baseUrl: server.baseUrl, accountId: installation.accountId, }); yield* client.sources.create({ path: { workspaceId: installation.workspaceId, }, payload: { name: "Demo", kind: "mcp", endpoint: demoServer.endpoint, status: "connected", enabled: true, namespace: "demo", binding: { transport: "streamable-http", queryParams: null, headers: null, }, auth: { kind: "none", }, }, }); for (const value of ["first", "second"]) { const created = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: `return await tools.demo.gated_echo({ value: "${value}" });`, interactionMode: "live_form", }, }); expect(created.execution.status).toBe("waiting_for_interaction"); expect(created.pendingInteraction).not.toBeNull(); const approved = yield* client.executions.resume({ path: { workspaceId: installation.workspaceId, executionId: created.execution.id, }, payload: { interactionMode: "live_form", responseJson: JSON.stringify({ action: "accept", content: { approve: true, }, }), }, }); 
expect(approved.execution.status).toBe("waiting_for_interaction"); expect(approved.pendingInteraction).not.toBeNull(); if (approved.pendingInteraction !== null) { expect(approved.pendingInteraction.kind).toBe("form"); expect(approved.pendingInteraction.payloadJson).toContain(`Approve gated echo for ${value}?`); } const resumed = yield* client.executions.resume({ path: { workspaceId: installation.workspaceId, executionId: created.execution.id, }, payload: { interactionMode: "live_form", responseJson: JSON.stringify({ action: "accept", content: { approve: true, }, }), }, }); expect(resumed.execution.status).toBe("completed"); expect(resumed.execution.resultJson).toContain(`approved:${value}`); } }), 15_000, ); it.scoped("does not create a duplicate when the demo MCP source already exists", () => Effect.gen(function* () { const demoServer = yield* Effect.acquireRelease( Effect.promise(() => startMcpElicitationDemoServer()), (server) => Effect.promise(() => server.close()).pipe(Effect.orDie), ); const server = yield* createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", }); const bootstrapClient = yield* createControlPlaneClient({ baseUrl: server.baseUrl, }); const installation = yield* bootstrapClient.local.installation({}); const client = yield* createControlPlaneClient({ baseUrl: server.baseUrl, accountId: installation.accountId, }); const existing = yield* client.sources.create({ path: { workspaceId: installation.workspaceId, }, payload: { name: "Demo", kind: "mcp", endpoint: demoServer.endpoint, status: "connected", enabled: true, namespace: "demo", binding: { transport: "streamable-http", queryParams: null, headers: null, }, auth: { kind: "none", }, }, }); const seeded = yield* seedDemoMcpSourceInWorkspace({ client, workspaceId: installation.workspaceId, endpoint: demoServer.endpoint, name: "Demo", namespace: "demo", }); expect(seeded.action).toBe("noop"); expect(seeded.sourceId).toBe(existing.id); const sources = yield* client.sources.list({ 
path: { workspaceId: installation.workspaceId, }, }); expect(sources).toHaveLength(1); expect(sources[0]?.endpoint).toBe(demoServer.endpoint); }), 15_000, ); it.scoped("loads OpenAPI sources from control-plane state and calls them", () => Effect.gen(function* () { const openApiServer = yield* Effect.acquireRelease( Effect.promise(() => startOpenApiDemoServer()), (server) => Effect.promise(() => server.close()).pipe(Effect.orDie), ); const previousGithubToken = process.env.GITHUB_TOKEN; const previousAllowEnvSecrets = process.env.DANGEROUSLY_ALLOW_ENV_SECRETS; yield* Effect.acquireRelease( Effect.sync(() => { process.env.GITHUB_TOKEN = "ghp_test_executor"; process.env.DANGEROUSLY_ALLOW_ENV_SECRETS = "true"; }), () => Effect.sync(() => { if (previousGithubToken === undefined) { delete process.env.GITHUB_TOKEN; } else { process.env.GITHUB_TOKEN = previousGithubToken; } if (previousAllowEnvSecrets === undefined) { delete process.env.DANGEROUSLY_ALLOW_ENV_SECRETS; } else { process.env.DANGEROUSLY_ALLOW_ENV_SECRETS = previousAllowEnvSecrets; } }), ); const server = yield* createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", }); const bootstrapClient = yield* createControlPlaneClient({ baseUrl: server.baseUrl, }); const installation = yield* bootstrapClient.local.installation({}); const client = yield* createControlPlaneClient({ baseUrl: server.baseUrl, accountId: installation.accountId, }); yield* seedGithubOpenApiSourceInWorkspace({ client, workspaceId: installation.workspaceId, endpoint: openApiServer.baseUrl, specUrl: openApiServer.specUrl, name: "GitHub", namespace: "github", }); const execution = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: 'return await tools.github.repos.getRepo({ owner: "vercel", repo: "ai" });', }, }); expect(execution.execution.status).toBe("completed"); expect(execution.pendingInteraction).toBeNull(); 
expect(execution.execution.resultJson).toContain("\"full_name\":\"vercel/ai\""); expect(openApiServer.seenAuthHeaders).toEqual(["Bearer ghp_test_executor"]); }), ); it.live("adds an OpenAPI source through the API client and calls it end to end", () => Effect.scoped(Effect.gen(function* () { const openApiServer = yield* Effect.acquireRelease( Effect.promise(() => startOpenApiDemoServer()), (server) => Effect.promise(() => server.close()).pipe(Effect.orDie), ); const { installation, client } = yield* createApiClientHarness(); const connected = yield* client.sources.connect({ path: { workspaceId: installation.workspaceId, }, payload: { kind: "openapi", name: "GitHub", namespace: "github", endpoint: openApiServer.baseUrl, specUrl: openApiServer.specUrl, auth: { kind: "none", }, }, }); expect(connected.kind).toBe("connected"); if (connected.kind !== "connected") { throw new Error(`Expected connected result, received ${connected.kind}`); } expect(connected.source.status).toBe("connected"); const sources = yield* client.sources.list({ path: { workspaceId: installation.workspaceId, }, }); expect(sources).toHaveLength(1); expect(sources[0]?.namespace).toBe("github"); const execution = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: 'return await tools.github.repos.getRepo({ owner: "vercel", repo: "ai" });', }, }); expect(execution.execution.status).toBe("completed"); expect(execution.pendingInteraction).toBeNull(); expect(execution.execution.resultJson).toContain("\"full_name\":\"vercel/ai\""); expect(openApiServer.seenAuthHeaders).toEqual([null]); })), 15_000, ); it.live("adds an MCP source through the API client and calls it end to end", () => Effect.scoped(Effect.gen(function* () { const demoServer = yield* Effect.acquireRelease( Effect.promise(() => startMcpElicitationDemoServer()), (server) => Effect.promise(() => server.close()).pipe(Effect.orDie), ); const { installation, client } = yield* createApiClientHarness(); 
const connected = yield* client.sources.connect({ path: { workspaceId: installation.workspaceId, }, payload: { kind: "mcp", endpoint: demoServer.endpoint, name: "Demo", namespace: "demo", }, }); expect(connected.kind).toBe("connected"); if (connected.kind !== "connected") { throw new Error(`Expected connected result, received ${connected.kind}`); } expect(connected.source.status).toBe("connected"); const execution = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: 'return await tools.demo.gated_echo({ value: "from-api-client" });', interactionMode: "live_form", }, }); expect(execution.execution.status).toBe("waiting_for_interaction"); expect(execution.pendingInteraction).not.toBeNull(); if (execution.pendingInteraction === null) { throw new Error("Expected pending MCP interaction"); } expect(execution.pendingInteraction.kind).toBe("form"); expect(execution.pendingInteraction.payloadJson).toContain("Allow gated_echo?"); const approved = yield* client.executions.resume({ path: { workspaceId: installation.workspaceId, executionId: execution.execution.id, }, payload: { interactionMode: "live_form", responseJson: JSON.stringify({ action: "accept", content: { approve: true, }, }), }, }); expect(approved.execution.status).toBe("waiting_for_interaction"); expect(approved.pendingInteraction).not.toBeNull(); if (approved.pendingInteraction === null) { throw new Error("Expected remote MCP tool interaction"); } expect(approved.pendingInteraction.kind).toBe("form"); expect(approved.pendingInteraction.payloadJson).toContain( "Approve gated echo for from-api-client?", ); const resumed = yield* client.executions.resume({ path: { workspaceId: installation.workspaceId, executionId: execution.execution.id, }, payload: { interactionMode: "live_form", responseJson: JSON.stringify({ action: "accept", content: { approve: true, }, }), }, }); expect(resumed.execution.status).toBe("completed"); 
expect(resumed.pendingInteraction).toBeNull(); expect(resumed.execution.resultJson).toContain("approved:from-api-client"); })), 15_000, ); it.live("adds an OAuth-protected MCP source through the API client and resumes after callback", () => Effect.scoped(Effect.gen(function* () { const oauthServer = yield* Effect.acquireRelease( Effect.promise(() => startOAuthProtectedMcpServer()), (server) => Effect.promise(() => server.close()).pipe(Effect.orDie), ); const { installation, client } = yield* createApiClientHarness(); const connected = yield* client.sources.connect({ path: { workspaceId: installation.workspaceId, }, payload: { kind: "mcp", endpoint: oauthServer.endpoint, name: "Axiom", namespace: "axiom", }, }); expect(connected.kind).toBe("oauth_required"); if (connected.kind !== "oauth_required") { throw new Error(`Expected oauth_required result, received ${connected.kind}`); } expect(connected.source.status).toBe("auth_required"); const callbackResponse = yield* Effect.promise(() => fetch(connected.authorizationUrl, { redirect: "follow", }), ); assertTrue(callbackResponse.ok); const callbackText = yield* Effect.promise(() => callbackResponse.text()); expect(callbackText).toContain("Source connected:"); const refreshedSource = yield* client.sources.get({ path: { workspaceId: installation.workspaceId, sourceId: connected.source.id, }, }); expect(refreshedSource.name).toBe("Axiom"); expect(refreshedSource.status).toBe("connected"); expect(refreshedSource.auth.kind).toBe("mcp_oauth"); const toolCall = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: "return await tools.axiom.whoami({});", }, }); expect(toolCall.execution.status).toBe("waiting_for_interaction"); expect(toolCall.pendingInteraction).not.toBeNull(); if (toolCall.pendingInteraction === null) { throw new Error("Expected pending tool approval interaction"); } expect(toolCall.pendingInteraction.kind).toBe("form"); 
expect(toolCall.pendingInteraction.payloadJson).toContain("Allow whoami?"); const approvedToolCall = yield* client.executions.resume({ path: { workspaceId: installation.workspaceId, executionId: toolCall.execution.id, }, payload: { responseJson: JSON.stringify({ action: "accept", content: { approve: true, }, }), }, }); expect(approvedToolCall.execution.status).toBe("completed"); expect(approvedToolCall.pendingInteraction).toBeNull(); expect(approvedToolCall.execution.resultJson).toContain("oauth-demo"); })), 15_000, ); it.scoped("adds an OAuth-protected MCP source via executor.sources.add and resumes after callback", () => Effect.gen(function* () { const oauthServer = yield* Effect.acquireRelease( Effect.promise(() => startOAuthProtectedMcpServer()), (server) => Effect.promise(() => server.close()).pipe(Effect.orDie), ); const server = yield* createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", }); const bootstrapClient = yield* createControlPlaneClient({ baseUrl: server.baseUrl, }); const installation = yield* bootstrapClient.local.installation({}); const client = yield* createControlPlaneClient({ baseUrl: server.baseUrl, accountId: installation.accountId, }); const added = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: `return await tools.executor.sources.add({ endpoint: ${JSON.stringify(oauthServer.endpoint)}, name: "Axiom", namespace: "axiom" });`, interactionMode: "live", }, }); expect(added.execution.status).toBe("waiting_for_interaction"); expect(added.pendingInteraction).not.toBeNull(); if (added.pendingInteraction === null) { throw new Error("Expected pending OAuth interaction"); } expect(added.pendingInteraction.kind).toBe("url"); const authorizationUrl = extractUrlInteractionUrl(added.pendingInteraction.payloadJson); const callbackResponse = yield* Effect.promise(() => fetch(authorizationUrl, { redirect: "follow", }), ); assertTrue(callbackResponse.ok); const callbackText = yield* 
Effect.promise(() => callbackResponse.text()); expect(callbackText).toContain("Source connected:"); const connectedSource = yield* Effect.gen(function* () { while (true) { const sources = yield* client.sources.list({ path: { workspaceId: installation.workspaceId, }, }); const source = sources.find((entry) => entry.namespace === "axiom"); if (source?.status === "connected" && source.auth.kind === "mcp_oauth") { return source; } yield* Effect.sleep("100 millis"); } }); expect(connectedSource.name).toBe("Axiom"); expect(connectedSource.status).toBe("connected"); expect(connectedSource.auth.kind).toBe("mcp_oauth"); const toolCall = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: "return await tools.axiom.whoami({});", }, }); expect(toolCall.execution.status).toBe("waiting_for_interaction"); expect(toolCall.pendingInteraction).not.toBeNull(); if (toolCall.pendingInteraction !== null) { expect(toolCall.pendingInteraction.kind).toBe("form"); expect(toolCall.pendingInteraction.payloadJson).toContain("Allow whoami?"); } const approvedToolCall = yield* client.executions.resume({ path: { workspaceId: installation.workspaceId, executionId: toolCall.execution.id, }, payload: { responseJson: JSON.stringify({ action: "accept", content: { approve: true, }, }), }, }); expect(approvedToolCall.execution.status).toBe("completed"); expect(approvedToolCall.pendingInteraction).toBeNull(); expect(approvedToolCall.execution.resultJson).toContain("oauth-demo"); }), 15_000, ); it.scoped("gates mutating OpenAPI tools by default and allows them via workspace policy", () => Effect.gen(function* () { const openApiServer = yield* Effect.acquireRelease( Effect.promise(() => startMutatingOpenApiDemoServer()), (server) => Effect.promise(() => server.close()).pipe(Effect.orDie), ); const server = yield* createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", }); const bootstrapClient = yield* createControlPlaneClient({ baseUrl: 
server.baseUrl, }); const installation = yield* bootstrapClient.local.installation({}); const client = yield* createControlPlaneClient({ baseUrl: server.baseUrl, accountId: installation.accountId, }); const connected = yield* client.sources.connect({ path: { workspaceId: installation.workspaceId, }, payload: { kind: "openapi", name: "DNS", namespace: "dns", endpoint: openApiServer.baseUrl, specUrl: openApiServer.specUrl, auth: { kind: "none", }, }, }); expect(connected.kind).toBe("connected"); const gated = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: 'return await tools.dns.records.createRecord({ body: { type: "TXT", name: "", value: "hello world" } });', }, }); expect(gated.execution.status).toBe("waiting_for_interaction"); expect(gated.pendingInteraction).not.toBeNull(); if (gated.pendingInteraction === null) { throw new Error("Expected pending approval interaction"); } expect(gated.pendingInteraction.kind).toBe("form"); expect(gated.pendingInteraction.payloadJson).toContain("Allow Create a DNS record?"); expect(gated.pendingInteraction.payloadJson).toContain("\"approve\""); const approved = yield* client.executions.resume({ path: { workspaceId: installation.workspaceId, executionId: gated.execution.id, }, payload: { responseJson: JSON.stringify({ action: "accept", content: { approve: true, }, }), }, }); expect(approved.execution.status).toBe("completed"); expect(openApiServer.createdBodies).toHaveLength(1); const policy = yield* client.policies.create({ path: { workspaceId: installation.workspaceId, }, payload: { resourcePattern: "dns.records.createRecord", effect: "allow", approvalMode: "auto", }, }); expect(policy.key).toBe("dns.records.createRecord"); const automatic = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: 'return await tools.dns.records.createRecord({ body: { type: "TXT", name: "", value: "hello again" } });', }, }); 
expect(automatic.execution.status).toBe("completed"); expect(automatic.pendingInteraction).toBeNull(); expect(openApiServer.createdBodies).toHaveLength(2); }), 15_000, ); it.scoped("starts source OAuth without creating a source and stores secrets on callback", () => Effect.gen(function* () { const oauthServer = yield* Effect.acquireRelease( Effect.promise(() => startOAuthProtectedMcpServer()), (server) => Effect.promise(() => server.close()).pipe(Effect.orDie), ); const server = yield* createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", }); const bootstrapClient = yield* createControlPlaneClient({ baseUrl: server.baseUrl, }); const installation = yield* bootstrapClient.local.installation({}); const startResponse = yield* Effect.promise(() => fetch(`${server.baseUrl}/v1/workspaces/${encodeURIComponent(installation.workspaceId)}/oauth/source-auth/start`, { method: "POST", headers: { "content-type": "application/json", "x-executor-account-id": installation.accountId, }, body: JSON.stringify({ provider: "mcp", endpoint: oauthServer.endpoint, transport: "auto", }), }), ); assertTrue(startResponse.ok); const started: { sessionId: string; authorizationUrl: string; } = yield* Effect.promise(() => startResponse.json() as Promise<{ sessionId: string; authorizationUrl: string; }>); expect(started.sessionId).toBeTruthy(); expect(started.authorizationUrl).toBeDefined(); const callbackResponse = yield* Effect.promise(() => fetch(started.authorizationUrl, { redirect: "follow", }), ); assertTrue(callbackResponse.ok); const callbackHtml = yield* Effect.promise(() => callbackResponse.text()); expect(callbackHtml).toContain("OAuth connected"); expect(callbackHtml).toContain("executor:oauth-result"); const client = yield* createControlPlaneClient({ baseUrl: server.baseUrl, accountId: installation.accountId, }); const sources = yield* client.sources.list({ path: { workspaceId: installation.workspaceId, }, }); expect(sources).toHaveLength(0); const secrets = yield* 
bootstrapClient.local.listSecrets({}); expect(secrets.some((secret) => secret.purpose === "oauth_access_token")).toBe(true); expect(secrets.some((secret) => secret.purpose === "oauth_refresh_token")).toBe(true); }), 15_000, ); it.scoped("marks execution failed when a configured MCP endpoint is invalid", () => Effect.gen(function* () { const server = yield* createIsolatedLocalExecutorServer({ port: 0, localDataDir: ":memory:", }); const bootstrapClient = yield* createControlPlaneClient({ baseUrl: server.baseUrl, }); const installation = yield* bootstrapClient.local.installation({}); const client = yield* createControlPlaneClient({ baseUrl: server.baseUrl, accountId: installation.accountId, }); yield* writeConfiguredLocalMcpSource({ workspaceRoot: server.workspaceRoot, sourceId: "demo", endpoint: "http://127.0.0.1:PORT/mcp", name: "Demo", namespace: "demo", }); const execution = yield* client.executions.create({ path: { workspaceId: installation.workspaceId, }, payload: { code: 'return await tools.demo.gated_echo({ value: "broken" });', }, }); expect(execution.execution.status).toBe("waiting_for_interaction"); expect(execution.pendingInteraction).not.toBeNull(); if (execution.pendingInteraction !== null) { expect(execution.pendingInteraction.kind).toBe("form"); expect(execution.pendingInteraction.payloadJson).toContain("Allow gated_echo?"); } const resumed = yield* client.executions.resume({ path: { workspaceId: installation.workspaceId, executionId: execution.execution.id, }, payload: { responseJson: JSON.stringify({ action: "accept", content: { approve: true, }, }), }, }); expect(resumed.execution.status).toBe("failed"); expect(resumed.pendingInteraction).toBeNull(); expect(resumed.execution.errorText).toMatch(/Invalid URL|Failed connecting to MCP server/); }), ); }); ================================================ FILE: apps/executor/tsconfig.json ================================================ { "compilerOptions": { "target": "ES2022", "module": "ESNext", 
"moduleResolution": "Bundler", "strict": true, "skipLibCheck": true, "noEmit": true, "lib": ["ES2022"], "types": ["bun-types", "node"] }, "include": [ "src/**/*.ts", "src/**/*.d.ts" ] } ================================================ FILE: apps/web/CHANGELOG.md ================================================ # @executor/web ## null ### Patch Changes - @executor/react@null - @executor/server@null ================================================ FILE: apps/web/package.json ================================================ { "name": "@executor/web", "private": true, "type": "module", "scripts": { "dev": "bunx --bun vite dev", "build": "bunx vite build --config vite.config.ts", "typecheck": "bunx tsc --noEmit -p tsconfig.json", "start": "NODE_ENV=production bun src/server.ts" }, "dependencies": { "@executor/react": "workspace:*", "@executor/server": "workspace:*", "@shikijs/langs": "^4.0.1", "@shikijs/themes": "^4.0.1", "@tanstack/react-router": "^1.132.7", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "effect": "catalog:", "react": "^19.2.0", "react-dom": "^19.2.0", "react-grab": "^0.1.26", "scheduler": "^0.27.0", "shiki": "^4.0.1", "streamdown": "^2.3.0", "tailwind-merge": "^3.5.0", "tldts": "^7.0.23" }, "devDependencies": { "@hono/vite-dev-server": "^0.25.0", "@tailwindcss/vite": "^4.2.1", "@types/react": "^19.2.10", "@types/react-dom": "^19.2.3", "@vitejs/plugin-react": "^5.1.4", "bun-types": "catalog:", "tailwindcss": "^4.2.1", "tw-animate-css": "^1.4.0", "vite": "^7.3.1" }, "version": "0.0.0" } ================================================ FILE: apps/web/src/components/code-block.tsx ================================================ import { useEffect, useState, useRef, useCallback } from "react"; import { codeToHtml, resolveLang } from "../lib/shiki"; import { cn } from "../lib/utils"; import { IconCopy, IconCheck } from "./icons"; // Lazily loaded, cached across renders const highlightCache = new Map(); function cacheKey(code: string, lang: string) 
{ return `${lang}::${code.length}::${code.slice(0, 64)}`; } function detectLanguage(code: string, hint?: string): string { if (hint) return resolveLang(hint) ?? "json"; const trimmed = code.trimStart(); if (trimmed.startsWith("{") || trimmed.startsWith("[")) return "json"; if (trimmed.startsWith("<")) return "xml"; if (trimmed.startsWith("---")) return "yaml"; return "json"; } export function CodeBlock(props: { code: string; lang?: string; className?: string; }) { const { code, lang: langHint, className } = props; const [html, setHtml] = useState(null); const [copied, setCopied] = useState(false); const mountedRef = useRef(true); const language = detectLanguage(code, langHint); const key = cacheKey(code, language); useEffect(() => { mountedRef.current = true; return () => { mountedRef.current = false; }; }, []); useEffect(() => { const cached = highlightCache.get(key); if (cached) { setHtml(cached); return; } let cancelled = false; codeToHtml(code, { lang: language }).then((result) => { if (cancelled) return; highlightCache.set(key, result); if (mountedRef.current) setHtml(result); }); return () => { cancelled = true; }; }, [code, key, language]); const handleCopy = useCallback(() => { void navigator.clipboard.writeText(code).then(() => { setCopied(true); setTimeout(() => setCopied(false), 1500); }); }, [code]); return (
{/* Copy button */} {html ? (
) : ( // Fallback while shiki loads
          {code}
        
)}
); } ================================================ FILE: apps/web/src/components/document-panel.tsx ================================================ import { cn } from "../lib/utils"; import { CodeBlock } from "./code-block"; export function DocumentPanel(props: { title: string; body: string | null; lang?: string; empty: string; compact?: boolean; }) { return (
{props.title}
{props.body ? ( ) : (
{props.empty}
)}
); } ================================================ FILE: apps/web/src/components/icons.tsx ================================================ import type { SVGProps } from "react"; import { cn } from "../lib/utils"; type IconProps = SVGProps & { className?: string }; export function IconSources({ className, ...props }: IconProps) { return ( ); } export function IconTool({ className, ...props }: IconProps) { return ( ); } export function IconSearch({ className, ...props }: IconProps) { return ( ); } export function IconChevron({ className, ...props }: IconProps) { return ( ); } export function IconFolder({ className, ...props }: IconProps) { return ( ); } export function IconDocument({ className, ...props }: IconProps) { return ( ); } export function IconDiscover({ className, ...props }: IconProps) { return ( ); } export function IconCopy({ className, ...props }: IconProps) { return ( ); } export function IconCheck({ className, ...props }: IconProps) { return ( ); } export function IconClose({ className, ...props }: IconProps) { return ( ); } export function IconEmpty({ className, ...props }: IconProps) { return ( ); } export function IconSpinner({ className, ...props }: IconProps) { return ( ); } export function IconInfo({ className, ...props }: IconProps) { return ( ); } export function IconPlus({ className, ...props }: IconProps) { return ( ); } export function IconPencil({ className, ...props }: IconProps) { return ( ); } export function IconTrash({ className, ...props }: IconProps) { return ( ); } export function IconArrowLeft({ className, ...props }: IconProps) { return ( ); } ================================================ FILE: apps/web/src/components/loadable.tsx ================================================ import type { ReactNode } from "react"; import type { Loadable } from "@executor/react"; import { IconSpinner, IconEmpty } from "./icons"; export function LoadableBlock(props: { loadable: Loadable; loading?: string; children: (data: T) => 
ReactNode; }) { if (props.loadable.status === "loading") { return (
{props.loading ?? "Loading..."}
); } if (props.loadable.status === "error") { return (
{props.loadable.error.message}
); } return <>{props.children(props.loadable.data)}; } export function EmptyState(props: { title: string; description?: string; className?: string; }) { return (

{props.title}

{props.description && (

{props.description}

)}
); } ================================================ FILE: apps/web/src/components/local-mcp-install-card.tsx ================================================ import { useEffect, useMemo, useState } from "react"; import { CodeBlock } from "./code-block"; export function LocalMcpInstallCard(props: { title?: string; description?: string; className?: string; }) { const [origin, setOrigin] = useState(null); useEffect(() => { setOrigin(window.location.origin); }, []); const command = useMemo( () => origin ? `npx add-mcp "${origin}/mcp" --transport http --name "executor"` : 'npx add-mcp "/mcp" --transport http --name "executor"', [origin], ); return (

{props.title ?? "Install local MCP"}

{props.description ?? "Add this local executor server to an MCP client with one command. The URL uses the same origin as this web app."}

); } ================================================ FILE: apps/web/src/components/markdown.tsx ================================================ import { Streamdown } from "streamdown"; import { createLimitedCodePlugin } from "../lib/shiki"; const codePlugin = createLimitedCodePlugin(); const PROSE_CLASSES = [ "text-[13px] leading-relaxed text-muted-foreground", // paragraphs "[&_p]:mb-[0.4em] [&_p:last-child]:mb-0", // bold "[&_strong]:text-foreground [&_strong]:font-semibold", "[&_b]:text-foreground [&_b]:font-semibold", // inline code "[&_code]:font-mono [&_code]:text-xs [&_code]:bg-muted [&_code]:border [&_code]:border-border", "[&_code]:rounded-sm [&_code]:px-1.5 [&_code]:py-px [&_code]:text-primary", // pre blocks "[&_pre]:bg-muted [&_pre]:border [&_pre]:border-border [&_pre]:rounded-md", "[&_pre]:px-3 [&_pre]:py-2 [&_pre]:overflow-x-auto [&_pre]:my-2 [&_pre]:text-xs [&_pre]:leading-relaxed", "[&_pre_code]:bg-transparent [&_pre_code]:border-0 [&_pre_code]:p-0 [&_pre_code]:text-inherit", // links "[&_a]:text-primary [&_a]:underline [&_a]:underline-offset-2", "[&_a]:decoration-primary/30 hover:[&_a]:decoration-primary/80", // lists "[&_ul]:pl-5 [&_ul]:my-1.5 [&_ol]:pl-5 [&_ol]:my-1.5", "[&_li]:mb-0.5 [&_li_::marker]:text-muted-foreground", // tables "[&_table]:w-full [&_table]:border-collapse [&_table]:text-xs [&_table]:my-2", "[&_th]:border [&_th]:border-border [&_th]:px-2 [&_th]:py-1 [&_th]:text-left [&_th]:bg-muted [&_th]:font-semibold [&_th]:text-foreground", "[&_td]:border [&_td]:border-border [&_td]:px-2 [&_td]:py-1 [&_td]:text-left [&_td]:bg-background", // headings "[&_h1]:font-semibold [&_h1]:text-foreground [&_h1]:mt-2 [&_h1]:mb-1 [&_h1]:text-[15px]", "[&_h2]:font-semibold [&_h2]:text-foreground [&_h2]:mt-2 [&_h2]:mb-1 [&_h2]:text-sm", "[&_h3]:font-semibold [&_h3]:text-foreground [&_h3]:mt-2 [&_h3]:mb-1 [&_h3]:text-[13px]", "[&_h4]:font-semibold [&_h4]:text-foreground [&_h4]:mt-2 [&_h4]:mb-1 [&_h4]:text-[13px]", // blockquote 
"[&_blockquote]:border-l-2 [&_blockquote]:border-border [&_blockquote]:pl-3 [&_blockquote]:my-1.5 [&_blockquote]:text-muted-foreground", // hr "[&_hr]:border-0 [&_hr]:border-t [&_hr]:border-border [&_hr]:my-2", // images "[&_img]:max-w-full [&_img]:rounded", ].join(" "); export function Markdown(props: { children: string; className?: string }) { return (
{props.children}
); } ================================================ FILE: apps/web/src/components/shell.tsx ================================================ import { Link, Outlet, useMatchRoute } from "@tanstack/react-router"; import { useCallback, useEffect, useState } from "react"; import { useSources, type Source } from "@executor/react"; import { cn } from "../lib/utils"; import { IconPlus, IconCopy, IconCheck } from "./icons"; import { LoadableBlock } from "./loadable"; import { SourceFavicon } from "./source-favicon"; // ── Status dot color ───────────────────────────────────────────────────── const statusColor: Record = { connected: "bg-primary", probing: "bg-amber-400", draft: "bg-muted-foreground/30", auth_required: "bg-amber-500", error: "bg-destructive", }; type AppMetaEnv = { readonly VITE_APP_VERSION: string; readonly VITE_GITHUB_URL: string; }; const { VITE_APP_VERSION, VITE_GITHUB_URL } = (import.meta as ImportMeta & { readonly env: AppMetaEnv; }).env; type UpdateChannel = "latest" | "beta"; type ParsedVersion = { readonly major: number; readonly minor: number; readonly patch: number; readonly prerelease: ReadonlyArray | null; }; const semverPattern = /^(?\d+)\.(?\d+)\.(?\d+)(?:-(?[0-9A-Za-z.-]+))?(?:\+[0-9A-Za-z.-]+)?$/; const resolveUpdateChannel = (version: string): UpdateChannel => version.includes("-beta.") ? "beta" : "latest"; const parseVersion = (version: string): ParsedVersion | null => { const match = version.trim().match(semverPattern); if (!match?.groups) { return null; } return { major: Number(match.groups.major), minor: Number(match.groups.minor), patch: Number(match.groups.patch), prerelease: match.groups.prerelease ? match.groups.prerelease.split(".").map((identifier) => /^\d+$/.test(identifier) ? 
Number(identifier) : identifier, ) : null, }; }; const comparePrereleaseIdentifiers = ( left: ReadonlyArray | null, right: ReadonlyArray | null, ): number => { if (left === null && right === null) { return 0; } if (left === null) { return 1; } if (right === null) { return -1; } const length = Math.max(left.length, right.length); for (let index = 0; index < length; index += 1) { const leftIdentifier = left[index]; const rightIdentifier = right[index]; if (leftIdentifier === rightIdentifier) { continue; } if (leftIdentifier === undefined) { return -1; } if (rightIdentifier === undefined) { return 1; } if (typeof leftIdentifier === "number" && typeof rightIdentifier === "number") { return leftIdentifier < rightIdentifier ? -1 : 1; } if (typeof leftIdentifier === "number") { return -1; } if (typeof rightIdentifier === "number") { return 1; } return leftIdentifier < rightIdentifier ? -1 : 1; } return 0; }; const compareVersions = (left: string, right: string): number | null => { const leftVersion = parseVersion(left); const rightVersion = parseVersion(right); if (!leftVersion || !rightVersion) { return null; } if (leftVersion.major !== rightVersion.major) { return leftVersion.major < rightVersion.major ? -1 : 1; } if (leftVersion.minor !== rightVersion.minor) { return leftVersion.minor < rightVersion.minor ? -1 : 1; } if (leftVersion.patch !== rightVersion.patch) { return leftVersion.patch < rightVersion.patch ? -1 : 1; } return comparePrereleaseIdentifiers(leftVersion.prerelease, rightVersion.prerelease); }; // ── useLatestVersion ───────────────────────────────────────────────────── function useLatestVersion(currentVersion: string) { const channel = resolveUpdateChannel(currentVersion); const [latestVersion, setLatestVersion] = useState(null); useEffect(() => { let cancelled = false; fetch("https://registry.npmjs.org/-/package/executor/dist-tags") .then((res) => res.json()) .then((data: Partial>) => { if (!cancelled) { setLatestVersion(data[channel] ?? 
null); } }) .catch(() => {}); return () => { cancelled = true; }; }, [channel]); const updateAvailable = latestVersion !== null && compareVersions(currentVersion, latestVersion) === -1; return { latestVersion, updateAvailable, channel }; } // ── UpdateCard ─────────────────────────────────────────────────────────── function UpdateCard(props: { latestVersion: string; channel: UpdateChannel }) { const command = `npm i -g executor@${props.channel}`; const [copied, setCopied] = useState(false); const handleCopy = useCallback(() => { void navigator.clipboard.writeText(command).then(() => { setCopied(true); setTimeout(() => setCopied(false), 1500); }); }, [command]); return (

Update available

v{props.latestVersion}

); } // ── AppShell ───────────────────────────────────────────────────────────── export function AppShell() { const sources = useSources(); const matchRoute = useMatchRoute(); const isHome = matchRoute({ to: "/" }); const isSecrets = matchRoute({ to: "/secrets" }); const { latestVersion, updateAvailable, channel } = useLatestVersion(VITE_APP_VERSION); return (
{/* Sidebar */} {/* Main content */}
); } // ── SourceItem ─────────────────────────────────────────────────────────── function SourceItem(props: { source: Source; matchRoute: ReturnType; }) { const { source, matchRoute } = props; const active = matchRoute({ to: "/sources/$sourceId", params: { sourceId: source.id }, fuzzy: true, }); return (
{source.name} ); } // ── NavItem ────────────────────────────────────────────────────────────── function NavItem(props: { to: string; label: string; active: boolean }) { return ( {props.label} ); } ================================================ FILE: apps/web/src/components/source-favicon.tsx ================================================ import { useMemo, useState } from "react"; import type { Source } from "@executor/react"; import { getGoogleProductIconUrl, getSourceFaviconUrl } from "../lib/source-favicon"; import { cn } from "../lib/utils"; type SourceKind = Source["kind"] | string; export function SourceFavicon({ endpoint, kind, className, size = 16, }: { endpoint?: string | null; kind: SourceKind; className?: string; size?: number; }) { const faviconUrl = useMemo(() => { // For google_discovery, prefer the real product icon if (kind === "google_discovery") { return getGoogleProductIconUrl(endpoint) ?? getSourceFaviconUrl(endpoint); } return getSourceFaviconUrl(endpoint); }, [endpoint, kind]); const [failedUrl, setFailedUrl] = useState(null); const isFailed = Boolean(faviconUrl && failedUrl === faviconUrl); if (!faviconUrl || isFailed) { return ; } return ( setFailedUrl(faviconUrl)} /> ); } export function DefaultSourceIcon({ kind, className, }: { kind: SourceKind; className?: string; }) { const base = cn("shrink-0", className); switch (kind) { case "mcp": return ( ); case "graphql": return ( ); case "openapi": return ( ); case "google_discovery": return ( ); case "internal": return ( ); default: return ( ); } } ================================================ FILE: apps/web/src/components/source-not-found-state.tsx ================================================ import { Link } from "@tanstack/react-router"; import { IconEmpty } from "./icons"; import { cn } from "../lib/utils"; export function SourceNotFoundState() { return (

Source not found

This source no longer exists in the current workspace.

Back to dashboard Add source
); } ================================================ FILE: apps/web/src/components/source-recovery-state.tsx ================================================ import type { Source } from "@executor/react"; import { cn } from "../lib/utils"; import { Button } from "./ui/button"; import { Badge } from "./ui/badge"; import { IconSpinner, IconTool } from "./icons"; export function SourceRecoveryState(props: { source: Source; title: string; description: string; refreshLabel?: string; refreshTitle?: string; refreshDisabled?: boolean; refreshPending?: boolean; feedback?: { tone: "success" | "error"; text: string; } | null; onRefresh: () => void; }) { return (

{props.title}

{props.source.name} {props.source.kind}

{props.description}

{props.feedback && (

{props.feedback.text}

)}
); } ================================================ FILE: apps/web/src/components/ui/badge.tsx ================================================ import type { HTMLAttributes } from "react"; import { cva, type VariantProps } from "class-variance-authority"; import { cn } from "../../lib/utils"; const badgeVariants = cva( "inline-flex items-center rounded-full border px-2 py-0.5 text-[10px] font-medium tracking-wide uppercase", { variants: { variant: { default: "border-transparent bg-primary/15 text-primary", secondary: "border-transparent bg-secondary text-secondary-foreground", outline: "border-border text-muted-foreground", muted: "border-transparent bg-muted text-muted-foreground", get: "border-[var(--method-get)]/20 bg-[var(--method-get)]/10 text-[var(--method-get)]", post: "border-[var(--method-post)]/20 bg-[var(--method-post)]/10 text-[var(--method-post)]", put: "border-[var(--method-put)]/20 bg-[var(--method-put)]/10 text-[var(--method-put)]", delete: "border-[var(--method-delete)]/20 bg-[var(--method-delete)]/10 text-[var(--method-delete)]", destructive: "border-destructive/20 bg-destructive/10 text-destructive", }, }, defaultVariants: { variant: "default", }, }, ); export type BadgeProps = HTMLAttributes & VariantProps; export function Badge({ className, variant, ...props }: BadgeProps) { return ; } export function MethodBadge({ method }: { method: string }) { const variant = ({ GET: "get", POST: "post", PUT: "put", PATCH: "put", DELETE: "delete", } as const)[method.toUpperCase()] ?? 
"outline"; return {method}; } ================================================ FILE: apps/web/src/components/ui/button.tsx ================================================ import type { ButtonHTMLAttributes } from "react"; import { cva, type VariantProps } from "class-variance-authority"; import { cn } from "../../lib/utils"; const buttonVariants = cva( "inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-all disabled:pointer-events-none disabled:opacity-50 outline-none focus-visible:ring-2 focus-visible:ring-ring/50 focus-visible:ring-offset-1 focus-visible:ring-offset-background cursor-pointer", { variants: { variant: { default: "bg-primary text-primary-foreground hover:bg-primary/90", secondary: "bg-secondary text-secondary-foreground hover:bg-secondary/80", outline: "border border-input bg-transparent hover:bg-accent hover:text-accent-foreground", ghost: "hover:bg-accent hover:text-accent-foreground", destructive: "bg-destructive text-white hover:bg-destructive/90", }, size: { default: "h-9 px-4 py-2", sm: "h-7 rounded-md px-2.5 text-xs", lg: "h-10 rounded-md px-6", icon: "h-8 w-8", }, }, defaultVariants: { variant: "default", size: "default", }, }, ); type ButtonProps = ButtonHTMLAttributes & VariantProps; export function Button({ className, variant, size, ...props }: ButtonProps) { return
{isDiscovering && (
)}
{/* Probe auth toggle */}
{showProbeAuth && (
setProbeField("kind", v as ProbeAuthKind) } options={probeAuthOptions.map((v) => ({ value: v, label: v, }))} /> {probeAuth.kind === "bearer" && (
setProbeField("headerName", v)} placeholder="Authorization" /> setProbeField("prefix", v)} placeholder="Bearer " /> setProbeField("token", v)} placeholder="sk-..." mono />
)} {probeAuth.kind === "basic" && (
setProbeField("username", v)} placeholder="user" /> setProbeField("password", v)} placeholder="pass" />
)} {probeAuth.kind === "headers" && ( setProbeField("headersText", v)} placeholder={'{\n "x-api-key": "..."\n}'} /> )}
)} {/* Skip discovery link */}
{/* ---- Template catalogue ---- */}

Start from a template

Pick a known source to skip discovery, or select multiple Google APIs for batch connect.

{/* -- Popular / non-Google templates -- */} {(() => { const popularTemplates = sourceTemplates.filter( (t) => t.groupId !== "google_workspace", ); return ( popularTemplates.length > 0 && (

Popular

{popularTemplates.map((template) => ( ))}
) ); })()} {/* -- Google Workspace batch section -- */}

Google Workspace

{googleTemplates.length > 0 && (
{selectedGoogleTemplateIds.length > 0 && ( )}
)}
{googleTemplates.map((template) => { const selected = selectedGoogleTemplateIds.includes( template.id, ); return ( ); })}
{/* -- Batch connect panel (slides in when templates selected) -- */} {selectedGoogleTemplateIds.length > 0 && (
{/* Header strip */}
{selectedGoogleTemplateIds.length}

Connect Google API {selectedGoogleTemplateIds.length === 1 ? "" : "s"}

One consent screen for all

{/* Body */}
{connectForm.workspaceOauthClientId.trim().length === 0 && (
setFormField("oauthClientId", v) } placeholder="1234567890-abcdef.apps.googleusercontent.com" /> setFormField("oauthClientSecret", v) } placeholder="GOCSPX-..." />
)}
)}
)} {/* Batch connecting interstitial */} {batchConnecting && phase === "connecting" && (

Connecting {selectedGoogleTemplateIds.length} source {selectedGoogleTemplateIds.length === 1 ? "" : "s"}

Setting up Google Workspace APIs\u2026

)} {/* Step 2-4: Editing / Connecting */} {(phase === "editing" || (phase === "connecting" && !batchConnecting) || phase === "credential_required") && (
setFormField("name", v)} placeholder="My API" /> setSourceKind(v as ConnectFormState["kind"]) } options={kindOptions.map((v) => ({ value: v, label: v }))} /> {connectForm.kind !== "google_discovery" && !( connectForm.kind === "mcp" && connectForm.transport === "stdio" ) && ( setFormField("endpoint", v)} placeholder="https://api.example.com" mono /> )} setFormField("namespace", v)} placeholder="example" /> {connectForm.kind === "openapi" && ( setFormField("specUrl", v)} placeholder="https://example.com/openapi.yaml" mono /> )} {connectForm.kind === "google_discovery" && ( <> setFormField("service", v)} placeholder="sheets" /> setFormField("version", v)} placeholder="v4" /> setFormField("discoveryUrl", v)} placeholder="https://www.googleapis.com/discovery/v1/apis/sheets/v4/rest" mono />
Common Google API versions: Gmail `v1`, Sheets `v4`, Drive `v3`, Calendar `v3`, Docs `v1`.
)}
{/* MCP Transport */} {connectForm.kind === "mcp" && (
setTransport(v as McpTransportValue)} options={transportOptions.map((v) => ({ value: v, label: v, }))} /> {connectForm.transport === "stdio" ? (
setStdioTransportField("command", v)} placeholder="npx" mono /> setStdioTransportField("cwd", v)} placeholder="/path/to/project" mono /> setStdioTransportField("argsText", v) } placeholder={ '[\n "-y",\n "chrome-devtools-mcp@latest"\n]' } /> setStdioTransportField("envText", v)} placeholder={ '{\n "CHROME_PATH": "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"\n}' } />
) : (
setRemoteTransportField("queryParamsText", v) } placeholder={'{\n "workspace": "demo"\n}'} /> setRemoteTransportField("headersText", v) } placeholder={'{\n "x-api-key": "..."\n}'} />
)}
)} {/* Auth section (for non-MCP kinds, or when credential_required) */} {phase === "credential_required" && (

Credentials required

The server responded that this source requires authentication. Add a token or secret below, then try connecting again.

)} {connectForm.kind !== "mcp" && (
setFormField( "authKind", v as ConnectFormState["authKind"], ) } options={authOptions.map((v) => ({ value: v, label: v }))} /> {connectForm.kind === "google_discovery" && (
{connectForm.authKind === "oauth2" ? "OAuth starts after you click Connect. Choose an existing workspace OAuth client or enter a new client once, then reuse it across Google sources." : "Choosing auth mode 'none' skips the Google OAuth popup. Use 'oauth2' if you want executor to start the sign-in flow for you."}
)} {connectForm.kind === "google_discovery" && connectForm.authKind === "oauth2" && ( <> setFormField("workspaceOauthClientId", value) } options={[ { value: "", label: "Enter new client" }, ...(workspaceOauthClients.status === "ready" ? workspaceOauthClients.data.map((client) => ({ value: client.id, label: client.label ?? client.clientId, })) : []), ]} /> {connectForm.workspaceOauthClientId.trim().length === 0 && ( <> setFormField("oauthClientId", v) } placeholder="1234567890-abcdef.apps.googleusercontent.com" /> setFormField("oauthClientSecret", v) } placeholder="GOCSPX-..." /> )} )} {connectForm.authKind !== "none" && ( <> setFormField("authHeaderName", v)} placeholder="Authorization" /> setFormField("authPrefix", v)} placeholder="Bearer " /> )} {connectForm.authKind === "bearer" && ( { setFormField("bearerProviderId", providerId); setFormField("bearerHandle", handle); setFormField("bearerToken", ""); }} onChangeToken={(token) => { setFormField("bearerToken", token); setFormField("bearerProviderId", ""); setFormField("bearerHandle", ""); }} /> )}
)} {/* Actions */}
{phase === "credential_required" && ( )}
)} {/* Step 5a: OAuth required */} {phase === "oauth_required" && (oauthInfo || batchOauthInfo) && (
{/* Visual anchor */}
{oauthBusy ? ( ) : ( )}

{oauthBusy ? "Waiting for sign-in\u2026" : "Sign in to continue"}

{oauthInfo ? ( <> {oauthInfo.source.name} {" "} requires OAuth authentication. A popup will open for you to authorize access. ) : ( <> {batchOauthInfo!.sourceIds.length} Google source {batchOauthInfo!.sourceIds.length === 1 ? "" : "s"} need {batchOauthInfo!.sourceIds.length === 1 ? "s" : ""} OAuth. One consent screen covers all selected APIs. )}

{!oauthBusy && ( )}
)} {/* Step 5b: Connected */} {phase === "connected" && (

{connectResult ? connectResult.source.name : batchOauthInfo ? `${batchOauthInfo.sourceIds.length} source${batchOauthInfo.sourceIds.length === 1 ? "" : "s"}` : "Source"}{" "} connected

Redirecting\u2026

)} ); } // --------------------------------------------------------------------------- // Form building blocks // --------------------------------------------------------------------------- function Section(props: { title: string; children: ReactNode; className?: string; }) { return (

{props.title}

{props.children}
); } function Field(props: { label: string; className?: string; children: ReactNode; }) { return ( ); } function TextInput(props: { type?: "text" | "password"; value: string; onChange: (value: string) => void; placeholder?: string; mono?: boolean; }) { return ( props.onChange(event.target.value)} placeholder={props.placeholder} className={cn( "h-9 w-full rounded-lg border border-input bg-background px-3 text-[13px] text-foreground outline-none transition-colors placeholder:text-muted-foreground/35 focus:border-ring focus:ring-1 focus:ring-ring/25", props.mono && "font-mono text-[12px]", )} /> ); } function SelectInput(props: { value: string; onChange: (value: string) => void; options: ReadonlyArray<{ value: string; label: string }>; }) { return ( ); } function CodeEditor(props: { value: string; onChange: (value: string) => void; placeholder?: string; }) { return (