Repository: daytonaio/daytona Branch: main Commit: 0d0f3f921cd6 Files: 3949 Total size: 27.7 MB Directory structure: gitextract_07rnwxyw/ ├── .devcontainer/ │ ├── Dockerfile │ ├── buildkitd.toml │ ├── devcontainer.build.json │ ├── devcontainer.json │ ├── dex/ │ │ └── config.yaml │ ├── docker-compose.yaml │ ├── otel/ │ │ └── otel-collector-config.yaml │ ├── pgadmin4/ │ │ ├── pgpass │ │ └── servers.json │ └── tools-feature/ │ ├── devcontainer-feature.json │ └── install.sh ├── .dockerignore ├── .editorconfig ├── .gitattributes ├── .github/ │ ├── CODEOWNERS │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.md │ │ └── feature_request.md │ ├── actions/ │ │ └── setup-toolchain/ │ │ └── action.yml │ ├── pull_request_template.md │ └── workflows/ │ ├── build_devcontainer.yaml │ ├── default_image_publish.yaml │ ├── pr_checks.yaml │ ├── prepare-release.yaml │ ├── release.yaml │ ├── sdk_publish.yaml │ └── translate.yaml ├── .gitignore ├── .golangci.yaml ├── .husky/ │ ├── .gitignore │ └── pre-commit ├── .licenserc-clients.yaml ├── .licenserc.yaml ├── .markdownlint-cli2.jsonc ├── .npmrc ├── .nxignore ├── .prettierignore ├── .prettierrc ├── .rubocop.yml ├── .verdaccio/ │ └── config.yml ├── .vscode/ │ ├── launch.json │ ├── settings.json │ └── tasks.json ├── .yarnrc.yml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── COPYRIGHT ├── Gemfile ├── Gemfile.lock ├── LICENSE ├── NOTICE ├── PACKAGING.md ├── PREPARING_YOUR_CHANGES.md ├── PUBLISHING.md ├── README.md ├── SECURITY.md ├── apps/ │ ├── api/ │ │ ├── Dockerfile │ │ ├── eslint.config.mjs │ │ ├── jest.config.ts │ │ ├── project.json │ │ ├── scripts/ │ │ │ └── validate-migration-paths.sh │ │ ├── src/ │ │ │ ├── admin/ │ │ │ │ ├── admin.module.ts │ │ │ │ ├── controllers/ │ │ │ │ │ ├── runner.controller.ts │ │ │ │ │ └── sandbox.controller.ts │ │ │ │ └── dto/ │ │ │ │ └── create-runner.dto.ts │ │ │ ├── analytics/ │ │ │ │ ├── analytics.module.ts │ │ │ │ └── services/ │ │ │ │ └── analytics.service.ts │ │ │ ├── api-key/ │ │ │ │ ├── api-key.controller.ts │ 
│ │ │ ├── api-key.entity.ts │ │ │ │ ├── api-key.module.ts │ │ │ │ ├── api-key.service.ts │ │ │ │ └── dto/ │ │ │ │ ├── api-key-list.dto.ts │ │ │ │ ├── api-key-response.dto.ts │ │ │ │ └── create-api-key.dto.ts │ │ │ ├── app.module.ts │ │ │ ├── app.service.ts │ │ │ ├── assets/ │ │ │ │ ├── .gitkeep │ │ │ │ └── templates/ │ │ │ │ └── organization-invitation.template.ejs │ │ │ ├── audit/ │ │ │ │ ├── adapters/ │ │ │ │ │ ├── audit-opensearch.adapter.ts │ │ │ │ │ └── audit-typeorm.adapter.ts │ │ │ │ ├── audit.module.ts │ │ │ │ ├── constants/ │ │ │ │ │ ├── audit-log-events.constant.ts │ │ │ │ │ └── audit-tokens.ts │ │ │ │ ├── controllers/ │ │ │ │ │ └── audit.controller.ts │ │ │ │ ├── decorators/ │ │ │ │ │ └── audit.decorator.ts │ │ │ │ ├── dto/ │ │ │ │ │ ├── audit-log.dto.ts │ │ │ │ │ ├── create-audit-log-internal.dto.ts │ │ │ │ │ ├── create-audit-log.dto.ts │ │ │ │ │ ├── list-audit-logs-query.dto.ts │ │ │ │ │ ├── paginated-audit-logs.dto.ts │ │ │ │ │ └── update-audit-log-internal.dto.ts │ │ │ │ ├── entities/ │ │ │ │ │ └── audit-log.entity.ts │ │ │ │ ├── enums/ │ │ │ │ │ ├── audit-action.enum.ts │ │ │ │ │ └── audit-target.enum.ts │ │ │ │ ├── events/ │ │ │ │ │ ├── audit-log-created.event.ts │ │ │ │ │ └── audit-log-updated.event.ts │ │ │ │ ├── interceptors/ │ │ │ │ │ └── audit.interceptor.ts │ │ │ │ ├── interfaces/ │ │ │ │ │ ├── audit-filter.interface.ts │ │ │ │ │ ├── audit-publisher.interface.ts │ │ │ │ │ └── audit-storage.interface.ts │ │ │ │ ├── providers/ │ │ │ │ │ ├── audit-publisher.provider.ts │ │ │ │ │ └── audit-storage.provider.ts │ │ │ │ ├── publishers/ │ │ │ │ │ ├── audit-direct-publisher.ts │ │ │ │ │ └── kafka/ │ │ │ │ │ ├── audit-kafka-consumer.controller.ts │ │ │ │ │ └── audit-kafka-publisher.ts │ │ │ │ ├── services/ │ │ │ │ │ └── audit.service.ts │ │ │ │ └── subscribers/ │ │ │ │ └── audit-log.subscriber.ts │ │ │ ├── auth/ │ │ │ │ ├── api-key.strategy.ts │ │ │ │ ├── auth.module.ts │ │ │ │ ├── combined-auth.guard.ts │ │ │ │ ├── constants/ │ │ │ │ │ └── 
jwt-regex.constant.ts │ │ │ │ ├── failed-auth-tracker.service.ts │ │ │ │ ├── get-auth-context.ts │ │ │ │ ├── health-check.guard.ts │ │ │ │ ├── jwt.strategy.ts │ │ │ │ ├── or.guard.ts │ │ │ │ ├── otel-collector.guard.ts │ │ │ │ ├── runner-auth.guard.ts │ │ │ │ └── system-action.guard.ts │ │ │ ├── clickhouse/ │ │ │ │ ├── clickhouse.module.ts │ │ │ │ ├── clickhouse.service.ts │ │ │ │ └── index.ts │ │ │ ├── common/ │ │ │ │ ├── constants/ │ │ │ │ │ ├── constants.ts │ │ │ │ │ ├── error-messages.ts │ │ │ │ │ ├── feature-flags.ts │ │ │ │ │ └── header.constants.ts │ │ │ │ ├── decorators/ │ │ │ │ │ ├── auth-context.decorator.ts │ │ │ │ │ ├── autocommit-offset.decorator.ts │ │ │ │ │ ├── distributed-lock.decorator.ts │ │ │ │ │ ├── log-execution.decorator.ts │ │ │ │ │ ├── on-async-event.decorator.ts │ │ │ │ │ ├── otel.decorator.ts │ │ │ │ │ ├── page-limit.decorator.ts │ │ │ │ │ ├── page-number.decorator.ts │ │ │ │ │ ├── required-role.decorator.ts │ │ │ │ │ ├── runner-context.decorator.ts │ │ │ │ │ ├── throttler-scope.decorator.ts │ │ │ │ │ ├── to-array.decorator.ts │ │ │ │ │ └── track-job-execution.decorator.ts │ │ │ │ ├── dto/ │ │ │ │ │ └── url.dto.ts │ │ │ │ ├── guards/ │ │ │ │ │ ├── anonymous-rate-limit.guard.ts │ │ │ │ │ └── authenticated-rate-limit.guard.ts │ │ │ │ ├── interceptors/ │ │ │ │ │ └── content-type.interceptors.ts │ │ │ │ ├── interfaces/ │ │ │ │ │ ├── auth-context.interface.ts │ │ │ │ │ ├── health-check-context.interface.ts │ │ │ │ │ ├── otel-collector-context.interface.ts │ │ │ │ │ ├── otel-config.interface.ts │ │ │ │ │ ├── paginated-list.interface.ts │ │ │ │ │ ├── proxy-context.interface.ts │ │ │ │ │ ├── region-proxy.interface.ts │ │ │ │ │ ├── region-ssh-gateway.interface.ts │ │ │ │ │ ├── runner-context.interface.ts │ │ │ │ │ ├── ssh-gateway-context.interface.ts │ │ │ │ │ └── trackable-job-executions.ts │ │ │ │ ├── middleware/ │ │ │ │ │ ├── failed-auth-rate-limit.middleware.ts │ │ │ │ │ ├── maintenance.middleware.ts │ │ │ │ │ └── version-header.middleware.ts │ 
│ │ │ ├── modules/ │ │ │ │ │ └── body-parser-error.module.ts │ │ │ │ ├── providers/ │ │ │ │ │ └── openfeature-posthog.provider.ts │ │ │ │ ├── repositories/ │ │ │ │ │ └── base.repository.ts │ │ │ │ └── utils/ │ │ │ │ ├── api-key.ts │ │ │ │ ├── app-mode.ts │ │ │ │ ├── delete-s3-bucket.ts │ │ │ │ ├── docker-image.util.ts │ │ │ │ ├── email.util.ts │ │ │ │ ├── from-axios-error.ts │ │ │ │ ├── naming-strategy.util.ts │ │ │ │ ├── pino.util.ts │ │ │ │ ├── range-filter.ts │ │ │ │ ├── rate-limit-headers.util.ts │ │ │ │ └── uuid.ts │ │ │ ├── config/ │ │ │ │ ├── config.controller.ts │ │ │ │ ├── configuration.ts │ │ │ │ ├── dto/ │ │ │ │ │ └── configuration.dto.ts │ │ │ │ ├── typed-config.module.ts │ │ │ │ └── typed-config.service.ts │ │ │ ├── docker-registry/ │ │ │ │ ├── controllers/ │ │ │ │ │ └── docker-registry.controller.ts │ │ │ │ ├── decorators/ │ │ │ │ │ └── docker-registry.decorator.ts │ │ │ │ ├── docker-registry.module.ts │ │ │ │ ├── dto/ │ │ │ │ │ ├── create-docker-registry-internal.dto.ts │ │ │ │ │ ├── create-docker-registry.dto.ts │ │ │ │ │ ├── docker-registry.dto.ts │ │ │ │ │ └── update-docker-registry.dto.ts │ │ │ │ ├── entities/ │ │ │ │ │ └── docker-registry.entity.ts │ │ │ │ ├── enums/ │ │ │ │ │ └── registry-type.enum.ts │ │ │ │ ├── guards/ │ │ │ │ │ └── docker-registry-access.guard.ts │ │ │ │ ├── providers/ │ │ │ │ │ ├── docker-registry.provider.interface.ts │ │ │ │ │ ├── docker-registry.provider.ts │ │ │ │ │ └── mock-docker-registry.provider.ts │ │ │ │ └── services/ │ │ │ │ └── docker-registry.service.ts │ │ │ ├── email/ │ │ │ │ ├── constants.ts │ │ │ │ ├── email.module.ts │ │ │ │ └── services/ │ │ │ │ └── email.service.ts │ │ │ ├── encryption/ │ │ │ │ ├── encryption.module.ts │ │ │ │ └── encryption.service.ts │ │ │ ├── exceptions/ │ │ │ │ ├── bad-request.exception.ts │ │ │ │ ├── forbidden-operation.exception.ts │ │ │ │ ├── not-found.exception.ts │ │ │ │ └── sandbox-error.exception.ts │ │ │ ├── filters/ │ │ │ │ ├── all-exceptions.filter.ts │ │ │ │ └── 
kafka-exception.filter.ts │ │ │ ├── generate-openapi.ts │ │ │ ├── health/ │ │ │ │ ├── health.controller.ts │ │ │ │ ├── health.module.ts │ │ │ │ └── redis.health.ts │ │ │ ├── interceptors/ │ │ │ │ └── metrics.interceptor.ts │ │ │ ├── main.ts │ │ │ ├── migrations/ │ │ │ │ ├── 1741087887225-migration.ts │ │ │ │ ├── 1741088165704-migration.ts │ │ │ │ ├── 1741088883000-migration.ts │ │ │ │ ├── 1741088883001-migration.ts │ │ │ │ ├── 1741088883002-migration.ts │ │ │ │ ├── 1741877019888-migration.ts │ │ │ │ ├── 1742215525714-migration.ts │ │ │ │ ├── 1742475055353-migration.ts │ │ │ │ ├── 1742831092942-migration.ts │ │ │ │ ├── 1743593463168-migration.ts │ │ │ │ ├── 1743683015304-migration.ts │ │ │ │ ├── 1744028841133-migration.ts │ │ │ │ ├── 1744114341077-migration.ts │ │ │ │ ├── 1744378115901-migration.ts │ │ │ │ ├── 1744808444807-migration.ts │ │ │ │ ├── 1744868914148-migration.ts │ │ │ │ ├── 1744971114480-migration.ts │ │ │ │ ├── 1745393243334-migration.ts │ │ │ │ ├── 1745494761360-migration.ts │ │ │ │ ├── 1745574377029-migration.ts │ │ │ │ ├── 1745840296260-migration.ts │ │ │ │ ├── 1745864972652-migration.ts │ │ │ │ ├── 1746354231722-migration.ts │ │ │ │ ├── 1746604150910-migration.ts │ │ │ │ ├── 1747658203010-migration.ts │ │ │ │ ├── 1748006546552-migration.ts │ │ │ │ ├── 1748866194353-migration.ts │ │ │ │ ├── 1749474791343-migration.ts │ │ │ │ ├── 1749474791344-migration.ts │ │ │ │ ├── 1749474791345-migration.ts │ │ │ │ ├── 1750077343089-migration.ts │ │ │ │ ├── 1750436374899-migration.ts │ │ │ │ ├── 1750668569562-migration.ts │ │ │ │ ├── 1750751712412-migration.ts │ │ │ │ ├── 1751456907334-migration.ts │ │ │ │ ├── 1752494676200-migration.ts │ │ │ │ ├── 1752494676205-migration.ts │ │ │ │ ├── 1752848014862-migration.ts │ │ │ │ ├── 1753099115783-migration.ts │ │ │ │ ├── 1753100751730-migration.ts │ │ │ │ ├── 1753100751731-migration.ts │ │ │ │ ├── 1753185133351-migration.ts │ │ │ │ ├── 1753274135567-migration.ts │ │ │ │ ├── 1753430929609-migration.ts │ │ │ │ ├── 
1753717830378-migration.ts │ │ │ │ ├── 1754042247109-migration.ts │ │ │ │ ├── 1755003696741-migration.ts │ │ │ │ ├── 1755356869493-migration.ts │ │ │ │ ├── 1755464957487-migration.ts │ │ │ │ ├── 1755521645207-migration.ts │ │ │ │ ├── 1755860619921-migration.ts │ │ │ │ ├── 1757513754037-migration.ts │ │ │ │ ├── 1757513754038-migration.ts │ │ │ │ ├── 1759241690773-migration.ts │ │ │ │ ├── 1759768058397-migration.ts │ │ │ │ ├── 1761912147638-migration.ts │ │ │ │ ├── 1761912147639-migration.ts │ │ │ │ ├── 1763561822000-migration.ts │ │ │ │ ├── 1764073472179-migration.ts │ │ │ │ ├── 1764073472180-migration.ts │ │ │ │ ├── 1764844895057-migration.ts │ │ │ │ ├── 1764844895058-migration.ts │ │ │ │ ├── 1765282546000-migration.ts │ │ │ │ ├── 1765366773736-migration.ts │ │ │ │ ├── 1765400000000-migration.ts │ │ │ │ ├── 1765806205881-migration.ts │ │ │ │ ├── 1766415256696-migration.ts │ │ │ │ ├── 1767830400000-migration.ts │ │ │ │ ├── 1768306129179-migration.ts │ │ │ │ ├── 1768461678804-migration.ts │ │ │ │ ├── 1768475454675-migration.ts │ │ │ │ ├── 1768485728153-migration.ts │ │ │ │ ├── 1768583941244-migration.ts │ │ │ │ ├── 1769516172576-migration.ts │ │ │ │ ├── 1769516172577-migration.ts │ │ │ │ ├── 1770043707083-migration.ts │ │ │ │ ├── 1770212429837-migration.ts │ │ │ │ ├── 1770823569571-migration.ts │ │ │ │ ├── 1770880371265-migration.ts │ │ │ │ ├── README.md │ │ │ │ ├── data-source.ts │ │ │ │ ├── post-deploy/ │ │ │ │ │ └── data-source.ts │ │ │ │ └── pre-deploy/ │ │ │ │ ├── 1770900000000-migration.ts │ │ │ │ ├── 1773744656413-migration.ts │ │ │ │ ├── 1773916204375-migration.ts │ │ │ │ └── data-source.ts │ │ │ ├── notification/ │ │ │ │ ├── emitters/ │ │ │ │ │ └── notification-redis.emitter.ts │ │ │ │ ├── gateways/ │ │ │ │ │ ├── notification-emitter.abstract.ts │ │ │ │ │ └── notification.gateway.ts │ │ │ │ ├── notification.module.ts │ │ │ │ └── services/ │ │ │ │ └── notification.service.ts │ │ │ ├── object-storage/ │ │ │ │ ├── controllers/ │ │ │ │ │ └── 
object-storage.controller.ts │ │ │ │ ├── object-storage.module.ts │ │ │ │ └── services/ │ │ │ │ └── object-storage.service.ts │ │ │ ├── openapi-webhooks.ts │ │ │ ├── openapi.config.ts │ │ │ ├── organization/ │ │ │ │ ├── constants/ │ │ │ │ │ ├── global-organization-roles.constant.ts │ │ │ │ │ ├── organization-events.constant.ts │ │ │ │ │ ├── sandbox-states-consuming-compute.constant.ts │ │ │ │ │ ├── sandbox-states-consuming-disk.constant.ts │ │ │ │ │ ├── snapshot-states-consuming-resources.constant.ts │ │ │ │ │ └── volume-states-consuming-resources.constant.ts │ │ │ │ ├── controllers/ │ │ │ │ │ ├── organization-invitation.controller.ts │ │ │ │ │ ├── organization-region.controller.ts │ │ │ │ │ ├── organization-role.controller.ts │ │ │ │ │ ├── organization-user.controller.ts │ │ │ │ │ └── organization.controller.ts │ │ │ │ ├── decorators/ │ │ │ │ │ ├── required-organization-member-role.decorator.ts │ │ │ │ │ └── required-organization-resource-permissions.decorator.ts │ │ │ │ ├── dto/ │ │ │ │ │ ├── create-organization-invitation.dto.ts │ │ │ │ │ ├── create-organization-quota.dto.ts │ │ │ │ │ ├── create-organization-role.dto.ts │ │ │ │ │ ├── create-organization.dto.ts │ │ │ │ │ ├── create-organization.internal.dto.ts │ │ │ │ │ ├── organization-invitation.dto.ts │ │ │ │ │ ├── organization-role.dto.ts │ │ │ │ │ ├── organization-sandbox-default-limited-network-egress.dto.ts │ │ │ │ │ ├── organization-suspension.dto.ts │ │ │ │ │ ├── organization-usage-overview.dto.ts │ │ │ │ │ ├── organization-user.dto.ts │ │ │ │ │ ├── organization.dto.ts │ │ │ │ │ ├── otel-config.dto.ts │ │ │ │ │ ├── region-quota.dto.ts │ │ │ │ │ ├── sandbox-usage-overview-internal.dto.ts │ │ │ │ │ ├── snapshot-usage-overview-internal.dto.ts │ │ │ │ │ ├── update-organization-default-region.dto.ts │ │ │ │ │ ├── update-organization-invitation.dto.ts │ │ │ │ │ ├── update-organization-member-access.dto.ts │ │ │ │ │ ├── update-organization-quota.dto.ts │ │ │ │ │ ├── update-organization-region-quota.dto.ts │ │ │ 
│ │ ├── update-organization-role.dto.ts │ │ │ │ │ └── volume-usage-overview-internal.dto.ts │ │ │ │ ├── entities/ │ │ │ │ │ ├── organization-invitation.entity.ts │ │ │ │ │ ├── organization-role.entity.ts │ │ │ │ │ ├── organization-user.entity.ts │ │ │ │ │ ├── organization.entity.ts │ │ │ │ │ └── region-quota.entity.ts │ │ │ │ ├── enums/ │ │ │ │ │ ├── organization-invitation-status.enum.ts │ │ │ │ │ ├── organization-member-role.enum.ts │ │ │ │ │ └── organization-resource-permission.enum.ts │ │ │ │ ├── events/ │ │ │ │ │ ├── organization-invitation-accepted.event.ts │ │ │ │ │ ├── organization-invitation-created.event.ts │ │ │ │ │ ├── organization-resource-permissions-unassigned.event.ts │ │ │ │ │ ├── organization-suspended-sandbox-stopped.event.ts │ │ │ │ │ └── organization-suspended-snapshot-deactivated.event.ts │ │ │ │ ├── exceptions/ │ │ │ │ │ └── DefaultRegionRequiredException.ts │ │ │ │ ├── guards/ │ │ │ │ │ ├── organization-access.guard.ts │ │ │ │ │ ├── organization-action.guard.ts │ │ │ │ │ └── organization-resource-action.guard.ts │ │ │ │ ├── helpers/ │ │ │ │ │ └── organization-usage.helper.ts │ │ │ │ ├── organization.module.ts │ │ │ │ └── services/ │ │ │ │ ├── organization-invitation.service.ts │ │ │ │ ├── organization-role.service.ts │ │ │ │ ├── organization-usage.service.ts │ │ │ │ ├── organization-user.service.ts │ │ │ │ └── organization.service.ts │ │ │ ├── region/ │ │ │ │ ├── constants/ │ │ │ │ │ ├── region-events.constant.ts │ │ │ │ │ └── region-name-regex.constant.ts │ │ │ │ ├── controllers/ │ │ │ │ │ └── region.controller.ts │ │ │ │ ├── dto/ │ │ │ │ │ ├── create-region-internal.dto.ts │ │ │ │ │ ├── create-region.dto.ts │ │ │ │ │ ├── create-region.internal.dto.ts │ │ │ │ │ ├── regenerate-api-key.dto.ts │ │ │ │ │ ├── region.dto.ts │ │ │ │ │ ├── snapshot-manager-credentials.dto.ts │ │ │ │ │ └── update-region.dto.ts │ │ │ │ ├── entities/ │ │ │ │ │ └── region.entity.ts │ │ │ │ ├── enums/ │ │ │ │ │ └── region-type.enum.ts │ │ │ │ ├── events/ │ │ │ │ │ ├── 
region-created.event.ts │ │ │ │ │ ├── region-deleted.event.ts │ │ │ │ │ └── region-snapshot-manager-creds.event.ts │ │ │ │ ├── guards/ │ │ │ │ │ └── region-access.guard.ts │ │ │ │ ├── region.module.ts │ │ │ │ └── services/ │ │ │ │ └── region.service.ts │ │ │ ├── sandbox/ │ │ │ │ ├── common/ │ │ │ │ │ ├── redis-lock.provider.ts │ │ │ │ │ └── runner-service-info.ts │ │ │ │ ├── constants/ │ │ │ │ │ ├── errors-for-recovery.ts │ │ │ │ │ ├── runner-events.ts │ │ │ │ │ ├── runner-name-regex.constant.ts │ │ │ │ │ ├── sandbox-events.constants.ts │ │ │ │ │ ├── sandbox.constants.ts │ │ │ │ │ ├── snapshot-events.ts │ │ │ │ │ ├── volume-events.ts │ │ │ │ │ └── warmpool-events.constants.ts │ │ │ │ ├── controllers/ │ │ │ │ │ ├── job.controller.ts │ │ │ │ │ ├── preview.controller.ts │ │ │ │ │ ├── runner.controller.ts │ │ │ │ │ ├── sandbox.controller.ts │ │ │ │ │ ├── snapshot.controller.ts │ │ │ │ │ ├── toolbox.deprecated.controller.ts │ │ │ │ │ ├── volume.controller.ts │ │ │ │ │ └── workspace.deprecated.controller.ts │ │ │ │ ├── dto/ │ │ │ │ │ ├── build-info.dto.ts │ │ │ │ │ ├── create-build-info.dto.ts │ │ │ │ │ ├── create-runner-internal.dto.ts │ │ │ │ │ ├── create-runner-response.dto.ts │ │ │ │ │ ├── create-runner.dto.ts │ │ │ │ │ ├── create-sandbox.dto.ts │ │ │ │ │ ├── create-snapshot.dto.ts │ │ │ │ │ ├── create-volume.dto.ts │ │ │ │ │ ├── create-workspace.deprecated.dto.ts │ │ │ │ │ ├── download-files.dto.ts │ │ │ │ │ ├── job-type-map.dto.ts │ │ │ │ │ ├── job.dto.ts │ │ │ │ │ ├── list-sandboxes-query.dto.ts │ │ │ │ │ ├── list-snapshots-query.dto.ts │ │ │ │ │ ├── lsp.dto.ts │ │ │ │ │ ├── paginated-sandboxes.dto.ts │ │ │ │ │ ├── paginated-snapshots.dto.ts │ │ │ │ │ ├── port-preview-url.dto.ts │ │ │ │ │ ├── registry-push-access-dto.ts │ │ │ │ │ ├── resize-sandbox.dto.ts │ │ │ │ │ ├── runner-full.dto.ts │ │ │ │ │ ├── runner-health.dto.ts │ │ │ │ │ ├── runner-snapshot.dto.ts │ │ │ │ │ ├── runner-status.dto.ts │ │ │ │ │ ├── runner.dto.ts │ │ │ │ │ ├── sandbox.dto.ts │ │ │ │ │ ├── 
snapshot.dto.ts │ │ │ │ │ ├── ssh-access.dto.ts │ │ │ │ │ ├── storage-access-dto.ts │ │ │ │ │ ├── toolbox-proxy-url.dto.ts │ │ │ │ │ ├── toolbox.deprecated.dto.ts │ │ │ │ │ ├── update-sandbox-network-settings.dto.ts │ │ │ │ │ ├── update-sandbox-state.dto.ts │ │ │ │ │ ├── update-snapshot.dto.ts │ │ │ │ │ ├── upload-file.dto.ts │ │ │ │ │ ├── volume.dto.ts │ │ │ │ │ ├── workspace-port-preview-url.deprecated.dto.ts │ │ │ │ │ └── workspace.deprecated.dto.ts │ │ │ │ ├── entities/ │ │ │ │ │ ├── build-info.entity.ts │ │ │ │ │ ├── job.entity.ts │ │ │ │ │ ├── runner.entity.ts │ │ │ │ │ ├── sandbox.entity.ts │ │ │ │ │ ├── snapshot-region.entity.ts │ │ │ │ │ ├── snapshot-runner.entity.ts │ │ │ │ │ ├── snapshot.entity.ts │ │ │ │ │ ├── ssh-access.entity.ts │ │ │ │ │ ├── volume.entity.ts │ │ │ │ │ └── warm-pool.entity.ts │ │ │ │ ├── enums/ │ │ │ │ │ ├── backup-state.enum.ts │ │ │ │ │ ├── job-status.enum.ts │ │ │ │ │ ├── job-type.enum.ts │ │ │ │ │ ├── resource-type.enum.ts │ │ │ │ │ ├── runner-state.enum.ts │ │ │ │ │ ├── sandbox-class.enum.ts │ │ │ │ │ ├── sandbox-desired-state.enum.ts │ │ │ │ │ ├── sandbox-state.enum.ts │ │ │ │ │ ├── snapshot-runner-state.enum.ts │ │ │ │ │ ├── snapshot-state.enum.ts │ │ │ │ │ └── volume-state.enum.ts │ │ │ │ ├── errors/ │ │ │ │ │ ├── runner-api-error.ts │ │ │ │ │ ├── runner-not-ready.error.ts │ │ │ │ │ └── snapshot-state-error.ts │ │ │ │ ├── events/ │ │ │ │ │ ├── runner-created.event.ts │ │ │ │ │ ├── runner-deleted.event.ts │ │ │ │ │ ├── runner-state-updated.event.ts │ │ │ │ │ ├── runner-unschedulable-updated.event.ts │ │ │ │ │ ├── sandbox-archived.event.ts │ │ │ │ │ ├── sandbox-backup-created.event.ts │ │ │ │ │ ├── sandbox-create.event.ts │ │ │ │ │ ├── sandbox-desired-state-updated.event.ts │ │ │ │ │ ├── sandbox-destroyed.event.ts │ │ │ │ │ ├── sandbox-organization-updated.event.ts │ │ │ │ │ ├── sandbox-public-status-updated.event.ts │ │ │ │ │ ├── sandbox-started.event.ts │ │ │ │ │ ├── sandbox-state-updated.event.ts │ │ │ │ │ ├── 
sandbox-stopped.event.ts │ │ │ │ │ ├── snapshot-activated.event.ts │ │ │ │ │ ├── snapshot-created.event.ts │ │ │ │ │ ├── snapshot-removed.event.ts │ │ │ │ │ ├── snapshot-state-updated.event.ts │ │ │ │ │ ├── volume-created.event.ts │ │ │ │ │ ├── volume-last-used-at-updated.event.ts │ │ │ │ │ ├── volume-state-updated.event.ts │ │ │ │ │ └── warmpool-topup-requested.event.ts │ │ │ │ ├── guards/ │ │ │ │ │ ├── job-access.guard.ts │ │ │ │ │ ├── proxy.guard.ts │ │ │ │ │ ├── region-runner-access.guard.ts │ │ │ │ │ ├── region-sandbox-access.guard.ts │ │ │ │ │ ├── runner-access.guard.ts │ │ │ │ │ ├── sandbox-access.guard.ts │ │ │ │ │ ├── snapshot-access.guard.ts │ │ │ │ │ ├── snapshot-read-access.guard.ts │ │ │ │ │ ├── ssh-gateway.guard.ts │ │ │ │ │ └── volume-access.guard.ts │ │ │ │ ├── managers/ │ │ │ │ │ ├── backup.manager.ts │ │ │ │ │ ├── sandbox-actions/ │ │ │ │ │ │ ├── sandbox-archive.action.ts │ │ │ │ │ │ ├── sandbox-destroy.action.ts │ │ │ │ │ │ ├── sandbox-start.action.ts │ │ │ │ │ │ ├── sandbox-stop.action.ts │ │ │ │ │ │ └── sandbox.action.ts │ │ │ │ │ ├── sandbox.manager.ts │ │ │ │ │ ├── snapshot.manager.ts │ │ │ │ │ └── volume.manager.ts │ │ │ │ ├── proxy/ │ │ │ │ │ └── log-proxy.ts │ │ │ │ ├── repositories/ │ │ │ │ │ └── sandbox.repository.ts │ │ │ │ ├── runner-adapter/ │ │ │ │ │ ├── runnerAdapter.ts │ │ │ │ │ ├── runnerAdapter.v0.ts │ │ │ │ │ └── runnerAdapter.v2.ts │ │ │ │ ├── sandbox.module.ts │ │ │ │ ├── services/ │ │ │ │ │ ├── job-state-handler.service.ts │ │ │ │ │ ├── job.service.ts │ │ │ │ │ ├── proxy-cache-invalidation.service.ts │ │ │ │ │ ├── runner.service.ts │ │ │ │ │ ├── sandbox-lookup-cache-invalidation.service.ts │ │ │ │ │ ├── sandbox-warm-pool.service.ts │ │ │ │ │ ├── sandbox.service.ts │ │ │ │ │ ├── snapshot.service.ts │ │ │ │ │ ├── toolbox.deprecated.service.ts │ │ │ │ │ └── volume.service.ts │ │ │ │ ├── subscribers/ │ │ │ │ │ ├── runner.subscriber.ts │ │ │ │ │ ├── snapshot.subscriber.ts │ │ │ │ │ └── volume.subscriber.ts │ │ │ │ └── utils/ │ │ │ 
│ ├── lock-key.util.ts │ │ │ │ ├── network-validation.util.ts │ │ │ │ ├── runner-lookup-cache.util.ts │ │ │ │ ├── sandbox-lookup-cache.util.ts │ │ │ │ ├── sanitize-error.util.ts │ │ │ │ └── volume-mount-path-validation.util.ts │ │ │ ├── sandbox-telemetry/ │ │ │ │ ├── controllers/ │ │ │ │ │ └── sandbox-telemetry.controller.ts │ │ │ │ ├── dto/ │ │ │ │ │ ├── index.ts │ │ │ │ │ ├── log-entry.dto.ts │ │ │ │ │ ├── metrics-response.dto.ts │ │ │ │ │ ├── paginated-logs.dto.ts │ │ │ │ │ ├── paginated-traces.dto.ts │ │ │ │ │ ├── telemetry-query-params.dto.ts │ │ │ │ │ ├── trace-span.dto.ts │ │ │ │ │ └── trace-summary.dto.ts │ │ │ │ ├── guards/ │ │ │ │ │ └── analytics-api-disabled.guard.ts │ │ │ │ ├── index.ts │ │ │ │ ├── sandbox-telemetry.module.ts │ │ │ │ └── services/ │ │ │ │ └── sandbox-telemetry.service.ts │ │ │ ├── tracing.ts │ │ │ ├── usage/ │ │ │ │ ├── entities/ │ │ │ │ │ ├── sandbox-usage-period-archive.entity.ts │ │ │ │ │ └── sandbox-usage-period.entity.ts │ │ │ │ ├── services/ │ │ │ │ │ └── usage.service.ts │ │ │ │ └── usage.module.ts │ │ │ ├── user/ │ │ │ │ ├── constants/ │ │ │ │ │ ├── acount-provider-display-name.constant.ts │ │ │ │ │ └── user-events.constant.ts │ │ │ │ ├── dto/ │ │ │ │ │ ├── account-provider.dto.ts │ │ │ │ │ ├── create-linked-account.dto.ts │ │ │ │ │ ├── create-user.dto.ts │ │ │ │ │ ├── update-user.dto.ts │ │ │ │ │ ├── user-public-key.dto.ts │ │ │ │ │ └── user.dto.ts │ │ │ │ ├── enums/ │ │ │ │ │ ├── account-provider.enum.ts │ │ │ │ │ └── system-role.enum.ts │ │ │ │ ├── events/ │ │ │ │ │ ├── user-created.event.ts │ │ │ │ │ ├── user-deleted.event.ts │ │ │ │ │ └── user-email-verified.event.ts │ │ │ │ ├── user.controller.ts │ │ │ │ ├── user.entity.ts │ │ │ │ ├── user.module.ts │ │ │ │ └── user.service.ts │ │ │ └── webhook/ │ │ │ ├── README.md │ │ │ ├── constants/ │ │ │ │ └── webhook-events.constants.ts │ │ │ ├── controllers/ │ │ │ │ └── webhook.controller.ts │ │ │ ├── dto/ │ │ │ │ ├── send-webhook.dto.ts │ │ │ │ ├── webhook-app-portal-access.dto.ts │ 
│ │ │ ├── webhook-event-payloads.dto.ts │ │ │ │ └── webhook-initialization-status.dto.ts │ │ │ ├── entities/ │ │ │ │ └── webhook-initialization.entity.ts │ │ │ ├── index.ts │ │ │ ├── services/ │ │ │ │ ├── webhook-event-handler.service.ts │ │ │ │ └── webhook.service.ts │ │ │ └── webhook.module.ts │ │ ├── tsconfig.app.json │ │ ├── tsconfig.json │ │ ├── tsconfig.spec.json │ │ └── webpack.config.js │ ├── cli/ │ │ ├── apiclient/ │ │ │ ├── api_client.go │ │ │ └── error_handler.go │ │ ├── auth/ │ │ │ ├── auth.go │ │ │ └── auth_success.html │ │ ├── cmd/ │ │ │ ├── auth/ │ │ │ │ ├── login.go │ │ │ │ └── logout.go │ │ │ ├── autocomplete.go │ │ │ ├── common/ │ │ │ │ ├── aliases.go │ │ │ │ ├── build.go │ │ │ │ ├── format.go │ │ │ │ ├── logs.go │ │ │ │ ├── organization.go │ │ │ │ ├── sandbox.go │ │ │ │ ├── ssh.go │ │ │ │ ├── ssh_unix.go │ │ │ │ ├── ssh_windows.go │ │ │ │ ├── state.go │ │ │ │ └── validate.go │ │ │ ├── docs.go │ │ │ ├── generatedocs.go │ │ │ ├── mcp/ │ │ │ │ ├── agents/ │ │ │ │ │ ├── claude.go │ │ │ │ │ ├── common.go │ │ │ │ │ ├── cursor.go │ │ │ │ │ └── windsurf.go │ │ │ │ ├── config.go │ │ │ │ ├── init.go │ │ │ │ ├── mcp.go │ │ │ │ └── start.go │ │ │ ├── organization/ │ │ │ │ ├── create.go │ │ │ │ ├── delete.go │ │ │ │ ├── list.go │ │ │ │ ├── organization.go │ │ │ │ └── use.go │ │ │ ├── sandbox/ │ │ │ │ ├── archive.go │ │ │ │ ├── create.go │ │ │ │ ├── delete.go │ │ │ │ ├── exec.go │ │ │ │ ├── info.go │ │ │ │ ├── list.go │ │ │ │ ├── preview_url.go │ │ │ │ ├── sandbox.go │ │ │ │ ├── ssh.go │ │ │ │ ├── start.go │ │ │ │ └── stop.go │ │ │ ├── snapshot/ │ │ │ │ ├── create.go │ │ │ │ ├── delete.go │ │ │ │ ├── list.go │ │ │ │ ├── push.go │ │ │ │ └── snapshot.go │ │ │ ├── version.go │ │ │ └── volume/ │ │ │ ├── create.go │ │ │ ├── delete.go │ │ │ ├── get.go │ │ │ ├── list.go │ │ │ └── volume.go │ │ ├── config/ │ │ │ └── config.go │ │ ├── docker/ │ │ │ └── build.go │ │ ├── docs/ │ │ │ ├── daytona.md │ │ │ ├── daytona_archive.md │ │ │ ├── daytona_autocomplete.md │ │ │ ├── 
daytona_create.md │ │ │ ├── daytona_delete.md │ │ │ ├── daytona_docs.md │ │ │ ├── daytona_exec.md │ │ │ ├── daytona_info.md │ │ │ ├── daytona_list.md │ │ │ ├── daytona_login.md │ │ │ ├── daytona_logout.md │ │ │ ├── daytona_mcp.md │ │ │ ├── daytona_mcp_config.md │ │ │ ├── daytona_mcp_init.md │ │ │ ├── daytona_mcp_start.md │ │ │ ├── daytona_organization.md │ │ │ ├── daytona_organization_create.md │ │ │ ├── daytona_organization_delete.md │ │ │ ├── daytona_organization_list.md │ │ │ ├── daytona_organization_use.md │ │ │ ├── daytona_preview-url.md │ │ │ ├── daytona_snapshot.md │ │ │ ├── daytona_snapshot_create.md │ │ │ ├── daytona_snapshot_delete.md │ │ │ ├── daytona_snapshot_list.md │ │ │ ├── daytona_snapshot_push.md │ │ │ ├── daytona_ssh.md │ │ │ ├── daytona_start.md │ │ │ ├── daytona_stop.md │ │ │ ├── daytona_version.md │ │ │ ├── daytona_volume.md │ │ │ ├── daytona_volume_create.md │ │ │ ├── daytona_volume_delete.md │ │ │ ├── daytona_volume_get.md │ │ │ └── daytona_volume_list.md │ │ ├── go.mod │ │ ├── go.sum │ │ ├── hack/ │ │ │ ├── build.sh │ │ │ ├── docs/ │ │ │ │ ├── daytona.yaml │ │ │ │ ├── daytona_archive.yaml │ │ │ │ ├── daytona_autocomplete.yaml │ │ │ │ ├── daytona_create.yaml │ │ │ │ ├── daytona_delete.yaml │ │ │ │ ├── daytona_docs.yaml │ │ │ │ ├── daytona_exec.yaml │ │ │ │ ├── daytona_info.yaml │ │ │ │ ├── daytona_list.yaml │ │ │ │ ├── daytona_login.yaml │ │ │ │ ├── daytona_logout.yaml │ │ │ │ ├── daytona_mcp.yaml │ │ │ │ ├── daytona_mcp_config.yaml │ │ │ │ ├── daytona_mcp_init.yaml │ │ │ │ ├── daytona_mcp_start.yaml │ │ │ │ ├── daytona_organization.yaml │ │ │ │ ├── daytona_organization_create.yaml │ │ │ │ ├── daytona_organization_delete.yaml │ │ │ │ ├── daytona_organization_list.yaml │ │ │ │ ├── daytona_organization_use.yaml │ │ │ │ ├── daytona_preview-url.yaml │ │ │ │ ├── daytona_snapshot.yaml │ │ │ │ ├── daytona_snapshot_create.yaml │ │ │ │ ├── daytona_snapshot_delete.yaml │ │ │ │ ├── daytona_snapshot_list.yaml │ │ │ │ ├── daytona_snapshot_push.yaml │ │ │ 
│ ├── daytona_ssh.yaml │ │ │ │ ├── daytona_start.yaml │ │ │ │ ├── daytona_stop.yaml │ │ │ │ ├── daytona_version.yaml │ │ │ │ ├── daytona_volume.yaml │ │ │ │ ├── daytona_volume_create.yaml │ │ │ │ ├── daytona_volume_delete.yaml │ │ │ │ ├── daytona_volume_get.yaml │ │ │ │ └── daytona_volume_list.yaml │ │ │ └── generate-cli-docs.sh │ │ ├── internal/ │ │ │ ├── buildinfo.go │ │ │ └── cmd.go │ │ ├── main.go │ │ ├── mcp/ │ │ │ ├── README.md │ │ │ ├── server.go │ │ │ └── tools/ │ │ │ ├── common.go │ │ │ ├── create_folder.go │ │ │ ├── create_sandbox.go │ │ │ ├── delete_file.go │ │ │ ├── destroy_sandbox.go │ │ │ ├── download_file.go │ │ │ ├── execute_command.go │ │ │ ├── file_info.go │ │ │ ├── git_clone.go │ │ │ ├── list_files.go │ │ │ ├── move_file.go │ │ │ ├── preview_link.go │ │ │ └── upload_file.go │ │ ├── pkg/ │ │ │ └── minio/ │ │ │ └── minio.go │ │ ├── project.json │ │ ├── toolbox/ │ │ │ └── toolbox.go │ │ ├── util/ │ │ │ └── pointer.go │ │ └── views/ │ │ ├── common/ │ │ │ ├── common.go │ │ │ ├── prompt.go │ │ │ ├── select.go │ │ │ └── styles.go │ │ ├── organization/ │ │ │ ├── info.go │ │ │ ├── list.go │ │ │ └── select.go │ │ ├── sandbox/ │ │ │ ├── info.go │ │ │ └── list.go │ │ ├── snapshot/ │ │ │ ├── info.go │ │ │ └── list.go │ │ ├── util/ │ │ │ ├── empty_list.go │ │ │ ├── info.go │ │ │ ├── spinner.go │ │ │ ├── table.go │ │ │ └── time.go │ │ └── volume/ │ │ ├── info.go │ │ └── list.go │ ├── daemon/ │ │ ├── .gitignore │ │ ├── cmd/ │ │ │ └── daemon/ │ │ │ ├── config/ │ │ │ │ └── config.go │ │ │ └── main.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── internal/ │ │ │ ├── buildinfo.go │ │ │ └── util/ │ │ │ ├── entrypoint_logs.go │ │ │ ├── entrypoint_session.go │ │ │ ├── log_reader.go │ │ │ ├── pointer.go │ │ │ ├── sandbox.go │ │ │ ├── shell_quote.go │ │ │ ├── version.go │ │ │ ├── websocket.go │ │ │ └── ws_keepalive.go │ │ ├── pkg/ │ │ │ ├── common/ │ │ │ │ ├── errors.go │ │ │ │ ├── get_shell.go │ │ │ │ └── spawn_tty.go │ │ │ ├── git/ │ │ │ │ ├── add.go │ │ │ │ ├── branch.go │ │ 
│ │ ├── checkout.go │ │ │ │ ├── clone.go │ │ │ │ ├── commit.go │ │ │ │ ├── config.go │ │ │ │ ├── log.go │ │ │ │ ├── pull.go │ │ │ │ ├── push.go │ │ │ │ ├── service.go │ │ │ │ ├── service_test.go │ │ │ │ ├── status.go │ │ │ │ └── types.go │ │ │ ├── gitprovider/ │ │ │ │ └── types.go │ │ │ ├── recording/ │ │ │ │ ├── delete.go │ │ │ │ ├── get.go │ │ │ │ ├── list.go │ │ │ │ ├── service.go │ │ │ │ ├── start.go │ │ │ │ ├── stop.go │ │ │ │ └── types.go │ │ │ ├── recordingdashboard/ │ │ │ │ ├── assets.go │ │ │ │ ├── server.go │ │ │ │ └── static/ │ │ │ │ └── index.html │ │ │ ├── session/ │ │ │ │ ├── command.go │ │ │ │ ├── common.go │ │ │ │ ├── create.go │ │ │ │ ├── delete.go │ │ │ │ ├── execute.go │ │ │ │ ├── get.go │ │ │ │ ├── input.go │ │ │ │ ├── list.go │ │ │ │ ├── log.go │ │ │ │ ├── service.go │ │ │ │ └── types.go │ │ │ ├── ssh/ │ │ │ │ ├── config/ │ │ │ │ │ └── config.go │ │ │ │ ├── server.go │ │ │ │ └── unix_forward.go │ │ │ ├── terminal/ │ │ │ │ ├── assets.go │ │ │ │ ├── decoder.go │ │ │ │ ├── server.go │ │ │ │ └── static/ │ │ │ │ └── index.html │ │ │ └── toolbox/ │ │ │ ├── computeruse/ │ │ │ │ ├── disabled_middleware.go │ │ │ │ ├── handler.go │ │ │ │ ├── interface.go │ │ │ │ ├── lazy.go │ │ │ │ ├── manager/ │ │ │ │ │ └── manager.go │ │ │ │ ├── recording/ │ │ │ │ │ ├── controller.go │ │ │ │ │ ├── download.go │ │ │ │ │ ├── recording.go │ │ │ │ │ ├── start.go │ │ │ │ │ ├── stop.go │ │ │ │ │ └── types.go │ │ │ │ ├── rpc_client.go │ │ │ │ └── rpc_server.go │ │ │ ├── config/ │ │ │ │ └── config.go │ │ │ ├── controller.go │ │ │ ├── docs/ │ │ │ │ ├── docs.go │ │ │ │ ├── swagger.json │ │ │ │ └── swagger.yaml │ │ │ ├── fs/ │ │ │ │ ├── create_folder.go │ │ │ │ ├── delete_file.go │ │ │ │ ├── download_file.go │ │ │ │ ├── download_files.go │ │ │ │ ├── find_in_files.go │ │ │ │ ├── get_file_info.go │ │ │ │ ├── list_files.go │ │ │ │ ├── move_file.go │ │ │ │ ├── replace_in_files.go │ │ │ │ ├── search_files.go │ │ │ │ ├── set_file_permissions.go │ │ │ │ ├── types.go │ │ │ │ ├── 
upload_file.go │ │ │ │ └── upload_files.go │ │ │ ├── git/ │ │ │ │ ├── add.go │ │ │ │ ├── checkout.go │ │ │ │ ├── clone_repository.go │ │ │ │ ├── commit.go │ │ │ │ ├── create_branch.go │ │ │ │ ├── delete_branch.go │ │ │ │ ├── history.go │ │ │ │ ├── list_branches.go │ │ │ │ ├── pull.go │ │ │ │ ├── push.go │ │ │ │ ├── status.go │ │ │ │ └── types.go │ │ │ ├── lsp/ │ │ │ │ ├── client.go │ │ │ │ ├── lsp.go │ │ │ │ ├── python_lsp.go │ │ │ │ ├── server.go │ │ │ │ ├── service.go │ │ │ │ ├── types.go │ │ │ │ └── typescript_lsp.go │ │ │ ├── middlewares/ │ │ │ │ └── error.go │ │ │ ├── port/ │ │ │ │ ├── detector.go │ │ │ │ └── types.go │ │ │ ├── process/ │ │ │ │ ├── execute.go │ │ │ │ ├── interpreter/ │ │ │ │ │ ├── controller.go │ │ │ │ │ ├── manager.go │ │ │ │ │ ├── repl_client.go │ │ │ │ │ ├── repl_worker.py │ │ │ │ │ ├── types.go │ │ │ │ │ └── websocket.go │ │ │ │ ├── pty/ │ │ │ │ │ ├── controller.go │ │ │ │ │ ├── manager.go │ │ │ │ │ ├── session.go │ │ │ │ │ ├── types.go │ │ │ │ │ ├── websocket.go │ │ │ │ │ └── ws_client.go │ │ │ │ ├── session/ │ │ │ │ │ ├── controller.go │ │ │ │ │ ├── execute.go │ │ │ │ │ ├── input.go │ │ │ │ │ ├── log.go │ │ │ │ │ ├── session.go │ │ │ │ │ └── types.go │ │ │ │ └── types.go │ │ │ ├── proxy/ │ │ │ │ └── proxy.go │ │ │ ├── server.go │ │ │ ├── telemetry.go │ │ │ ├── types.go │ │ │ └── validator.go │ │ ├── project.json │ │ └── tools/ │ │ └── xterm.go │ ├── dashboard/ │ │ ├── .prettierignore │ │ ├── .storybook/ │ │ │ ├── main.ts │ │ │ ├── preview.tsx │ │ │ └── tsconfig.json │ │ ├── eslint.config.mjs │ │ ├── index.html │ │ ├── postcss.config.js │ │ ├── project.json │ │ ├── public/ │ │ │ └── mockServiceWorker.js │ │ ├── src/ │ │ │ ├── App.css │ │ │ ├── App.tsx │ │ │ ├── api/ │ │ │ │ ├── apiClient.ts │ │ │ │ └── errors.ts │ │ │ ├── assets/ │ │ │ │ └── Logo.tsx │ │ │ ├── billing-api/ │ │ │ │ ├── billingApiClient.ts │ │ │ │ ├── index.ts │ │ │ │ └── types/ │ │ │ │ ├── Invoice.ts │ │ │ │ ├── OrganizationEmail.ts │ │ │ │ ├── OrganizationTier.ts │ │ │ │ 
├── OrganizationUsage.ts │ │ │ │ ├── OrganizationWallet.ts │ │ │ │ ├── index.ts │ │ │ │ └── tier.ts │ │ │ ├── components/ │ │ │ │ ├── AccountProviderIcon.tsx │ │ │ │ ├── AnnouncementBanner.tsx │ │ │ │ ├── ApiKeyTable.tsx │ │ │ │ ├── AuditLogTable.tsx │ │ │ │ ├── Banner.tsx │ │ │ │ ├── CodeBlock.tsx │ │ │ │ ├── CommandPalette.tsx │ │ │ │ ├── ComparisonTable.tsx │ │ │ │ ├── CopyButton.tsx │ │ │ │ ├── CreateApiKeyDialog.tsx │ │ │ │ ├── CreateRegionDialog.tsx │ │ │ │ ├── CreateRunnerDialog.tsx │ │ │ │ ├── DebouncedInput.tsx │ │ │ │ ├── EllipsisWithTooltip.tsx │ │ │ │ ├── ErrorBoundaryFallback.tsx │ │ │ │ ├── Invoices/ │ │ │ │ │ ├── InvoicesTableActions.tsx │ │ │ │ │ ├── InvoicesTableHeader.tsx │ │ │ │ │ ├── columns.tsx │ │ │ │ │ ├── index.tsx │ │ │ │ │ ├── types.ts │ │ │ │ │ └── useInvoicesTable.ts │ │ │ │ ├── LimitUsageChart.tsx │ │ │ │ ├── LiveIndicator.tsx │ │ │ │ ├── LoadingFallback.tsx │ │ │ │ ├── OrganizationMembers/ │ │ │ │ │ ├── CancelOrganizationInvitationDialog.tsx │ │ │ │ │ ├── CreateOrganizationInvitationDialog.tsx │ │ │ │ │ ├── OrganizationInvitationTable.tsx │ │ │ │ │ ├── OrganizationMemberTable.tsx │ │ │ │ │ ├── RemoveOrganizationMemberDialog.tsx │ │ │ │ │ ├── UpdateOrganizationInvitationDialog.tsx │ │ │ │ │ ├── UpdateOrganizationMemberAccessDialog.tsx │ │ │ │ │ └── ViewerOrganizationRoleCheckbox.tsx │ │ │ │ ├── OrganizationRoles/ │ │ │ │ │ ├── CreateOrganizationRoleDialog.tsx │ │ │ │ │ ├── DeleteOrganizationRoleDialog.tsx │ │ │ │ │ ├── OrganizationRoleTable.tsx │ │ │ │ │ └── UpdateOrganizationRoleDialog.tsx │ │ │ │ ├── Organizations/ │ │ │ │ │ ├── CreateOrganizationDialog.tsx │ │ │ │ │ ├── DeleteOrganizationDialog.tsx │ │ │ │ │ ├── LeaveOrganizationDialog.tsx │ │ │ │ │ ├── OrganizationPicker.tsx │ │ │ │ │ └── SetDefaultRegionDialog.tsx │ │ │ │ ├── PageLayout.tsx │ │ │ │ ├── Pagination.tsx │ │ │ │ ├── Playground/ │ │ │ │ │ ├── ActionForm.tsx │ │ │ │ │ ├── ActionRunButton.tsx │ │ │ │ │ ├── Inputs/ │ │ │ │ │ │ ├── CheckboxInput.tsx │ │ │ │ │ │ ├── 
InlineInputFormControl.tsx │ │ │ │ │ │ ├── Label.tsx │ │ │ │ │ │ ├── NumberInput.tsx │ │ │ │ │ │ ├── SelectInput.tsx │ │ │ │ │ │ ├── StackedInputFormControl.tsx │ │ │ │ │ │ └── TextInput.tsx │ │ │ │ │ ├── PlaygroundLayout.tsx │ │ │ │ │ ├── ResponseCard.tsx │ │ │ │ │ ├── Sandbox/ │ │ │ │ │ │ ├── CodeSnippets/ │ │ │ │ │ │ │ ├── index.ts │ │ │ │ │ │ │ ├── python.ts │ │ │ │ │ │ │ ├── types.ts │ │ │ │ │ │ │ ├── typescript.ts │ │ │ │ │ │ │ └── utils.ts │ │ │ │ │ │ ├── CodeSnippetsResponse.tsx │ │ │ │ │ │ └── Parameters/ │ │ │ │ │ │ ├── FileSystem.tsx │ │ │ │ │ │ ├── GitOperations.tsx │ │ │ │ │ │ ├── Management.tsx │ │ │ │ │ │ ├── ProcessCodeExecution.tsx │ │ │ │ │ │ └── index.tsx │ │ │ │ │ ├── Terminal/ │ │ │ │ │ │ ├── Description.tsx │ │ │ │ │ │ └── WebTerminal.tsx │ │ │ │ │ ├── VNC/ │ │ │ │ │ │ ├── DesktopWindowResponse.tsx │ │ │ │ │ │ └── Interaction/ │ │ │ │ │ │ ├── Display.tsx │ │ │ │ │ │ ├── Keyboard.tsx │ │ │ │ │ │ ├── Mouse.tsx │ │ │ │ │ │ ├── Screenshot.tsx │ │ │ │ │ │ └── index.tsx │ │ │ │ │ └── Window.tsx │ │ │ │ ├── PostHogProviderWrapper.tsx │ │ │ │ ├── PrivacyBanner.tsx │ │ │ │ ├── PrivacyPreferencesDialog.tsx │ │ │ │ ├── QuotaLine.tsx │ │ │ │ ├── RegionDetailsSheet.tsx │ │ │ │ ├── RegionTable.tsx │ │ │ │ ├── RegistryTable.tsx │ │ │ │ ├── ResourceChip.tsx │ │ │ │ ├── RunnerDetailsSheet.tsx │ │ │ │ ├── RunnerTable.tsx │ │ │ │ ├── Sandbox/ │ │ │ │ │ └── CreateSandboxSheet.tsx │ │ │ │ ├── SandboxDetailsSheet.tsx │ │ │ │ ├── SandboxTable/ │ │ │ │ │ ├── BulkActionAlertDialog.tsx │ │ │ │ │ ├── SandboxState.tsx │ │ │ │ │ ├── SandboxTableActions.tsx │ │ │ │ │ ├── SandboxTableHeader.tsx │ │ │ │ │ ├── columns.tsx │ │ │ │ │ ├── constants.ts │ │ │ │ │ ├── filters/ │ │ │ │ │ │ ├── LabelFilter.tsx │ │ │ │ │ │ ├── LastEventFilter.tsx │ │ │ │ │ │ ├── RegionFilter.tsx │ │ │ │ │ │ ├── ResourceFilter.tsx │ │ │ │ │ │ ├── SnapshotFilter.tsx │ │ │ │ │ │ └── StateFilter.tsx │ │ │ │ │ ├── index.tsx │ │ │ │ │ ├── state-icons.tsx │ │ │ │ │ ├── types.ts │ │ │ │ │ ├── 
useSandboxCommands.tsx │ │ │ │ │ └── useSandboxTable.ts │ │ │ │ ├── SelectionToast.tsx │ │ │ │ ├── Sidebar.tsx │ │ │ │ ├── SortIcon.tsx │ │ │ │ ├── TableColumnVisibilityToggle.tsx │ │ │ │ ├── TableEmptyState.tsx │ │ │ │ ├── TierComparisonTable.tsx │ │ │ │ ├── TierUpgradeCard.tsx │ │ │ │ ├── TimestampTooltip.tsx │ │ │ │ ├── Tooltip.tsx │ │ │ │ ├── TooltipButton.tsx │ │ │ │ ├── UpdateRegionDialog.tsx │ │ │ │ ├── UsageOverview.tsx │ │ │ │ ├── UsageOverviewIndicator.tsx │ │ │ │ ├── UserOrganizationInvitations/ │ │ │ │ │ ├── DeclineOrganizationInvitationDialog.tsx │ │ │ │ │ ├── OrganizationInvitationActionDialog.tsx │ │ │ │ │ └── UserOrganizationInvitationTable.tsx │ │ │ │ ├── VerifyEmailDialog.tsx │ │ │ │ ├── VolumeTable.tsx │ │ │ │ ├── Webhooks/ │ │ │ │ │ ├── CreateEndpointDialog.tsx │ │ │ │ │ ├── DeliveryStatsLine.tsx │ │ │ │ │ ├── EditEndpointDialog.tsx │ │ │ │ │ ├── EndpointEventsTable/ │ │ │ │ │ │ ├── EndpointEventsTable.tsx │ │ │ │ │ │ ├── EventDetailsSheet.tsx │ │ │ │ │ │ ├── columns.tsx │ │ │ │ │ │ └── index.ts │ │ │ │ │ ├── MessageAttemptsTable.tsx │ │ │ │ │ ├── WebhooksEndpointTable/ │ │ │ │ │ │ ├── WebhooksEndpointTable.tsx │ │ │ │ │ │ ├── columns.tsx │ │ │ │ │ │ └── index.ts │ │ │ │ │ └── WebhooksMessagesTable/ │ │ │ │ │ ├── MessageDetailsSheet.tsx │ │ │ │ │ ├── WebhooksMessagesTable.tsx │ │ │ │ │ └── columns.tsx │ │ │ │ ├── sandboxes/ │ │ │ │ │ ├── CreateSshAccessDialog.tsx │ │ │ │ │ ├── RevokeSshAccessDialog.tsx │ │ │ │ │ ├── SandboxContentTabs.tsx │ │ │ │ │ ├── SandboxDetails.tsx │ │ │ │ │ ├── SandboxHeader.tsx │ │ │ │ │ ├── SandboxInfoPanel.tsx │ │ │ │ │ ├── SandboxLogsTab.tsx │ │ │ │ │ ├── SandboxMetricsTab.tsx │ │ │ │ │ ├── SandboxSpendingTab.tsx │ │ │ │ │ ├── SandboxTerminalTab.tsx │ │ │ │ │ ├── SandboxTracesTab.tsx │ │ │ │ │ ├── SandboxVncTab.tsx │ │ │ │ │ ├── SearchParams.ts │ │ │ │ │ └── index.ts │ │ │ │ ├── snapshots/ │ │ │ │ │ ├── CreateSnapshotDialog.tsx │ │ │ │ │ └── SnapshotTable/ │ │ │ │ │ ├── BulkActionAlertDialog.tsx │ │ │ │ │ ├── 
SnapshotTable.tsx │ │ │ │ │ ├── columns.tsx │ │ │ │ │ ├── index.ts │ │ │ │ │ ├── useSnapshotsCommands.tsx │ │ │ │ │ └── utils.ts │ │ │ │ ├── spending/ │ │ │ │ │ ├── AggregatedUsageChart.tsx │ │ │ │ │ ├── CostBreakdown.tsx │ │ │ │ │ ├── ResourceUsageChart.tsx │ │ │ │ │ ├── SandboxSpendingTab.tsx │ │ │ │ │ ├── SandboxUsageTable.tsx │ │ │ │ │ ├── UsageTimelineChart.tsx │ │ │ │ │ └── index.ts │ │ │ │ ├── telemetry/ │ │ │ │ │ ├── LogsTab.tsx │ │ │ │ │ ├── MetricsTab.tsx │ │ │ │ │ ├── SeverityBadge.tsx │ │ │ │ │ ├── TimeRangeSelector.tsx │ │ │ │ │ ├── TraceDetailsSheet.tsx │ │ │ │ │ ├── TracesTab.tsx │ │ │ │ │ └── index.ts │ │ │ │ └── ui/ │ │ │ │ ├── accordion.tsx │ │ │ │ ├── alert-dialog.tsx │ │ │ │ ├── alert.tsx │ │ │ │ ├── badge.tsx │ │ │ │ ├── button-group.tsx │ │ │ │ ├── button.tsx │ │ │ │ ├── calendar.tsx │ │ │ │ ├── card.tsx │ │ │ │ ├── chart.tsx │ │ │ │ ├── checkbox.tsx │ │ │ │ ├── command.tsx │ │ │ │ ├── data-table-faceted-filter.tsx │ │ │ │ ├── date-picker.tsx │ │ │ │ ├── date-range-picker.tsx │ │ │ │ ├── dialog.tsx │ │ │ │ ├── drawer.tsx │ │ │ │ ├── dropdown-menu.tsx │ │ │ │ ├── empty.tsx │ │ │ │ ├── facet-filter.tsx │ │ │ │ ├── field.tsx │ │ │ │ ├── input-group.tsx │ │ │ │ ├── input.tsx │ │ │ │ ├── kbd.tsx │ │ │ │ ├── label.tsx │ │ │ │ ├── popover.tsx │ │ │ │ ├── radio-group.tsx │ │ │ │ ├── scroll-area.tsx │ │ │ │ ├── select.tsx │ │ │ │ ├── separator.tsx │ │ │ │ ├── sheet.tsx │ │ │ │ ├── sidebar.tsx │ │ │ │ ├── skeleton.tsx │ │ │ │ ├── slider.tsx │ │ │ │ ├── sonner.tsx │ │ │ │ ├── spinner.tsx │ │ │ │ ├── stories/ │ │ │ │ │ ├── accordion.stories.tsx │ │ │ │ │ ├── alert-dialog.stories.tsx │ │ │ │ │ ├── alert.stories.tsx │ │ │ │ │ ├── badge.stories.tsx │ │ │ │ │ ├── button.stories.tsx │ │ │ │ │ ├── calendar.stories.tsx │ │ │ │ │ ├── card.stories.tsx │ │ │ │ │ ├── chart.stories.tsx │ │ │ │ │ ├── checkbox.stories.tsx │ │ │ │ │ ├── colors.stories.tsx │ │ │ │ │ ├── command.stories.tsx │ │ │ │ │ ├── date-picker.stories.tsx │ │ │ │ │ ├── dialog.stories.tsx │ │ │ │ │ 
├── drawer.stories.tsx │ │ │ │ │ ├── dropdown-menu.stories.tsx │ │ │ │ │ ├── facet-filter.stories.tsx │ │ │ │ │ ├── field.stories.tsx │ │ │ │ │ ├── input-group.stories.tsx │ │ │ │ │ ├── input.stories.tsx │ │ │ │ │ ├── kbd.stories.tsx │ │ │ │ │ ├── label.stories.tsx │ │ │ │ │ ├── popover.stories.tsx │ │ │ │ │ ├── radio-group.stories.tsx │ │ │ │ │ ├── scroll-area.stories.tsx │ │ │ │ │ ├── select.stories.tsx │ │ │ │ │ ├── separator.stories.tsx │ │ │ │ │ ├── sheet.stories.tsx │ │ │ │ │ ├── sidebar.stories.tsx │ │ │ │ │ ├── skeleton.stories.tsx │ │ │ │ │ ├── slider.stories.tsx │ │ │ │ │ ├── sonner.stories.tsx │ │ │ │ │ ├── spinner.stories.tsx │ │ │ │ │ ├── switch.stories.tsx │ │ │ │ │ ├── table.stories.tsx │ │ │ │ │ ├── tabs.stories.tsx │ │ │ │ │ ├── textarea.stories.tsx │ │ │ │ │ ├── toggle-group.stories.tsx │ │ │ │ │ ├── toggle.stories.tsx │ │ │ │ │ └── tooltip.stories.tsx │ │ │ │ ├── switch.tsx │ │ │ │ ├── table.tsx │ │ │ │ ├── tabs.tsx │ │ │ │ ├── textarea.tsx │ │ │ │ ├── toggle-group.tsx │ │ │ │ ├── toggle.tsx │ │ │ │ └── tooltip.tsx │ │ │ ├── constants/ │ │ │ │ ├── CreateApiKeyPermissionsGroups.ts │ │ │ │ ├── ExternalLinks.ts │ │ │ │ ├── OrganizationPermissionsGroups.ts │ │ │ │ ├── Pagination.ts │ │ │ │ ├── Playground.ts │ │ │ │ ├── limits.ts │ │ │ │ ├── metrics.ts │ │ │ │ └── webhook-events.ts │ │ │ ├── contexts/ │ │ │ │ ├── ApiContext.tsx │ │ │ │ ├── ConfigContext.tsx │ │ │ │ ├── NotificationSocketContext.tsx │ │ │ │ ├── OrganizationsContext.tsx │ │ │ │ ├── PlaygroundContext.tsx │ │ │ │ ├── RegionsContext.tsx │ │ │ │ ├── SandboxSessionContext.tsx │ │ │ │ ├── SelectedOrganizationContext.tsx │ │ │ │ ├── ThemeContext.tsx │ │ │ │ └── UserOrganizationInvitationsContext.tsx │ │ │ ├── enums/ │ │ │ │ ├── FeatureFlags.ts │ │ │ │ ├── LocalStorageKey.ts │ │ │ │ ├── Playground.ts │ │ │ │ └── RoutePath.ts │ │ │ ├── hooks/ │ │ │ │ ├── mutations/ │ │ │ │ │ ├── useArchiveSandboxMutation.ts │ │ │ │ │ ├── useCreateApiKeyMutation.ts │ │ │ │ │ ├── 
useCreateInvoicePaymentUrlMutation.ts │ │ │ │ │ ├── useCreateSandboxMutation.tsx │ │ │ │ │ ├── useCreateSnapshotMutation.tsx │ │ │ │ │ ├── useCreateSshAccessMutation.ts │ │ │ │ │ ├── useDeleteOrganizationMutation.ts │ │ │ │ │ ├── useDeleteSandboxMutation.ts │ │ │ │ │ ├── useDeleteWebhookEndpointMutation.ts │ │ │ │ │ ├── useDowngradeTierMutation.ts │ │ │ │ │ ├── useEnrollInSmsMfaMutation.ts │ │ │ │ │ ├── useLeaveOrganizationMutation.ts │ │ │ │ │ ├── useRecoverSandboxMutation.ts │ │ │ │ │ ├── useRedeemCouponMutation.ts │ │ │ │ │ ├── useReplayWebhookEventMutation.tsx │ │ │ │ │ ├── useRevokeApiKeyMutation.ts │ │ │ │ │ ├── useRevokeSshAccessMutation.ts │ │ │ │ │ ├── useRotateWebhookSecretMutation.ts │ │ │ │ │ ├── useSetAutomaticTopUpMutation.ts │ │ │ │ │ ├── useSetOrganizationDefaultRegionMutation.ts │ │ │ │ │ ├── useStartSandboxMutation.ts │ │ │ │ │ ├── useStartVncMutation.ts │ │ │ │ │ ├── useStopSandboxMutation.ts │ │ │ │ │ ├── useTopUpWalletMutation.ts │ │ │ │ │ ├── useUnlinkAccountMutation.ts │ │ │ │ │ ├── useUpdateWebhookEndpointMutation.ts │ │ │ │ │ ├── useUpgradeTierMutation.ts │ │ │ │ │ └── useVoidInvoiceMutation.ts │ │ │ │ ├── queries/ │ │ │ │ │ ├── billingQueries.ts │ │ │ │ │ ├── queryKeys.ts │ │ │ │ │ ├── useAccountProvidersQuery.ts │ │ │ │ │ ├── useAnalyticsUsage.ts │ │ │ │ │ ├── useApiKeysQuery.ts │ │ │ │ │ ├── useOrganizationBillingPortalUrlQuery.ts │ │ │ │ │ ├── useOrganizationCheckoutUrlQuery.ts │ │ │ │ │ ├── useOrganizationInvoicesQuery.ts │ │ │ │ │ ├── useOrganizationTierQuery.ts │ │ │ │ │ ├── useOrganizationUsageOverviewQuery.ts │ │ │ │ │ ├── useOrganizationUsageQuery.ts │ │ │ │ │ ├── useOrganizationWalletQuery.ts │ │ │ │ │ ├── usePastOrganizationUsageQuery.ts │ │ │ │ │ ├── useSandboxQuery.ts │ │ │ │ │ ├── useSnapshotsQuery.ts │ │ │ │ │ ├── useTerminalSessionQuery.ts │ │ │ │ │ ├── useTiersQuery.ts │ │ │ │ │ ├── useVncSessionQuery.ts │ │ │ │ │ ├── useVncStatusQuery.ts │ │ │ │ │ ├── useWebhookAppPortalAccessQuery.ts │ │ │ │ │ └── 
useWebhookInitializationStatusQuery.ts │ │ │ │ ├── use-mobile.tsx │ │ │ │ ├── useApi.tsx │ │ │ │ ├── useConfig.ts │ │ │ │ ├── useCopyToClipboard.tsx │ │ │ │ ├── useDeepCompareMemo.ts │ │ │ │ ├── useDocsSearchCommands.tsx │ │ │ │ ├── useMatchMedia.ts │ │ │ │ ├── useNotificationSocket.ts │ │ │ │ ├── useOrganizationRoles.ts │ │ │ │ ├── useOrganizations.ts │ │ │ │ ├── usePlayground.ts │ │ │ │ ├── usePlaygroundSandbox.tsx │ │ │ │ ├── useQueryCountdown.ts │ │ │ │ ├── useRegions.ts │ │ │ │ ├── useSandboxLogs.ts │ │ │ │ ├── useSandboxMetrics.ts │ │ │ │ ├── useSandboxSession.ts │ │ │ │ ├── useSandboxSessionContext.ts │ │ │ │ ├── useSandboxTraceSpans.ts │ │ │ │ ├── useSandboxTraces.ts │ │ │ │ ├── useSandboxWsSync.ts │ │ │ │ ├── useSandboxes.ts │ │ │ │ ├── useSelectedOrganization.ts │ │ │ │ ├── useSuspensionBanner.tsx │ │ │ │ ├── useUserOrganizationInvitations.ts │ │ │ │ └── useWebhooks.ts │ │ │ ├── index.css │ │ │ ├── lib/ │ │ │ │ ├── bulk-action-toast.tsx │ │ │ │ ├── env.ts │ │ │ │ ├── error-handling.ts │ │ │ │ ├── local-storage.ts │ │ │ │ ├── playground.tsx │ │ │ │ ├── schema.ts │ │ │ │ ├── suspended-fetch.ts │ │ │ │ └── utils/ │ │ │ │ ├── index.ts │ │ │ │ └── sandbox.ts │ │ │ ├── main.tsx │ │ │ ├── mocks/ │ │ │ │ ├── browser.ts │ │ │ │ └── handlers.ts │ │ │ ├── pages/ │ │ │ │ ├── AccountSettings.tsx │ │ │ │ ├── AuditLogs.tsx │ │ │ │ ├── Callback.tsx │ │ │ │ ├── Dashboard.tsx │ │ │ │ ├── EmailVerify.tsx │ │ │ │ ├── Experimental.tsx │ │ │ │ ├── Keys.tsx │ │ │ │ ├── LandingPage.tsx │ │ │ │ ├── Limits.tsx │ │ │ │ ├── LinkedAccounts.tsx │ │ │ │ ├── Logout.tsx │ │ │ │ ├── NotFound.tsx │ │ │ │ ├── Onboarding.tsx │ │ │ │ ├── OrganizationMembers.tsx │ │ │ │ ├── OrganizationRoles.tsx │ │ │ │ ├── OrganizationSettings.tsx │ │ │ │ ├── Playground.tsx │ │ │ │ ├── Regions.tsx │ │ │ │ ├── Registries.tsx │ │ │ │ ├── Runners.tsx │ │ │ │ ├── Sandboxes.tsx │ │ │ │ ├── Snapshots.tsx │ │ │ │ ├── Spending.tsx │ │ │ │ ├── UserOrganizationInvitations.tsx │ │ │ │ ├── Volumes.tsx │ │ │ │ ├── 
Wallet.tsx │ │ │ │ ├── WebhookEndpointDetails.tsx │ │ │ │ └── Webhooks.tsx │ │ │ ├── providers/ │ │ │ │ ├── ApiProvider.tsx │ │ │ │ ├── ConfigProvider.tsx │ │ │ │ ├── NotificationSocketProvider.tsx │ │ │ │ ├── OrganizationsProvider.tsx │ │ │ │ ├── PlaygroundProvider.tsx │ │ │ │ ├── PlaygroundSandboxProvider.tsx │ │ │ │ ├── QueryProvider.tsx │ │ │ │ ├── RegionsProvider.tsx │ │ │ │ ├── SandboxSessionProvider.tsx │ │ │ │ ├── SelectedOrganizationProvider.tsx │ │ │ │ ├── SvixProvider.tsx │ │ │ │ └── UserOrganizationInvitationsProvider.tsx │ │ │ ├── services/ │ │ │ │ └── webhookService.ts │ │ │ ├── types/ │ │ │ │ ├── CreateApiKeyPermissionGroup.ts │ │ │ │ ├── DashboardConfig.ts │ │ │ │ ├── OrganizationRolePermissionGroup.ts │ │ │ │ ├── sandbox.ts │ │ │ │ └── window.d.ts │ │ │ ├── vendor/ │ │ │ │ └── pylon/ │ │ │ │ ├── addPylonWidget.ts │ │ │ │ ├── index.ts │ │ │ │ ├── usePylon.ts │ │ │ │ └── usePylonCommands.tsx │ │ │ └── vite-env.d.ts │ │ ├── tailwind.config.js │ │ ├── tsconfig.app.json │ │ ├── tsconfig.json │ │ └── vite.config.mts │ ├── docs/ │ │ ├── .dockerignore │ │ ├── .gitignore │ │ ├── .markdownlint-cli2.jsonc │ │ ├── .nvmrc │ │ ├── .prettierrc │ │ ├── CODE_OF_CONDUCT.md │ │ ├── CONTRIBUTING.md │ │ ├── Dockerfile │ │ ├── LICENSE │ │ ├── README.md │ │ ├── SECURITY.md │ │ ├── astro.config.mjs │ │ ├── gt.config.json │ │ ├── project.json │ │ ├── server/ │ │ │ ├── index.mjs │ │ │ └── util/ │ │ │ ├── environment.mjs │ │ │ └── redirects.mjs │ │ ├── src/ │ │ │ ├── assets/ │ │ │ │ ├── docs/ │ │ │ │ │ ├── README.md │ │ │ │ │ └── sandbox-states.drawio.xml │ │ │ │ └── themes/ │ │ │ │ ├── daytona-code-dark.json │ │ │ │ └── daytona-code-light.json │ │ │ ├── components/ │ │ │ │ ├── ApiBackButton.astro │ │ │ │ ├── ApiReference.astro │ │ │ │ ├── ArchitectureDiagram.astro │ │ │ │ ├── Aside.astro │ │ │ │ ├── ContentPanel.astro │ │ │ │ ├── EditLink.astro │ │ │ │ ├── ExploreMore.astro │ │ │ │ ├── Footer.astro │ │ │ │ ├── GuidesList.astro │ │ │ │ ├── Head.astro │ │ │ │ ├── Header.astro 
│ │ │ │ ├── Hero.astro │ │ │ │ ├── Image.astro │ │ │ │ ├── Keyboard.astro │ │ │ │ ├── Label.astro │ │ │ │ ├── MarkdownContent.astro │ │ │ │ ├── MobileMenuToggle.astro │ │ │ │ ├── OpenPageDropdown/ │ │ │ │ │ ├── OpenPageDropdown.module.scss │ │ │ │ │ └── OpenPageDropdown.tsx │ │ │ │ ├── PageFrame.astro │ │ │ │ ├── PageSidebar.astro │ │ │ │ ├── PageTitle.astro │ │ │ │ ├── Pagination.astro │ │ │ │ ├── PostHog.astro │ │ │ │ ├── SandboxDiagram.astro │ │ │ │ ├── Search.jsx │ │ │ │ ├── Sidebar.astro │ │ │ │ ├── SidebarSublist.astro │ │ │ │ ├── TableOfContent/ │ │ │ │ │ ├── constants.ts │ │ │ │ │ └── starlight-toc.ts │ │ │ │ ├── TableOfContents.astro │ │ │ │ ├── TableOfContentsList.astro │ │ │ │ ├── ThemeProvider.astro │ │ │ │ ├── ThemeSelect.astro │ │ │ │ ├── TwoColumnContent.astro │ │ │ │ ├── Version.astro │ │ │ │ ├── buttons/ │ │ │ │ │ ├── Button.module.scss │ │ │ │ │ ├── CopyButton.tsx │ │ │ │ │ └── LinkButton.tsx │ │ │ │ ├── cards/ │ │ │ │ │ ├── Card.astro │ │ │ │ │ ├── CardGrid.astro │ │ │ │ │ ├── ImageCard.astro │ │ │ │ │ ├── LinkCard.astro │ │ │ │ │ └── TitleCard.astro │ │ │ │ ├── menu/ │ │ │ │ │ ├── LocaleSelector.module.scss │ │ │ │ │ ├── LocaleSelector.tsx │ │ │ │ │ └── SideNavLinks.tsx │ │ │ │ └── table/ │ │ │ │ ├── Table.astro │ │ │ │ └── TableRow.astro │ │ │ ├── content/ │ │ │ │ ├── config.ts │ │ │ │ ├── docs/ │ │ │ │ │ ├── en/ │ │ │ │ │ │ ├── 404.md │ │ │ │ │ │ ├── api-keys.mdx │ │ │ │ │ │ ├── architecture.mdx │ │ │ │ │ │ ├── audit-logs.mdx │ │ │ │ │ │ ├── billing.mdx │ │ │ │ │ │ ├── computer-use.mdx │ │ │ │ │ │ ├── configuration.mdx │ │ │ │ │ │ ├── custom-preview-proxy.mdx │ │ │ │ │ │ ├── declarative-builder.mdx │ │ │ │ │ │ ├── experimental/ │ │ │ │ │ │ │ └── otel-collection.mdx │ │ │ │ │ │ ├── file-system-operations.mdx │ │ │ │ │ │ ├── getting-started.mdx │ │ │ │ │ │ ├── git-operations.mdx │ │ │ │ │ │ ├── go-sdk/ │ │ │ │ │ │ │ ├── daytona.mdx │ │ │ │ │ │ │ ├── errors.mdx │ │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ │ ├── options.mdx │ │ │ │ │ │ │ └── types.mdx 
│ │ │ │ │ │ ├── guides/ │ │ │ │ │ │ │ ├── agentkit/ │ │ │ │ │ │ │ │ └── inngest-agentkit-coding-agent.mdx │ │ │ │ │ │ │ ├── claude/ │ │ │ │ │ │ │ │ ├── claude-agent-sdk-connect-service-sandbox.mdx │ │ │ │ │ │ │ │ ├── claude-agent-sdk-interactive-terminal-sandbox.mdx │ │ │ │ │ │ │ │ ├── claude-code-run-cli-sandbox.mdx │ │ │ │ │ │ │ │ ├── claude-code-run-tasks-stream-logs-sandbox.mdx │ │ │ │ │ │ │ │ └── index.mdx │ │ │ │ │ │ │ ├── codex/ │ │ │ │ │ │ │ │ └── codex-sdk-interactive-terminal-sandbox.mdx │ │ │ │ │ │ │ ├── data-analysis-with-ai.mdx │ │ │ │ │ │ │ ├── google-adk-code-generator.mdx │ │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ │ ├── langchain/ │ │ │ │ │ │ │ │ └── langchain-data-analysis.mdx │ │ │ │ │ │ │ ├── letta-code/ │ │ │ │ │ │ │ │ └── letta-code-agent.mdx │ │ │ │ │ │ │ ├── mastra/ │ │ │ │ │ │ │ │ └── mastra-coding-agent.mdx │ │ │ │ │ │ │ ├── openclaw/ │ │ │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ │ │ ├── openclaw-run-secure-sandbox.mdx │ │ │ │ │ │ │ │ └── openclaw-sdk-sandbox.mdx │ │ │ │ │ │ │ ├── opencode/ │ │ │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ │ │ ├── opencode-plugin.mdx │ │ │ │ │ │ │ │ ├── opencode-sdk-agent.mdx │ │ │ │ │ │ │ │ └── opencode-web-agent.mdx │ │ │ │ │ │ │ ├── reinforcement-learning/ │ │ │ │ │ │ │ │ └── trl-grpo-training.mdx │ │ │ │ │ │ │ └── rlm/ │ │ │ │ │ │ │ ├── dspy-rlms.mdx │ │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ │ └── recursive-language-models.mdx │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ ├── language-server-protocol.mdx │ │ │ │ │ │ ├── limits.mdx │ │ │ │ │ │ ├── linked-accounts.mdx │ │ │ │ │ │ ├── log-streaming.mdx │ │ │ │ │ │ ├── mcp.mdx │ │ │ │ │ │ ├── network-limits.mdx │ │ │ │ │ │ ├── organizations.mdx │ │ │ │ │ │ ├── oss-deployment.mdx │ │ │ │ │ │ ├── playground.mdx │ │ │ │ │ │ ├── preview.mdx │ │ │ │ │ │ ├── process-code-execution.mdx │ │ │ │ │ │ ├── pty.mdx │ │ │ │ │ │ ├── python-sdk/ │ │ │ │ │ │ │ ├── async/ │ │ │ │ │ │ │ │ ├── async-code-interpreter.mdx │ │ │ │ │ │ │ │ ├── async-computer-use.mdx │ │ │ │ │ │ │ │ ├── async-daytona.mdx │ 
│ │ │ │ │ │ │ ├── async-file-system.mdx │ │ │ │ │ │ │ │ ├── async-git.mdx │ │ │ │ │ │ │ │ ├── async-lsp-server.mdx │ │ │ │ │ │ │ │ ├── async-object-storage.mdx │ │ │ │ │ │ │ │ ├── async-process.mdx │ │ │ │ │ │ │ │ ├── async-sandbox.mdx │ │ │ │ │ │ │ │ ├── async-snapshot.mdx │ │ │ │ │ │ │ │ └── async-volume.mdx │ │ │ │ │ │ │ ├── common/ │ │ │ │ │ │ │ │ ├── charts.mdx │ │ │ │ │ │ │ │ ├── errors.mdx │ │ │ │ │ │ │ │ └── image.mdx │ │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ │ └── sync/ │ │ │ │ │ │ │ ├── code-interpreter.mdx │ │ │ │ │ │ │ ├── computer-use.mdx │ │ │ │ │ │ │ ├── daytona.mdx │ │ │ │ │ │ │ ├── file-system.mdx │ │ │ │ │ │ │ ├── git.mdx │ │ │ │ │ │ │ ├── lsp-server.mdx │ │ │ │ │ │ │ ├── object-storage.mdx │ │ │ │ │ │ │ ├── process.mdx │ │ │ │ │ │ │ ├── sandbox.mdx │ │ │ │ │ │ │ ├── snapshot.mdx │ │ │ │ │ │ │ └── volume.mdx │ │ │ │ │ │ ├── regions.mdx │ │ │ │ │ │ ├── ruby-sdk/ │ │ │ │ │ │ │ ├── computer-use.mdx │ │ │ │ │ │ │ ├── config.mdx │ │ │ │ │ │ │ ├── daytona.mdx │ │ │ │ │ │ │ ├── file-system.mdx │ │ │ │ │ │ │ ├── git.mdx │ │ │ │ │ │ │ ├── image.mdx │ │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ │ ├── lsp-server.mdx │ │ │ │ │ │ │ ├── object-storage.mdx │ │ │ │ │ │ │ ├── process.mdx │ │ │ │ │ │ │ ├── sandbox.mdx │ │ │ │ │ │ │ ├── snapshot.mdx │ │ │ │ │ │ │ ├── volume-service.mdx │ │ │ │ │ │ │ └── volume.mdx │ │ │ │ │ │ ├── runners.mdx │ │ │ │ │ │ ├── sandboxes.mdx │ │ │ │ │ │ ├── security-exhibit.mdx │ │ │ │ │ │ ├── snapshots.mdx │ │ │ │ │ │ ├── ssh-access.mdx │ │ │ │ │ │ ├── tools/ │ │ │ │ │ │ │ ├── api.mdx │ │ │ │ │ │ │ └── cli.mdx │ │ │ │ │ │ ├── typescript-sdk/ │ │ │ │ │ │ │ ├── charts.mdx │ │ │ │ │ │ │ ├── code-interpreter.mdx │ │ │ │ │ │ │ ├── computer-use.mdx │ │ │ │ │ │ │ ├── daytona.mdx │ │ │ │ │ │ │ ├── errors.mdx │ │ │ │ │ │ │ ├── execute-response.mdx │ │ │ │ │ │ │ ├── file-system.mdx │ │ │ │ │ │ │ ├── git.mdx │ │ │ │ │ │ │ ├── image.mdx │ │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ │ ├── lsp-server.mdx │ │ │ │ │ │ │ ├── object-storage.mdx │ │ │ │ │ │ │ ├── 
process.mdx │ │ │ │ │ │ │ ├── pty-handle.mdx │ │ │ │ │ │ │ ├── pty.mdx │ │ │ │ │ │ │ ├── sandbox.mdx │ │ │ │ │ │ │ ├── snapshot.mdx │ │ │ │ │ │ │ └── volume.mdx │ │ │ │ │ │ ├── vnc-access.mdx │ │ │ │ │ │ ├── volumes.mdx │ │ │ │ │ │ ├── vpn-connections.mdx │ │ │ │ │ │ ├── web-terminal.mdx │ │ │ │ │ │ └── webhooks.mdx │ │ │ │ │ └── ja/ │ │ │ │ │ ├── 404.md │ │ │ │ │ ├── api-keys.mdx │ │ │ │ │ ├── audit-logs.mdx │ │ │ │ │ ├── billing.mdx │ │ │ │ │ ├── configuration.mdx │ │ │ │ │ ├── custom-domain-authentication.mdx │ │ │ │ │ ├── data-analysis-with-ai.mdx │ │ │ │ │ ├── declarative-builder.mdx │ │ │ │ │ ├── file-system-operations.mdx │ │ │ │ │ ├── getting-started.mdx │ │ │ │ │ ├── git-operations.mdx │ │ │ │ │ ├── index.mdx │ │ │ │ │ ├── language-server-protocol.mdx │ │ │ │ │ ├── limits.mdx │ │ │ │ │ ├── linked-accounts.mdx │ │ │ │ │ ├── log-streaming.mdx │ │ │ │ │ ├── mcp.mdx │ │ │ │ │ ├── organizations.mdx │ │ │ │ │ ├── preview-and-authentication.mdx │ │ │ │ │ ├── process-code-execution.mdx │ │ │ │ │ ├── python-sdk/ │ │ │ │ │ │ ├── async/ │ │ │ │ │ │ │ ├── async-computer-use.mdx │ │ │ │ │ │ │ ├── async-daytona.mdx │ │ │ │ │ │ │ ├── async-file-system.mdx │ │ │ │ │ │ │ ├── async-git.mdx │ │ │ │ │ │ │ ├── async-lsp-server.mdx │ │ │ │ │ │ │ ├── async-object-storage.mdx │ │ │ │ │ │ │ ├── async-sandbox.mdx │ │ │ │ │ │ │ ├── async-snapshot.mdx │ │ │ │ │ │ │ └── async-volume.mdx │ │ │ │ │ │ ├── common/ │ │ │ │ │ │ │ ├── charts.mdx │ │ │ │ │ │ │ ├── errors.mdx │ │ │ │ │ │ │ └── image.mdx │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ └── sync/ │ │ │ │ │ │ ├── computer-use.mdx │ │ │ │ │ │ ├── daytona.mdx │ │ │ │ │ │ ├── file-system.mdx │ │ │ │ │ │ ├── git.mdx │ │ │ │ │ │ ├── lsp-server.mdx │ │ │ │ │ │ ├── object-storage.mdx │ │ │ │ │ │ ├── process.mdx │ │ │ │ │ │ ├── sandbox.mdx │ │ │ │ │ │ ├── snapshot.mdx │ │ │ │ │ │ └── volume.mdx │ │ │ │ │ ├── regions.mdx │ │ │ │ │ ├── sandbox-management.mdx │ │ │ │ │ ├── snapshots.mdx │ │ │ │ │ ├── tools/ │ │ │ │ │ │ ├── api.mdx │ │ │ │ │ │ └── 
cli.mdx │ │ │ │ │ ├── typescript-sdk/ │ │ │ │ │ │ ├── charts.mdx │ │ │ │ │ │ ├── computer-use.mdx │ │ │ │ │ │ ├── daytona.mdx │ │ │ │ │ │ ├── errors.mdx │ │ │ │ │ │ ├── execute-response.mdx │ │ │ │ │ │ ├── file-system.mdx │ │ │ │ │ │ ├── git.mdx │ │ │ │ │ │ ├── image.mdx │ │ │ │ │ │ ├── index.mdx │ │ │ │ │ │ ├── lsp-server.mdx │ │ │ │ │ │ ├── object-storage.mdx │ │ │ │ │ │ ├── process.mdx │ │ │ │ │ │ ├── sandbox.mdx │ │ │ │ │ │ ├── snapshot.mdx │ │ │ │ │ │ └── volume.mdx │ │ │ │ │ ├── volumes.mdx │ │ │ │ │ ├── web-terminal.mdx │ │ │ │ │ └── webhooks.mdx │ │ │ │ └── i18n/ │ │ │ │ ├── en.json │ │ │ │ └── ja.json │ │ │ ├── data/ │ │ │ │ └── i18n/ │ │ │ │ └── ja.json │ │ │ ├── env.d.ts │ │ │ ├── fonts/ │ │ │ │ ├── BerkeleyMono-Regular.otf │ │ │ │ └── font-face.css │ │ │ ├── i18n/ │ │ │ │ ├── generateI18nConfig.ts │ │ │ │ ├── generateI18nSchema.ts │ │ │ │ ├── loadTranslations.ts │ │ │ │ ├── routing.ts │ │ │ │ └── utils.ts │ │ │ ├── middleware.ts │ │ │ ├── pages/ │ │ │ │ ├── [...slug].astro │ │ │ │ └── index.astro │ │ │ ├── styles/ │ │ │ │ ├── _color.scss │ │ │ │ ├── _typography.scss │ │ │ │ ├── _variables.scss │ │ │ │ ├── components/ │ │ │ │ │ ├── burger-menu.scss │ │ │ │ │ ├── cards.scss │ │ │ │ │ ├── docs-footer.scss │ │ │ │ │ ├── markdown.scss │ │ │ │ │ ├── navbar.scss │ │ │ │ │ ├── page-frame.scss │ │ │ │ │ └── search.scss │ │ │ │ ├── init.scss │ │ │ │ ├── mixins.scss │ │ │ │ └── style.scss │ │ │ └── utils/ │ │ │ ├── md.js │ │ │ ├── navigation.ts │ │ │ └── redirects.ts │ │ ├── tailwind.config.js │ │ ├── tools/ │ │ │ ├── update-api-reference.js │ │ │ ├── update-cli-reference.js │ │ │ ├── update-llms.js │ │ │ └── update-search.js │ │ ├── tsconfig.json │ │ ├── vite-env.d.ts │ │ └── vite.config.js │ ├── otel-collector/ │ │ ├── Dockerfile │ │ ├── builder-config.dev.yaml │ │ ├── builder-config.yaml │ │ ├── config.dev.yaml │ │ ├── config.yaml │ │ ├── exporter/ │ │ │ ├── config.go │ │ │ ├── exporter.go │ │ │ ├── factory.go │ │ │ ├── go.mod │ │ │ ├── go.sum │ │ │ └── 
internal/ │ │ │ └── config/ │ │ │ └── resolver.go │ │ └── project.json │ ├── proxy/ │ │ ├── Dockerfile │ │ ├── cmd/ │ │ │ └── proxy/ │ │ │ ├── config/ │ │ │ │ └── config.go │ │ │ └── main.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── internal/ │ │ │ └── buildinfo.go │ │ ├── pkg/ │ │ │ └── proxy/ │ │ │ ├── auth.go │ │ │ ├── auth_callback.go │ │ │ ├── get_sandbox_build_target.go │ │ │ ├── get_sandbox_target.go │ │ │ ├── get_snapshot_target.go │ │ │ ├── proxy.go │ │ │ ├── retry.go │ │ │ └── warning_page.go │ │ └── project.json │ ├── runner/ │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── cmd/ │ │ │ └── runner/ │ │ │ ├── config/ │ │ │ │ └── config.go │ │ │ └── main.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── internal/ │ │ │ ├── buildinfo.go │ │ │ ├── constants/ │ │ │ │ └── auth.go │ │ │ ├── metrics/ │ │ │ │ └── collector.go │ │ │ └── util/ │ │ │ └── error_extract.go │ │ ├── packaging/ │ │ │ ├── deb/ │ │ │ │ └── DEBIAN/ │ │ │ │ ├── control │ │ │ │ ├── postinst │ │ │ │ ├── postrm │ │ │ │ └── prerm │ │ │ └── systemd/ │ │ │ └── daytona-runner.service │ │ ├── pkg/ │ │ │ ├── api/ │ │ │ │ ├── controllers/ │ │ │ │ │ ├── command_logs.go │ │ │ │ │ ├── health.go │ │ │ │ │ ├── info.go │ │ │ │ │ ├── proxy.go │ │ │ │ │ ├── sandbox.go │ │ │ │ │ └── snapshot.go │ │ │ │ ├── docs/ │ │ │ │ │ ├── docs.go │ │ │ │ │ ├── swagger.json │ │ │ │ │ └── swagger.yaml │ │ │ │ ├── dto/ │ │ │ │ │ ├── backup.go │ │ │ │ │ ├── image.go │ │ │ │ │ ├── info.go │ │ │ │ │ ├── registry.go │ │ │ │ │ ├── sandbox.go │ │ │ │ │ ├── snapshot.go │ │ │ │ │ └── volume.go │ │ │ │ ├── middlewares/ │ │ │ │ │ ├── auth.go │ │ │ │ │ └── recoverable_errors.go │ │ │ │ ├── server.go │ │ │ │ └── validator.go │ │ │ ├── apiclient/ │ │ │ │ └── api_client.go │ │ │ ├── cache/ │ │ │ │ ├── backup_info_cache.go │ │ │ │ └── snapshot_error_cache.go │ │ │ ├── common/ │ │ │ │ ├── container.go │ │ │ │ ├── daemon.go │ │ │ │ ├── errors.go │ │ │ │ ├── metrics.go │ │ │ │ ├── recovery.go │ │ │ │ ├── rsync.go │ │ │ │ └── storage.go │ │ │ ├── daemon/ │ │ │ │ 
├── assets.go │ │ │ │ ├── static/ │ │ │ │ │ └── .gitkeep │ │ │ │ └── util.go │ │ │ ├── docker/ │ │ │ │ ├── backup.go │ │ │ │ ├── client.go │ │ │ │ ├── container_commit.go │ │ │ │ ├── container_configs.go │ │ │ │ ├── container_exec.go │ │ │ │ ├── container_inspect.go │ │ │ │ ├── create.go │ │ │ │ ├── daemon.go │ │ │ │ ├── daemon_version.go │ │ │ │ ├── destroy.go │ │ │ │ ├── image_build.go │ │ │ │ ├── image_exists.go │ │ │ │ ├── image_info.go │ │ │ │ ├── image_pull.go │ │ │ │ ├── image_push.go │ │ │ │ ├── image_remove.go │ │ │ │ ├── monitor.go │ │ │ │ ├── network.go │ │ │ │ ├── ping.go │ │ │ │ ├── recover.go │ │ │ │ ├── recover_from_storage_limit.go │ │ │ │ ├── registry_manifest.go │ │ │ │ ├── resize.go │ │ │ │ ├── snapshot_build.go │ │ │ │ ├── snapshot_pull.go │ │ │ │ ├── start.go │ │ │ │ ├── state.go │ │ │ │ ├── stop.go │ │ │ │ ├── tag_image.go │ │ │ │ ├── volumes_cleanup.go │ │ │ │ └── volumes_mountpaths.go │ │ │ ├── models/ │ │ │ │ ├── backup_info.go │ │ │ │ ├── enums/ │ │ │ │ │ ├── sandbox_state.go │ │ │ │ │ └── snapshot_state.go │ │ │ │ ├── recovery_type.go │ │ │ │ ├── sandbox_info.go │ │ │ │ └── service_info.go │ │ │ ├── netrules/ │ │ │ │ ├── assign.go │ │ │ │ ├── delete.go │ │ │ │ ├── limiter.go │ │ │ │ ├── netrules.go │ │ │ │ ├── set.go │ │ │ │ ├── unassign.go │ │ │ │ └── utils.go │ │ │ ├── runner/ │ │ │ │ ├── runner.go │ │ │ │ └── v2/ │ │ │ │ ├── executor/ │ │ │ │ │ ├── backup.go │ │ │ │ │ ├── executor.go │ │ │ │ │ ├── sandbox.go │ │ │ │ │ ├── snapshot.go │ │ │ │ │ └── types.go │ │ │ │ ├── healthcheck/ │ │ │ │ │ └── healthcheck.go │ │ │ │ └── poller/ │ │ │ │ └── poller.go │ │ │ ├── services/ │ │ │ │ ├── sandbox.go │ │ │ │ └── sandbox_sync.go │ │ │ ├── sshgateway/ │ │ │ │ ├── config.go │ │ │ │ └── service.go │ │ │ ├── storage/ │ │ │ │ ├── client.go │ │ │ │ └── minio_client.go │ │ │ └── telemetry/ │ │ │ └── filters/ │ │ │ └── not_found.go │ │ └── project.json │ ├── snapshot-manager/ │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── cmd/ │ │ │ └── main.go │ │ ├── 
go.mod │ │ ├── go.sum │ │ ├── internal/ │ │ │ ├── buildinfo.go │ │ │ ├── config/ │ │ │ │ └── config.go │ │ │ ├── logger/ │ │ │ │ └── logger.go │ │ │ └── server/ │ │ │ ├── config.go │ │ │ └── server.go │ │ └── project.json │ └── ssh-gateway/ │ ├── Dockerfile │ ├── README.md │ ├── go.mod │ ├── go.sum │ ├── main.go │ └── project.json ├── components.json ├── docker/ │ ├── README.md │ ├── dex/ │ │ └── config.yaml │ ├── docker-compose.build.override.yaml │ ├── docker-compose.yaml │ ├── otel/ │ │ └── otel-collector-config.yaml │ └── pgadmin4/ │ ├── pgpass │ └── servers.json ├── ecosystem.config.js ├── eslint.config.mjs ├── examples/ │ ├── go/ │ │ ├── auto_archive/ │ │ │ └── main.go │ │ ├── auto_delete/ │ │ │ └── main.go │ │ ├── build_logs/ │ │ │ └── main.go │ │ ├── code_interpreter/ │ │ │ └── main.go │ │ ├── computer_use/ │ │ │ └── main.go │ │ ├── exec_sessions/ │ │ │ └── main.go │ │ ├── filesystem/ │ │ │ └── main.go │ │ ├── fromimage/ │ │ │ └── main.go │ │ ├── git_operations/ │ │ │ └── main.go │ │ ├── lifecycle/ │ │ │ └── main.go │ │ ├── lsp_usage/ │ │ │ └── main.go │ │ ├── network_settings/ │ │ │ └── main.go │ │ ├── pagination/ │ │ │ └── main.go │ │ ├── pty_channel/ │ │ │ └── main.go │ │ ├── run_examples.sh │ │ ├── sandbox/ │ │ │ └── main.go │ │ ├── snapshots_simple/ │ │ │ └── main.go │ │ ├── snapshots_withlogstreaming/ │ │ │ └── main.go │ │ ├── stream_logs/ │ │ │ └── main.go │ │ ├── volumes/ │ │ │ └── main.go │ │ └── volumes_with_sandbox/ │ │ └── main.go │ ├── jupyter/ │ │ └── daytona.ipynb │ ├── otel-dashboards/ │ │ ├── grafana/ │ │ │ ├── README.md │ │ │ └── dashboard.json │ │ └── new-relic/ │ │ ├── README.md │ │ └── dashboard.json │ ├── python/ │ │ ├── auto-archive/ │ │ │ ├── _async/ │ │ │ │ └── auto_archive.py │ │ │ └── auto_archive.py │ │ ├── auto-delete/ │ │ │ ├── _async/ │ │ │ │ └── auto_delete.py │ │ │ └── auto_delete.py │ │ ├── charts/ │ │ │ ├── _async/ │ │ │ │ └── main.py │ │ │ └── main.py │ │ ├── declarative-image/ │ │ │ ├── _async/ │ │ │ │ └── main.py │ │ │ 
└── main.py │ │ ├── exec-command/ │ │ │ ├── _async/ │ │ │ │ ├── exec.py │ │ │ │ ├── exec_logs_async.py │ │ │ │ └── exec_session.py │ │ │ ├── exec.py │ │ │ ├── exec_logs_async.py │ │ │ └── exec_session.py │ │ ├── file-operations/ │ │ │ ├── _async/ │ │ │ │ └── main.py │ │ │ └── main.py │ │ ├── git-lsp/ │ │ │ ├── _async/ │ │ │ │ └── main.py │ │ │ └── main.py │ │ ├── lifecycle/ │ │ │ ├── _async/ │ │ │ │ └── lifecycle.py │ │ │ └── lifecycle.py │ │ ├── network-settings/ │ │ │ ├── _async/ │ │ │ │ └── main.py │ │ │ └── main.py │ │ ├── pagination/ │ │ │ ├── _async/ │ │ │ │ ├── sandbox.py │ │ │ │ └── snapshot.py │ │ │ ├── sandbox.py │ │ │ └── snapshot.py │ │ ├── pty/ │ │ │ ├── _async/ │ │ │ │ └── main.py │ │ │ └── main.py │ │ ├── region/ │ │ │ ├── _async/ │ │ │ │ └── main.py │ │ │ └── main.py │ │ └── volumes/ │ │ ├── _async/ │ │ │ └── volume.py │ │ └── volume.py │ ├── ruby/ │ │ ├── README.md │ │ ├── auto-archive/ │ │ │ └── auto_archive.rb │ │ ├── auto-delete/ │ │ │ └── auto_delete.rb │ │ ├── charts/ │ │ │ └── main.rb │ │ ├── declarative-image/ │ │ │ └── main.rb │ │ ├── exec-command/ │ │ │ ├── exec.rb │ │ │ └── exec_session.rb │ │ ├── file-operations/ │ │ │ └── main.rb │ │ ├── git-lsp/ │ │ │ └── main.rb │ │ ├── lifecycle/ │ │ │ └── lifecycle.rb │ │ ├── network-settings/ │ │ │ └── main.rb │ │ ├── pagination/ │ │ │ ├── sandbox.rb │ │ │ └── snapshot.rb │ │ ├── pty/ │ │ │ └── main.rb │ │ └── volumes/ │ │ └── volume.rb │ └── typescript/ │ ├── auto-archive/ │ │ └── index.ts │ ├── auto-delete/ │ │ └── index.ts │ ├── charts/ │ │ └── index.ts │ ├── declarative-image/ │ │ └── index.ts │ ├── exec-command/ │ │ └── index.ts │ ├── file-operations/ │ │ └── index.ts │ ├── git-lsp/ │ │ └── index.ts │ ├── lifecycle/ │ │ └── index.ts │ ├── network-settings/ │ │ └── index.ts │ ├── pagination/ │ │ ├── sandbox.ts │ │ └── snapshot.ts │ ├── pty/ │ │ └── index.ts │ ├── region/ │ │ └── index.tsx │ └── volumes/ │ └── index.ts ├── functions/ │ └── auth0/ │ ├── setCustomClaims.onExecutePostLogin.js │ ├── 
validateEmailUnused.onExecutePostLogin.js │ ├── validateEmailUnused.onExecutePreRegister.js │ └── verifyAliasEmail.onExecutePreRegister.js ├── go.work ├── guides/ │ ├── python/ │ │ ├── ai-data-analyst/ │ │ │ ├── litellm/ │ │ │ │ ├── .gitignore │ │ │ │ ├── README.md │ │ │ │ ├── ai_data_analyst.py │ │ │ │ ├── cafe_sales_data.csv │ │ │ │ └── pyproject.toml │ │ │ └── openai/ │ │ │ ├── .gitignore │ │ │ ├── README.md │ │ │ ├── ai_data_analyst.py │ │ │ ├── cafe_sales_data.csv │ │ │ └── pyproject.toml │ │ ├── dspy-rlms/ │ │ │ ├── .gitignore │ │ │ ├── README.md │ │ │ ├── daytona_interpreter.py │ │ │ ├── demo.py │ │ │ └── pyproject.toml │ │ ├── google-adk/ │ │ │ └── code-generator-agent/ │ │ │ └── gemini/ │ │ │ ├── .gitignore │ │ │ ├── README.md │ │ │ └── main.py │ │ ├── langchain/ │ │ │ └── data-analysis/ │ │ │ └── anthropic/ │ │ │ ├── .gitignore │ │ │ ├── README.md │ │ │ └── data_analysis.py │ │ ├── recursive-language-models/ │ │ │ ├── .gitignore │ │ │ ├── README.md │ │ │ ├── config.yaml │ │ │ ├── output_logging/ │ │ │ │ ├── __init__.py │ │ │ │ ├── console.py │ │ │ │ └── tree_logger.py │ │ │ ├── pyproject.toml │ │ │ ├── rlm/ │ │ │ │ ├── __init__.py │ │ │ │ ├── agent.py │ │ │ │ ├── budget.py │ │ │ │ ├── client.py │ │ │ │ ├── prompts.py │ │ │ │ ├── repl.py │ │ │ │ ├── sandbox.py │ │ │ │ └── types.py │ │ │ ├── run.py │ │ │ └── viewer/ │ │ │ └── index.html │ │ └── reinforcement-learning/ │ │ └── trl/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── pyproject.toml │ │ └── train.py │ └── typescript/ │ ├── agentkit-inngest/ │ │ └── coding-agent/ │ │ └── anthropic/ │ │ ├── .gitignore │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── index.ts │ │ │ └── utils.ts │ │ └── tsconfig.json │ ├── ai-data-analyst/ │ │ └── openai/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── cafe_sales_data.csv │ │ ├── index.ts │ │ └── package.json │ ├── anthropic/ │ │ ├── multi-agent-claude-sdk/ │ │ │ ├── .gitignore │ │ │ ├── README.md │ │ │ ├── package.json │ │ │ ├── src/ │ │ │ │ 
├── coding_agent.py │ │ │ │ ├── index.ts │ │ │ │ └── utils.ts │ │ │ └── tsconfig.json │ │ └── single-claude-agent-sdk/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── coding_agent.py │ │ │ ├── index.ts │ │ │ └── utils.ts │ │ └── tsconfig.json │ ├── letta-code/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── index.ts │ │ │ ├── letta-session.ts │ │ │ ├── types.ts │ │ │ └── utils.ts │ │ └── tsconfig.json │ ├── mastra/ │ │ └── coding-agent/ │ │ └── openai/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── package.json │ │ ├── src/ │ │ │ └── mastra/ │ │ │ ├── agents/ │ │ │ │ └── coding-agent.ts │ │ │ ├── index.ts │ │ │ └── tools/ │ │ │ ├── daytona/ │ │ │ │ ├── tools.ts │ │ │ │ └── utils.ts │ │ │ └── index.ts │ │ └── tsconfig.json │ ├── openai/ │ │ └── codex-sdk/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── agent/ │ │ │ ├── index.ts │ │ │ └── package.json │ │ ├── package.json │ │ ├── src/ │ │ │ ├── index.ts │ │ │ └── utils.ts │ │ └── tsconfig.json │ ├── openclaw/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── openclaw.json │ │ ├── package.json │ │ ├── src/ │ │ │ ├── index.ts │ │ │ └── utils.ts │ │ └── tsconfig.json │ └── opencode/ │ ├── opencode-sdk/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── package.json │ │ ├── src/ │ │ │ ├── index.ts │ │ │ ├── server.ts │ │ │ └── session.ts │ │ └── tsconfig.json │ └── opencode-web/ │ ├── .gitignore │ ├── README.md │ ├── package.json │ ├── src/ │ │ └── index.ts │ └── tsconfig.json ├── hack/ │ ├── computer-use/ │ │ ├── .dockerignore │ │ ├── Dockerfile │ │ └── build-computer-use-amd64.sh │ └── python-client/ │ ├── openapi-templates/ │ │ ├── __init__package.mustache │ │ ├── model_generic.mustache │ │ └── og__init__package.mustache │ └── postprocess.sh ├── images/ │ ├── sandbox/ │ │ ├── Dockerfile │ │ └── README.md │ └── sandbox-slim/ │ ├── Dockerfile │ └── README.md ├── jest.config.ts ├── jest.preset.js ├── libs/ │ ├── analytics-api-client/ │ │ ├── LICENSE │ │ ├── package.json │ 
│ ├── project.json │ │ ├── src/ │ │ │ ├── .gitignore │ │ │ ├── .npmignore │ │ │ ├── .openapi-generator/ │ │ │ │ ├── FILES │ │ │ │ └── VERSION │ │ │ ├── .openapi-generator-ignore │ │ │ ├── api/ │ │ │ │ ├── telemetry-api.ts │ │ │ │ └── usage-api.ts │ │ │ ├── api.ts │ │ │ ├── base.ts │ │ │ ├── common.ts │ │ │ ├── configuration.ts │ │ │ ├── git_push.sh │ │ │ ├── index.ts │ │ │ └── models/ │ │ │ ├── index.ts │ │ │ ├── models-aggregated-usage.ts │ │ │ ├── models-log-entry.ts │ │ │ ├── models-metric-point.ts │ │ │ ├── models-sandbox-usage.ts │ │ │ ├── models-span.ts │ │ │ ├── models-trace-summary.ts │ │ │ ├── models-usage-chart-point.ts │ │ │ └── models-usage-period.ts │ │ ├── tsconfig.json │ │ └── tsconfig.lib.json │ ├── api-client/ │ │ ├── LICENSE │ │ ├── package.json │ │ ├── project.json │ │ ├── src/ │ │ │ ├── .gitignore │ │ │ ├── .npmignore │ │ │ ├── .openapi-generator/ │ │ │ │ ├── FILES │ │ │ │ └── VERSION │ │ │ ├── .openapi-generator-ignore │ │ │ ├── api/ │ │ │ │ ├── admin-api.ts │ │ │ │ ├── api-keys-api.ts │ │ │ │ ├── audit-api.ts │ │ │ │ ├── config-api.ts │ │ │ │ ├── docker-registry-api.ts │ │ │ │ ├── health-api.ts │ │ │ │ ├── jobs-api.ts │ │ │ │ ├── object-storage-api.ts │ │ │ │ ├── organizations-api.ts │ │ │ │ ├── preview-api.ts │ │ │ │ ├── regions-api.ts │ │ │ │ ├── runners-api.ts │ │ │ │ ├── sandbox-api.ts │ │ │ │ ├── snapshots-api.ts │ │ │ │ ├── toolbox-api.ts │ │ │ │ ├── users-api.ts │ │ │ │ ├── volumes-api.ts │ │ │ │ ├── webhooks-api.ts │ │ │ │ └── workspace-api.ts │ │ │ ├── api.ts │ │ │ ├── base.ts │ │ │ ├── common.ts │ │ │ ├── configuration.ts │ │ │ ├── git_push.sh │ │ │ ├── index.ts │ │ │ └── models/ │ │ │ ├── account-provider.ts │ │ │ ├── admin-create-runner.ts │ │ │ ├── announcement.ts │ │ │ ├── api-key-list.ts │ │ │ ├── api-key-response.ts │ │ │ ├── audit-log.ts │ │ │ ├── build-info.ts │ │ │ ├── command.ts │ │ │ ├── completion-context.ts │ │ │ ├── completion-item.ts │ │ │ ├── completion-list.ts │ │ │ ├── compressed-screenshot-response.ts │ │ │ ├── 
computer-use-start-response.ts │ │ │ ├── computer-use-status-response.ts │ │ │ ├── computer-use-stop-response.ts │ │ │ ├── create-api-key.ts │ │ │ ├── create-build-info.ts │ │ │ ├── create-docker-registry.ts │ │ │ ├── create-linked-account.ts │ │ │ ├── create-organization-invitation.ts │ │ │ ├── create-organization-quota.ts │ │ │ ├── create-organization-role.ts │ │ │ ├── create-organization.ts │ │ │ ├── create-region-response.ts │ │ │ ├── create-region.ts │ │ │ ├── create-runner-response.ts │ │ │ ├── create-runner.ts │ │ │ ├── create-sandbox.ts │ │ │ ├── create-session-request.ts │ │ │ ├── create-snapshot.ts │ │ │ ├── create-user.ts │ │ │ ├── create-volume.ts │ │ │ ├── create-workspace.ts │ │ │ ├── daytona-configuration.ts │ │ │ ├── display-info-response.ts │ │ │ ├── docker-registry.ts │ │ │ ├── download-files.ts │ │ │ ├── execute-request.ts │ │ │ ├── execute-response.ts │ │ │ ├── file-info.ts │ │ │ ├── file-status.ts │ │ │ ├── git-add-request.ts │ │ │ ├── git-branch-request.ts │ │ │ ├── git-checkout-request.ts │ │ │ ├── git-clone-request.ts │ │ │ ├── git-commit-info.ts │ │ │ ├── git-commit-request.ts │ │ │ ├── git-commit-response.ts │ │ │ ├── git-delete-branch-request.ts │ │ │ ├── git-repo-request.ts │ │ │ ├── git-status.ts │ │ │ ├── health-controller-check200-response-info-value.ts │ │ │ ├── health-controller-check200-response.ts │ │ │ ├── health-controller-check503-response.ts │ │ │ ├── index.ts │ │ │ ├── job-status.ts │ │ │ ├── job-type.ts │ │ │ ├── job.ts │ │ │ ├── keyboard-hotkey-request.ts │ │ │ ├── keyboard-press-request.ts │ │ │ ├── keyboard-type-request.ts │ │ │ ├── list-branch-response.ts │ │ │ ├── log-entry.ts │ │ │ ├── lsp-completion-params.ts │ │ │ ├── lsp-document-request.ts │ │ │ ├── lsp-location.ts │ │ │ ├── lsp-server-request.ts │ │ │ ├── lsp-symbol.ts │ │ │ ├── match.ts │ │ │ ├── metric-data-point.ts │ │ │ ├── metric-series.ts │ │ │ ├── metrics-response.ts │ │ │ ├── mouse-click-request.ts │ │ │ ├── mouse-click-response.ts │ │ │ ├── 
mouse-drag-request.ts │ │ │ ├── mouse-drag-response.ts │ │ │ ├── mouse-move-request.ts │ │ │ ├── mouse-move-response.ts │ │ │ ├── mouse-position.ts │ │ │ ├── mouse-scroll-request.ts │ │ │ ├── mouse-scroll-response.ts │ │ │ ├── oidc-config.ts │ │ │ ├── organization-invitation.ts │ │ │ ├── organization-role.ts │ │ │ ├── organization-sandbox-default-limited-network-egress.ts │ │ │ ├── organization-suspension.ts │ │ │ ├── organization-usage-overview.ts │ │ │ ├── organization-user.ts │ │ │ ├── organization.ts │ │ │ ├── otel-config.ts │ │ │ ├── paginated-audit-logs.ts │ │ │ ├── paginated-jobs.ts │ │ │ ├── paginated-logs.ts │ │ │ ├── paginated-sandboxes.ts │ │ │ ├── paginated-snapshots.ts │ │ │ ├── paginated-traces.ts │ │ │ ├── poll-jobs-response.ts │ │ │ ├── port-preview-url.ts │ │ │ ├── position.ts │ │ │ ├── posthog-config.ts │ │ │ ├── process-errors-response.ts │ │ │ ├── process-logs-response.ts │ │ │ ├── process-restart-response.ts │ │ │ ├── process-status-response.ts │ │ │ ├── project-dir-response.ts │ │ │ ├── pty-create-request.ts │ │ │ ├── pty-create-response.ts │ │ │ ├── pty-list-response.ts │ │ │ ├── pty-resize-request.ts │ │ │ ├── pty-session-info.ts │ │ │ ├── range.ts │ │ │ ├── rate-limit-config.ts │ │ │ ├── rate-limit-entry.ts │ │ │ ├── regenerate-api-key-response.ts │ │ │ ├── region-quota.ts │ │ │ ├── region-screenshot-response.ts │ │ │ ├── region-type.ts │ │ │ ├── region-usage-overview.ts │ │ │ ├── region.ts │ │ │ ├── registry-push-access-dto.ts │ │ │ ├── replace-request.ts │ │ │ ├── replace-result.ts │ │ │ ├── resize-sandbox.ts │ │ │ ├── runner-full.ts │ │ │ ├── runner-health-metrics.ts │ │ │ ├── runner-healthcheck.ts │ │ │ ├── runner-service-health.ts │ │ │ ├── runner-snapshot-dto.ts │ │ │ ├── runner-state.ts │ │ │ ├── runner.ts │ │ │ ├── sandbox-class.ts │ │ │ ├── sandbox-desired-state.ts │ │ │ ├── sandbox-info.ts │ │ │ ├── sandbox-labels.ts │ │ │ ├── sandbox-state.ts │ │ │ ├── sandbox-volume.ts │ │ │ ├── sandbox.ts │ │ │ ├── screenshot-response.ts │ │ │ 
├── search-files-response.ts │ │ │ ├── send-webhook-dto.ts │ │ │ ├── session-execute-request.ts │ │ │ ├── session-execute-response.ts │ │ │ ├── session.ts │ │ │ ├── set-snapshot-general-status-dto.ts │ │ │ ├── signed-port-preview-url.ts │ │ │ ├── snapshot-dto.ts │ │ │ ├── snapshot-manager-credentials.ts │ │ │ ├── snapshot-state.ts │ │ │ ├── ssh-access-dto.ts │ │ │ ├── ssh-access-validation-dto.ts │ │ │ ├── storage-access-dto.ts │ │ │ ├── toolbox-proxy-url.ts │ │ │ ├── trace-span.ts │ │ │ ├── trace-summary.ts │ │ │ ├── update-docker-registry.ts │ │ │ ├── update-job-status.ts │ │ │ ├── update-organization-default-region.ts │ │ │ ├── update-organization-invitation.ts │ │ │ ├── update-organization-member-access.ts │ │ │ ├── update-organization-quota.ts │ │ │ ├── update-organization-region-quota.ts │ │ │ ├── update-organization-role.ts │ │ │ ├── update-region.ts │ │ │ ├── update-sandbox-state-dto.ts │ │ │ ├── url.ts │ │ │ ├── user-home-dir-response.ts │ │ │ ├── user-public-key.ts │ │ │ ├── user.ts │ │ │ ├── volume-dto.ts │ │ │ ├── volume-state.ts │ │ │ ├── webhook-app-portal-access.ts │ │ │ ├── webhook-controller-get-status200-response.ts │ │ │ ├── webhook-event.ts │ │ │ ├── webhook-initialization-status.ts │ │ │ ├── windows-response.ts │ │ │ ├── work-dir-response.ts │ │ │ ├── workspace-port-preview-url.ts │ │ │ └── workspace.ts │ │ ├── tsconfig.json │ │ └── tsconfig.lib.json │ ├── api-client-go/ │ │ ├── .gitignore │ │ ├── .openapi-generator/ │ │ │ ├── FILES │ │ │ └── VERSION │ │ ├── .openapi-generator-ignore │ │ ├── LICENSE │ │ ├── api/ │ │ │ └── openapi.yaml │ │ ├── api_admin.go │ │ ├── api_api_keys.go │ │ ├── api_audit.go │ │ ├── api_config.go │ │ ├── api_docker_registry.go │ │ ├── api_health.go │ │ ├── api_jobs.go │ │ ├── api_object_storage.go │ │ ├── api_organizations.go │ │ ├── api_preview.go │ │ ├── api_regions.go │ │ ├── api_runners.go │ │ ├── api_sandbox.go │ │ ├── api_snapshots.go │ │ ├── api_toolbox.go │ │ ├── api_users.go │ │ ├── api_volumes.go │ │ ├── 
api_webhooks.go │ │ ├── api_workspace.go │ │ ├── client.go │ │ ├── configuration.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── model_account_provider.go │ │ ├── model_admin_create_runner.go │ │ ├── model_announcement.go │ │ ├── model_api_key_list.go │ │ ├── model_api_key_response.go │ │ ├── model_audit_log.go │ │ ├── model_build_info.go │ │ ├── model_command.go │ │ ├── model_completion_context.go │ │ ├── model_completion_item.go │ │ ├── model_completion_list.go │ │ ├── model_compressed_screenshot_response.go │ │ ├── model_computer_use_start_response.go │ │ ├── model_computer_use_status_response.go │ │ ├── model_computer_use_stop_response.go │ │ ├── model_create_api_key.go │ │ ├── model_create_build_info.go │ │ ├── model_create_docker_registry.go │ │ ├── model_create_linked_account.go │ │ ├── model_create_organization.go │ │ ├── model_create_organization_invitation.go │ │ ├── model_create_organization_quota.go │ │ ├── model_create_organization_role.go │ │ ├── model_create_region.go │ │ ├── model_create_region_response.go │ │ ├── model_create_runner.go │ │ ├── model_create_runner_response.go │ │ ├── model_create_sandbox.go │ │ ├── model_create_session_request.go │ │ ├── model_create_snapshot.go │ │ ├── model_create_user.go │ │ ├── model_create_volume.go │ │ ├── model_create_workspace.go │ │ ├── model_daytona_configuration.go │ │ ├── model_display_info_response.go │ │ ├── model_docker_registry.go │ │ ├── model_download_files.go │ │ ├── model_execute_request.go │ │ ├── model_execute_response.go │ │ ├── model_file_info.go │ │ ├── model_file_status.go │ │ ├── model_git_add_request.go │ │ ├── model_git_branch_request.go │ │ ├── model_git_checkout_request.go │ │ ├── model_git_clone_request.go │ │ ├── model_git_commit_info.go │ │ ├── model_git_commit_request.go │ │ ├── model_git_commit_response.go │ │ ├── model_git_delete_branch_request.go │ │ ├── model_git_repo_request.go │ │ ├── model_git_status.go │ │ ├── model_health_controller_check_200_response.go │ │ ├── 
model_health_controller_check_200_response_info_value.go │ │ ├── model_health_controller_check_503_response.go │ │ ├── model_job.go │ │ ├── model_job_status.go │ │ ├── model_job_type.go │ │ ├── model_keyboard_hotkey_request.go │ │ ├── model_keyboard_press_request.go │ │ ├── model_keyboard_type_request.go │ │ ├── model_list_branch_response.go │ │ ├── model_log_entry.go │ │ ├── model_lsp_completion_params.go │ │ ├── model_lsp_document_request.go │ │ ├── model_lsp_location.go │ │ ├── model_lsp_server_request.go │ │ ├── model_lsp_symbol.go │ │ ├── model_match.go │ │ ├── model_metric_data_point.go │ │ ├── model_metric_series.go │ │ ├── model_metrics_response.go │ │ ├── model_mouse_click_request.go │ │ ├── model_mouse_click_response.go │ │ ├── model_mouse_drag_request.go │ │ ├── model_mouse_drag_response.go │ │ ├── model_mouse_move_request.go │ │ ├── model_mouse_move_response.go │ │ ├── model_mouse_position.go │ │ ├── model_mouse_scroll_request.go │ │ ├── model_mouse_scroll_response.go │ │ ├── model_oidc_config.go │ │ ├── model_organization.go │ │ ├── model_organization_invitation.go │ │ ├── model_organization_role.go │ │ ├── model_organization_sandbox_default_limited_network_egress.go │ │ ├── model_organization_suspension.go │ │ ├── model_organization_usage_overview.go │ │ ├── model_organization_user.go │ │ ├── model_otel_config.go │ │ ├── model_paginated_audit_logs.go │ │ ├── model_paginated_jobs.go │ │ ├── model_paginated_logs.go │ │ ├── model_paginated_sandboxes.go │ │ ├── model_paginated_snapshots.go │ │ ├── model_paginated_traces.go │ │ ├── model_poll_jobs_response.go │ │ ├── model_port_preview_url.go │ │ ├── model_position.go │ │ ├── model_posthog_config.go │ │ ├── model_process_errors_response.go │ │ ├── model_process_logs_response.go │ │ ├── model_process_restart_response.go │ │ ├── model_process_status_response.go │ │ ├── model_project_dir_response.go │ │ ├── model_pty_create_request.go │ │ ├── model_pty_create_response.go │ │ ├── model_pty_list_response.go │ │ 
├── model_pty_resize_request.go │ │ ├── model_pty_session_info.go │ │ ├── model_range.go │ │ ├── model_rate_limit_config.go │ │ ├── model_rate_limit_entry.go │ │ ├── model_regenerate_api_key_response.go │ │ ├── model_region.go │ │ ├── model_region_quota.go │ │ ├── model_region_screenshot_response.go │ │ ├── model_region_type.go │ │ ├── model_region_usage_overview.go │ │ ├── model_registry_push_access_dto.go │ │ ├── model_replace_request.go │ │ ├── model_replace_result.go │ │ ├── model_resize_sandbox.go │ │ ├── model_runner.go │ │ ├── model_runner_full.go │ │ ├── model_runner_health_metrics.go │ │ ├── model_runner_healthcheck.go │ │ ├── model_runner_service_health.go │ │ ├── model_runner_snapshot_dto.go │ │ ├── model_runner_state.go │ │ ├── model_sandbox.go │ │ ├── model_sandbox_class.go │ │ ├── model_sandbox_desired_state.go │ │ ├── model_sandbox_info.go │ │ ├── model_sandbox_labels.go │ │ ├── model_sandbox_state.go │ │ ├── model_sandbox_volume.go │ │ ├── model_screenshot_response.go │ │ ├── model_search_files_response.go │ │ ├── model_send_webhook_dto.go │ │ ├── model_session.go │ │ ├── model_session_execute_request.go │ │ ├── model_session_execute_response.go │ │ ├── model_set_snapshot_general_status_dto.go │ │ ├── model_signed_port_preview_url.go │ │ ├── model_snapshot_dto.go │ │ ├── model_snapshot_manager_credentials.go │ │ ├── model_snapshot_state.go │ │ ├── model_ssh_access_dto.go │ │ ├── model_ssh_access_validation_dto.go │ │ ├── model_storage_access_dto.go │ │ ├── model_toolbox_proxy_url.go │ │ ├── model_trace_span.go │ │ ├── model_trace_summary.go │ │ ├── model_update_docker_registry.go │ │ ├── model_update_job_status.go │ │ ├── model_update_organization_default_region.go │ │ ├── model_update_organization_invitation.go │ │ ├── model_update_organization_member_access.go │ │ ├── model_update_organization_quota.go │ │ ├── model_update_organization_region_quota.go │ │ ├── model_update_organization_role.go │ │ ├── model_update_region.go │ │ ├── 
model_update_sandbox_state_dto.go │ │ ├── model_url.go │ │ ├── model_user.go │ │ ├── model_user_home_dir_response.go │ │ ├── model_user_public_key.go │ │ ├── model_volume_dto.go │ │ ├── model_volume_state.go │ │ ├── model_webhook_app_portal_access.go │ │ ├── model_webhook_controller_get_status_200_response.go │ │ ├── model_webhook_event.go │ │ ├── model_webhook_initialization_status.go │ │ ├── model_windows_response.go │ │ ├── model_work_dir_response.go │ │ ├── model_workspace.go │ │ ├── model_workspace_port_preview_url.go │ │ ├── project.json │ │ ├── response.go │ │ └── utils.go │ ├── api-client-python/ │ │ ├── .gitignore │ │ ├── .openapi-generator/ │ │ │ ├── FILES │ │ │ └── VERSION │ │ ├── .openapi-generator-ignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── daytona_api_client/ │ │ │ ├── __init__.py │ │ │ ├── api/ │ │ │ │ ├── __init__.py │ │ │ │ ├── admin_api.py │ │ │ │ ├── api_keys_api.py │ │ │ │ ├── audit_api.py │ │ │ │ ├── config_api.py │ │ │ │ ├── docker_registry_api.py │ │ │ │ ├── health_api.py │ │ │ │ ├── jobs_api.py │ │ │ │ ├── object_storage_api.py │ │ │ │ ├── organizations_api.py │ │ │ │ ├── preview_api.py │ │ │ │ ├── regions_api.py │ │ │ │ ├── runners_api.py │ │ │ │ ├── sandbox_api.py │ │ │ │ ├── snapshots_api.py │ │ │ │ ├── toolbox_api.py │ │ │ │ ├── users_api.py │ │ │ │ ├── volumes_api.py │ │ │ │ ├── webhooks_api.py │ │ │ │ └── workspace_api.py │ │ │ ├── api_client.py │ │ │ ├── api_response.py │ │ │ ├── configuration.py │ │ │ ├── exceptions.py │ │ │ ├── models/ │ │ │ │ ├── __init__.py │ │ │ │ ├── account_provider.py │ │ │ │ ├── admin_create_runner.py │ │ │ │ ├── announcement.py │ │ │ │ ├── api_key_list.py │ │ │ │ ├── api_key_response.py │ │ │ │ ├── audit_log.py │ │ │ │ ├── build_info.py │ │ │ │ ├── command.py │ │ │ │ ├── completion_context.py │ │ │ │ ├── completion_item.py │ │ │ │ ├── completion_list.py │ │ │ │ ├── compressed_screenshot_response.py │ │ │ │ ├── computer_use_start_response.py │ │ │ │ ├── computer_use_status_response.py │ │ │ │ ├── 
computer_use_stop_response.py │ │ │ │ ├── create_api_key.py │ │ │ │ ├── create_build_info.py │ │ │ │ ├── create_docker_registry.py │ │ │ │ ├── create_linked_account.py │ │ │ │ ├── create_organization.py │ │ │ │ ├── create_organization_invitation.py │ │ │ │ ├── create_organization_quota.py │ │ │ │ ├── create_organization_role.py │ │ │ │ ├── create_region.py │ │ │ │ ├── create_region_response.py │ │ │ │ ├── create_runner.py │ │ │ │ ├── create_runner_response.py │ │ │ │ ├── create_sandbox.py │ │ │ │ ├── create_session_request.py │ │ │ │ ├── create_snapshot.py │ │ │ │ ├── create_user.py │ │ │ │ ├── create_volume.py │ │ │ │ ├── create_workspace.py │ │ │ │ ├── daytona_configuration.py │ │ │ │ ├── display_info_response.py │ │ │ │ ├── docker_registry.py │ │ │ │ ├── download_files.py │ │ │ │ ├── execute_request.py │ │ │ │ ├── execute_response.py │ │ │ │ ├── file_info.py │ │ │ │ ├── file_status.py │ │ │ │ ├── git_add_request.py │ │ │ │ ├── git_branch_request.py │ │ │ │ ├── git_checkout_request.py │ │ │ │ ├── git_clone_request.py │ │ │ │ ├── git_commit_info.py │ │ │ │ ├── git_commit_request.py │ │ │ │ ├── git_commit_response.py │ │ │ │ ├── git_delete_branch_request.py │ │ │ │ ├── git_repo_request.py │ │ │ │ ├── git_status.py │ │ │ │ ├── health_controller_check200_response.py │ │ │ │ ├── health_controller_check200_response_info_value.py │ │ │ │ ├── health_controller_check503_response.py │ │ │ │ ├── job.py │ │ │ │ ├── job_status.py │ │ │ │ ├── job_type.py │ │ │ │ ├── keyboard_hotkey_request.py │ │ │ │ ├── keyboard_press_request.py │ │ │ │ ├── keyboard_type_request.py │ │ │ │ ├── list_branch_response.py │ │ │ │ ├── log_entry.py │ │ │ │ ├── lsp_completion_params.py │ │ │ │ ├── lsp_document_request.py │ │ │ │ ├── lsp_location.py │ │ │ │ ├── lsp_server_request.py │ │ │ │ ├── lsp_symbol.py │ │ │ │ ├── match.py │ │ │ │ ├── metric_data_point.py │ │ │ │ ├── metric_series.py │ │ │ │ ├── metrics_response.py │ │ │ │ ├── mouse_click_request.py │ │ │ │ ├── mouse_click_response.py │ │ │ │ 
├── mouse_drag_request.py │ │ │ │ ├── mouse_drag_response.py │ │ │ │ ├── mouse_move_request.py │ │ │ │ ├── mouse_move_response.py │ │ │ │ ├── mouse_position.py │ │ │ │ ├── mouse_scroll_request.py │ │ │ │ ├── mouse_scroll_response.py │ │ │ │ ├── oidc_config.py │ │ │ │ ├── organization.py │ │ │ │ ├── organization_invitation.py │ │ │ │ ├── organization_role.py │ │ │ │ ├── organization_sandbox_default_limited_network_egress.py │ │ │ │ ├── organization_suspension.py │ │ │ │ ├── organization_usage_overview.py │ │ │ │ ├── organization_user.py │ │ │ │ ├── otel_config.py │ │ │ │ ├── paginated_audit_logs.py │ │ │ │ ├── paginated_jobs.py │ │ │ │ ├── paginated_logs.py │ │ │ │ ├── paginated_sandboxes.py │ │ │ │ ├── paginated_snapshots.py │ │ │ │ ├── paginated_traces.py │ │ │ │ ├── poll_jobs_response.py │ │ │ │ ├── port_preview_url.py │ │ │ │ ├── position.py │ │ │ │ ├── posthog_config.py │ │ │ │ ├── process_errors_response.py │ │ │ │ ├── process_logs_response.py │ │ │ │ ├── process_restart_response.py │ │ │ │ ├── process_status_response.py │ │ │ │ ├── project_dir_response.py │ │ │ │ ├── pty_create_request.py │ │ │ │ ├── pty_create_response.py │ │ │ │ ├── pty_list_response.py │ │ │ │ ├── pty_resize_request.py │ │ │ │ ├── pty_session_info.py │ │ │ │ ├── range.py │ │ │ │ ├── rate_limit_config.py │ │ │ │ ├── rate_limit_entry.py │ │ │ │ ├── regenerate_api_key_response.py │ │ │ │ ├── region.py │ │ │ │ ├── region_quota.py │ │ │ │ ├── region_screenshot_response.py │ │ │ │ ├── region_type.py │ │ │ │ ├── region_usage_overview.py │ │ │ │ ├── registry_push_access_dto.py │ │ │ │ ├── replace_request.py │ │ │ │ ├── replace_result.py │ │ │ │ ├── resize_sandbox.py │ │ │ │ ├── runner.py │ │ │ │ ├── runner_full.py │ │ │ │ ├── runner_health_metrics.py │ │ │ │ ├── runner_healthcheck.py │ │ │ │ ├── runner_service_health.py │ │ │ │ ├── runner_snapshot_dto.py │ │ │ │ ├── runner_state.py │ │ │ │ ├── sandbox.py │ │ │ │ ├── sandbox_class.py │ │ │ │ ├── sandbox_desired_state.py │ │ │ │ ├── sandbox_info.py 
│ │ │ │ ├── sandbox_labels.py │ │ │ │ ├── sandbox_state.py │ │ │ │ ├── sandbox_volume.py │ │ │ │ ├── screenshot_response.py │ │ │ │ ├── search_files_response.py │ │ │ │ ├── send_webhook_dto.py │ │ │ │ ├── session.py │ │ │ │ ├── session_execute_request.py │ │ │ │ ├── session_execute_response.py │ │ │ │ ├── set_snapshot_general_status_dto.py │ │ │ │ ├── signed_port_preview_url.py │ │ │ │ ├── snapshot_dto.py │ │ │ │ ├── snapshot_manager_credentials.py │ │ │ │ ├── snapshot_state.py │ │ │ │ ├── ssh_access_dto.py │ │ │ │ ├── ssh_access_validation_dto.py │ │ │ │ ├── storage_access_dto.py │ │ │ │ ├── toolbox_proxy_url.py │ │ │ │ ├── trace_span.py │ │ │ │ ├── trace_summary.py │ │ │ │ ├── update_docker_registry.py │ │ │ │ ├── update_job_status.py │ │ │ │ ├── update_organization_default_region.py │ │ │ │ ├── update_organization_invitation.py │ │ │ │ ├── update_organization_member_access.py │ │ │ │ ├── update_organization_quota.py │ │ │ │ ├── update_organization_region_quota.py │ │ │ │ ├── update_organization_role.py │ │ │ │ ├── update_region.py │ │ │ │ ├── update_sandbox_state_dto.py │ │ │ │ ├── url.py │ │ │ │ ├── user.py │ │ │ │ ├── user_home_dir_response.py │ │ │ │ ├── user_public_key.py │ │ │ │ ├── volume_dto.py │ │ │ │ ├── volume_state.py │ │ │ │ ├── webhook_app_portal_access.py │ │ │ │ ├── webhook_controller_get_status200_response.py │ │ │ │ ├── webhook_event.py │ │ │ │ ├── webhook_initialization_status.py │ │ │ │ ├── windows_response.py │ │ │ │ ├── work_dir_response.py │ │ │ │ ├── workspace.py │ │ │ │ └── workspace_port_preview_url.py │ │ │ ├── py.typed │ │ │ └── rest.py │ │ ├── project.json │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ ├── setup.cfg │ │ ├── setup.py │ │ ├── test-requirements.txt │ │ └── tox.ini │ ├── api-client-python-async/ │ │ ├── .gitignore │ │ ├── .openapi-generator/ │ │ │ ├── FILES │ │ │ └── VERSION │ │ ├── .openapi-generator-ignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── daytona_api_client_async/ │ │ │ ├── __init__.py │ │ │ ├── api/ │ │ │ 
│ ├── __init__.py │ │ │ │ ├── admin_api.py │ │ │ │ ├── api_keys_api.py │ │ │ │ ├── audit_api.py │ │ │ │ ├── config_api.py │ │ │ │ ├── docker_registry_api.py │ │ │ │ ├── health_api.py │ │ │ │ ├── jobs_api.py │ │ │ │ ├── object_storage_api.py │ │ │ │ ├── organizations_api.py │ │ │ │ ├── preview_api.py │ │ │ │ ├── regions_api.py │ │ │ │ ├── runners_api.py │ │ │ │ ├── sandbox_api.py │ │ │ │ ├── snapshots_api.py │ │ │ │ ├── toolbox_api.py │ │ │ │ ├── users_api.py │ │ │ │ ├── volumes_api.py │ │ │ │ ├── webhooks_api.py │ │ │ │ └── workspace_api.py │ │ │ ├── api_client.py │ │ │ ├── api_response.py │ │ │ ├── configuration.py │ │ │ ├── exceptions.py │ │ │ ├── models/ │ │ │ │ ├── __init__.py │ │ │ │ ├── account_provider.py │ │ │ │ ├── admin_create_runner.py │ │ │ │ ├── announcement.py │ │ │ │ ├── api_key_list.py │ │ │ │ ├── api_key_response.py │ │ │ │ ├── audit_log.py │ │ │ │ ├── build_info.py │ │ │ │ ├── command.py │ │ │ │ ├── completion_context.py │ │ │ │ ├── completion_item.py │ │ │ │ ├── completion_list.py │ │ │ │ ├── compressed_screenshot_response.py │ │ │ │ ├── computer_use_start_response.py │ │ │ │ ├── computer_use_status_response.py │ │ │ │ ├── computer_use_stop_response.py │ │ │ │ ├── create_api_key.py │ │ │ │ ├── create_build_info.py │ │ │ │ ├── create_docker_registry.py │ │ │ │ ├── create_linked_account.py │ │ │ │ ├── create_organization.py │ │ │ │ ├── create_organization_invitation.py │ │ │ │ ├── create_organization_quota.py │ │ │ │ ├── create_organization_role.py │ │ │ │ ├── create_region.py │ │ │ │ ├── create_region_response.py │ │ │ │ ├── create_runner.py │ │ │ │ ├── create_runner_response.py │ │ │ │ ├── create_sandbox.py │ │ │ │ ├── create_session_request.py │ │ │ │ ├── create_snapshot.py │ │ │ │ ├── create_user.py │ │ │ │ ├── create_volume.py │ │ │ │ ├── create_workspace.py │ │ │ │ ├── daytona_configuration.py │ │ │ │ ├── display_info_response.py │ │ │ │ ├── docker_registry.py │ │ │ │ ├── download_files.py │ │ │ │ ├── execute_request.py │ │ │ │ ├── 
execute_response.py │ │ │ │ ├── file_info.py │ │ │ │ ├── file_status.py │ │ │ │ ├── git_add_request.py │ │ │ │ ├── git_branch_request.py │ │ │ │ ├── git_checkout_request.py │ │ │ │ ├── git_clone_request.py │ │ │ │ ├── git_commit_info.py │ │ │ │ ├── git_commit_request.py │ │ │ │ ├── git_commit_response.py │ │ │ │ ├── git_delete_branch_request.py │ │ │ │ ├── git_repo_request.py │ │ │ │ ├── git_status.py │ │ │ │ ├── health_controller_check200_response.py │ │ │ │ ├── health_controller_check200_response_info_value.py │ │ │ │ ├── health_controller_check503_response.py │ │ │ │ ├── job.py │ │ │ │ ├── job_status.py │ │ │ │ ├── job_type.py │ │ │ │ ├── keyboard_hotkey_request.py │ │ │ │ ├── keyboard_press_request.py │ │ │ │ ├── keyboard_type_request.py │ │ │ │ ├── list_branch_response.py │ │ │ │ ├── log_entry.py │ │ │ │ ├── lsp_completion_params.py │ │ │ │ ├── lsp_document_request.py │ │ │ │ ├── lsp_location.py │ │ │ │ ├── lsp_server_request.py │ │ │ │ ├── lsp_symbol.py │ │ │ │ ├── match.py │ │ │ │ ├── metric_data_point.py │ │ │ │ ├── metric_series.py │ │ │ │ ├── metrics_response.py │ │ │ │ ├── mouse_click_request.py │ │ │ │ ├── mouse_click_response.py │ │ │ │ ├── mouse_drag_request.py │ │ │ │ ├── mouse_drag_response.py │ │ │ │ ├── mouse_move_request.py │ │ │ │ ├── mouse_move_response.py │ │ │ │ ├── mouse_position.py │ │ │ │ ├── mouse_scroll_request.py │ │ │ │ ├── mouse_scroll_response.py │ │ │ │ ├── oidc_config.py │ │ │ │ ├── organization.py │ │ │ │ ├── organization_invitation.py │ │ │ │ ├── organization_role.py │ │ │ │ ├── organization_sandbox_default_limited_network_egress.py │ │ │ │ ├── organization_suspension.py │ │ │ │ ├── organization_usage_overview.py │ │ │ │ ├── organization_user.py │ │ │ │ ├── otel_config.py │ │ │ │ ├── paginated_audit_logs.py │ │ │ │ ├── paginated_jobs.py │ │ │ │ ├── paginated_logs.py │ │ │ │ ├── paginated_sandboxes.py │ │ │ │ ├── paginated_snapshots.py │ │ │ │ ├── paginated_traces.py │ │ │ │ ├── poll_jobs_response.py │ │ │ │ ├── 
port_preview_url.py │ │ │ │ ├── position.py │ │ │ │ ├── posthog_config.py │ │ │ │ ├── process_errors_response.py │ │ │ │ ├── process_logs_response.py │ │ │ │ ├── process_restart_response.py │ │ │ │ ├── process_status_response.py │ │ │ │ ├── project_dir_response.py │ │ │ │ ├── pty_create_request.py │ │ │ │ ├── pty_create_response.py │ │ │ │ ├── pty_list_response.py │ │ │ │ ├── pty_resize_request.py │ │ │ │ ├── pty_session_info.py │ │ │ │ ├── range.py │ │ │ │ ├── rate_limit_config.py │ │ │ │ ├── rate_limit_entry.py │ │ │ │ ├── regenerate_api_key_response.py │ │ │ │ ├── region.py │ │ │ │ ├── region_quota.py │ │ │ │ ├── region_screenshot_response.py │ │ │ │ ├── region_type.py │ │ │ │ ├── region_usage_overview.py │ │ │ │ ├── registry_push_access_dto.py │ │ │ │ ├── replace_request.py │ │ │ │ ├── replace_result.py │ │ │ │ ├── resize_sandbox.py │ │ │ │ ├── runner.py │ │ │ │ ├── runner_full.py │ │ │ │ ├── runner_health_metrics.py │ │ │ │ ├── runner_healthcheck.py │ │ │ │ ├── runner_service_health.py │ │ │ │ ├── runner_snapshot_dto.py │ │ │ │ ├── runner_state.py │ │ │ │ ├── sandbox.py │ │ │ │ ├── sandbox_class.py │ │ │ │ ├── sandbox_desired_state.py │ │ │ │ ├── sandbox_info.py │ │ │ │ ├── sandbox_labels.py │ │ │ │ ├── sandbox_state.py │ │ │ │ ├── sandbox_volume.py │ │ │ │ ├── screenshot_response.py │ │ │ │ ├── search_files_response.py │ │ │ │ ├── send_webhook_dto.py │ │ │ │ ├── session.py │ │ │ │ ├── session_execute_request.py │ │ │ │ ├── session_execute_response.py │ │ │ │ ├── set_snapshot_general_status_dto.py │ │ │ │ ├── signed_port_preview_url.py │ │ │ │ ├── snapshot_dto.py │ │ │ │ ├── snapshot_manager_credentials.py │ │ │ │ ├── snapshot_state.py │ │ │ │ ├── ssh_access_dto.py │ │ │ │ ├── ssh_access_validation_dto.py │ │ │ │ ├── storage_access_dto.py │ │ │ │ ├── toolbox_proxy_url.py │ │ │ │ ├── trace_span.py │ │ │ │ ├── trace_summary.py │ │ │ │ ├── update_docker_registry.py │ │ │ │ ├── update_job_status.py │ │ │ │ ├── update_organization_default_region.py │ │ │ │ ├── 
update_organization_invitation.py │ │ │ │ ├── update_organization_member_access.py │ │ │ │ ├── update_organization_quota.py │ │ │ │ ├── update_organization_region_quota.py │ │ │ │ ├── update_organization_role.py │ │ │ │ ├── update_region.py │ │ │ │ ├── update_sandbox_state_dto.py │ │ │ │ ├── url.py │ │ │ │ ├── user.py │ │ │ │ ├── user_home_dir_response.py │ │ │ │ ├── user_public_key.py │ │ │ │ ├── volume_dto.py │ │ │ │ ├── volume_state.py │ │ │ │ ├── webhook_app_portal_access.py │ │ │ │ ├── webhook_controller_get_status200_response.py │ │ │ │ ├── webhook_event.py │ │ │ │ ├── webhook_initialization_status.py │ │ │ │ ├── windows_response.py │ │ │ │ ├── work_dir_response.py │ │ │ │ ├── workspace.py │ │ │ │ └── workspace_port_preview_url.py │ │ │ ├── py.typed │ │ │ └── rest.py │ │ ├── project.json │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ ├── setup.cfg │ │ ├── setup.py │ │ ├── test-requirements.txt │ │ └── tox.ini │ ├── api-client-ruby/ │ │ ├── .gitignore │ │ ├── .openapi-generator/ │ │ │ ├── FILES │ │ │ └── VERSION │ │ ├── .openapi-generator-ignore │ │ ├── .rspec │ │ ├── .rubocop.yml │ │ ├── Gemfile │ │ ├── Rakefile │ │ ├── daytona_api_client.gemspec │ │ ├── fix-gemspec.sh │ │ ├── lib/ │ │ │ ├── daytona_api_client/ │ │ │ │ ├── api/ │ │ │ │ │ ├── admin_api.rb │ │ │ │ │ ├── api_keys_api.rb │ │ │ │ │ ├── audit_api.rb │ │ │ │ │ ├── config_api.rb │ │ │ │ │ ├── docker_registry_api.rb │ │ │ │ │ ├── health_api.rb │ │ │ │ │ ├── jobs_api.rb │ │ │ │ │ ├── object_storage_api.rb │ │ │ │ │ ├── organizations_api.rb │ │ │ │ │ ├── preview_api.rb │ │ │ │ │ ├── regions_api.rb │ │ │ │ │ ├── runners_api.rb │ │ │ │ │ ├── sandbox_api.rb │ │ │ │ │ ├── snapshots_api.rb │ │ │ │ │ ├── toolbox_api.rb │ │ │ │ │ ├── users_api.rb │ │ │ │ │ ├── volumes_api.rb │ │ │ │ │ ├── webhooks_api.rb │ │ │ │ │ └── workspace_api.rb │ │ │ │ ├── api_client.rb │ │ │ │ ├── api_error.rb │ │ │ │ ├── configuration.rb │ │ │ │ ├── models/ │ │ │ │ │ ├── account_provider.rb │ │ │ │ │ ├── admin_create_runner.rb │ 
│ │ │ │ ├── announcement.rb │ │ │ │ │ ├── api_key_list.rb │ │ │ │ │ ├── api_key_response.rb │ │ │ │ │ ├── audit_log.rb │ │ │ │ │ ├── build_info.rb │ │ │ │ │ ├── command.rb │ │ │ │ │ ├── completion_context.rb │ │ │ │ │ ├── completion_item.rb │ │ │ │ │ ├── completion_list.rb │ │ │ │ │ ├── compressed_screenshot_response.rb │ │ │ │ │ ├── computer_use_start_response.rb │ │ │ │ │ ├── computer_use_status_response.rb │ │ │ │ │ ├── computer_use_stop_response.rb │ │ │ │ │ ├── create_api_key.rb │ │ │ │ │ ├── create_build_info.rb │ │ │ │ │ ├── create_docker_registry.rb │ │ │ │ │ ├── create_linked_account.rb │ │ │ │ │ ├── create_organization.rb │ │ │ │ │ ├── create_organization_invitation.rb │ │ │ │ │ ├── create_organization_quota.rb │ │ │ │ │ ├── create_organization_role.rb │ │ │ │ │ ├── create_region.rb │ │ │ │ │ ├── create_region_response.rb │ │ │ │ │ ├── create_runner.rb │ │ │ │ │ ├── create_runner_response.rb │ │ │ │ │ ├── create_sandbox.rb │ │ │ │ │ ├── create_session_request.rb │ │ │ │ │ ├── create_snapshot.rb │ │ │ │ │ ├── create_user.rb │ │ │ │ │ ├── create_volume.rb │ │ │ │ │ ├── create_workspace.rb │ │ │ │ │ ├── daytona_configuration.rb │ │ │ │ │ ├── display_info_response.rb │ │ │ │ │ ├── docker_registry.rb │ │ │ │ │ ├── download_files.rb │ │ │ │ │ ├── execute_request.rb │ │ │ │ │ ├── execute_response.rb │ │ │ │ │ ├── file_info.rb │ │ │ │ │ ├── file_status.rb │ │ │ │ │ ├── git_add_request.rb │ │ │ │ │ ├── git_branch_request.rb │ │ │ │ │ ├── git_checkout_request.rb │ │ │ │ │ ├── git_clone_request.rb │ │ │ │ │ ├── git_commit_info.rb │ │ │ │ │ ├── git_commit_request.rb │ │ │ │ │ ├── git_commit_response.rb │ │ │ │ │ ├── git_delete_branch_request.rb │ │ │ │ │ ├── git_repo_request.rb │ │ │ │ │ ├── git_status.rb │ │ │ │ │ ├── health_controller_check200_response.rb │ │ │ │ │ ├── health_controller_check200_response_info_value.rb │ │ │ │ │ ├── health_controller_check503_response.rb │ │ │ │ │ ├── job.rb │ │ │ │ │ ├── job_status.rb │ │ │ │ │ ├── job_type.rb │ │ │ │ │ ├── 
keyboard_hotkey_request.rb │ │ │ │ │ ├── keyboard_press_request.rb │ │ │ │ │ ├── keyboard_type_request.rb │ │ │ │ │ ├── list_branch_response.rb │ │ │ │ │ ├── log_entry.rb │ │ │ │ │ ├── lsp_completion_params.rb │ │ │ │ │ ├── lsp_document_request.rb │ │ │ │ │ ├── lsp_location.rb │ │ │ │ │ ├── lsp_server_request.rb │ │ │ │ │ ├── lsp_symbol.rb │ │ │ │ │ ├── match.rb │ │ │ │ │ ├── metric_data_point.rb │ │ │ │ │ ├── metric_series.rb │ │ │ │ │ ├── metrics_response.rb │ │ │ │ │ ├── mouse_click_request.rb │ │ │ │ │ ├── mouse_click_response.rb │ │ │ │ │ ├── mouse_drag_request.rb │ │ │ │ │ ├── mouse_drag_response.rb │ │ │ │ │ ├── mouse_move_request.rb │ │ │ │ │ ├── mouse_move_response.rb │ │ │ │ │ ├── mouse_position.rb │ │ │ │ │ ├── mouse_scroll_request.rb │ │ │ │ │ ├── mouse_scroll_response.rb │ │ │ │ │ ├── oidc_config.rb │ │ │ │ │ ├── organization.rb │ │ │ │ │ ├── organization_invitation.rb │ │ │ │ │ ├── organization_role.rb │ │ │ │ │ ├── organization_sandbox_default_limited_network_egress.rb │ │ │ │ │ ├── organization_suspension.rb │ │ │ │ │ ├── organization_usage_overview.rb │ │ │ │ │ ├── organization_user.rb │ │ │ │ │ ├── otel_config.rb │ │ │ │ │ ├── paginated_audit_logs.rb │ │ │ │ │ ├── paginated_jobs.rb │ │ │ │ │ ├── paginated_logs.rb │ │ │ │ │ ├── paginated_sandboxes.rb │ │ │ │ │ ├── paginated_snapshots.rb │ │ │ │ │ ├── paginated_traces.rb │ │ │ │ │ ├── poll_jobs_response.rb │ │ │ │ │ ├── port_preview_url.rb │ │ │ │ │ ├── position.rb │ │ │ │ │ ├── posthog_config.rb │ │ │ │ │ ├── process_errors_response.rb │ │ │ │ │ ├── process_logs_response.rb │ │ │ │ │ ├── process_restart_response.rb │ │ │ │ │ ├── process_status_response.rb │ │ │ │ │ ├── project_dir_response.rb │ │ │ │ │ ├── pty_create_request.rb │ │ │ │ │ ├── pty_create_response.rb │ │ │ │ │ ├── pty_list_response.rb │ │ │ │ │ ├── pty_resize_request.rb │ │ │ │ │ ├── pty_session_info.rb │ │ │ │ │ ├── range.rb │ │ │ │ │ ├── rate_limit_config.rb │ │ │ │ │ ├── rate_limit_entry.rb │ │ │ │ │ ├── 
regenerate_api_key_response.rb │ │ │ │ │ ├── region.rb │ │ │ │ │ ├── region_quota.rb │ │ │ │ │ ├── region_screenshot_response.rb │ │ │ │ │ ├── region_type.rb │ │ │ │ │ ├── region_usage_overview.rb │ │ │ │ │ ├── registry_push_access_dto.rb │ │ │ │ │ ├── replace_request.rb │ │ │ │ │ ├── replace_result.rb │ │ │ │ │ ├── resize_sandbox.rb │ │ │ │ │ ├── runner.rb │ │ │ │ │ ├── runner_full.rb │ │ │ │ │ ├── runner_health_metrics.rb │ │ │ │ │ ├── runner_healthcheck.rb │ │ │ │ │ ├── runner_service_health.rb │ │ │ │ │ ├── runner_snapshot_dto.rb │ │ │ │ │ ├── runner_state.rb │ │ │ │ │ ├── sandbox.rb │ │ │ │ │ ├── sandbox_class.rb │ │ │ │ │ ├── sandbox_desired_state.rb │ │ │ │ │ ├── sandbox_info.rb │ │ │ │ │ ├── sandbox_labels.rb │ │ │ │ │ ├── sandbox_state.rb │ │ │ │ │ ├── sandbox_volume.rb │ │ │ │ │ ├── screenshot_response.rb │ │ │ │ │ ├── search_files_response.rb │ │ │ │ │ ├── send_webhook_dto.rb │ │ │ │ │ ├── session.rb │ │ │ │ │ ├── session_execute_request.rb │ │ │ │ │ ├── session_execute_response.rb │ │ │ │ │ ├── set_snapshot_general_status_dto.rb │ │ │ │ │ ├── signed_port_preview_url.rb │ │ │ │ │ ├── snapshot_dto.rb │ │ │ │ │ ├── snapshot_manager_credentials.rb │ │ │ │ │ ├── snapshot_state.rb │ │ │ │ │ ├── ssh_access_dto.rb │ │ │ │ │ ├── ssh_access_validation_dto.rb │ │ │ │ │ ├── storage_access_dto.rb │ │ │ │ │ ├── toolbox_proxy_url.rb │ │ │ │ │ ├── trace_span.rb │ │ │ │ │ ├── trace_summary.rb │ │ │ │ │ ├── update_docker_registry.rb │ │ │ │ │ ├── update_job_status.rb │ │ │ │ │ ├── update_organization_default_region.rb │ │ │ │ │ ├── update_organization_invitation.rb │ │ │ │ │ ├── update_organization_member_access.rb │ │ │ │ │ ├── update_organization_quota.rb │ │ │ │ │ ├── update_organization_region_quota.rb │ │ │ │ │ ├── update_organization_role.rb │ │ │ │ │ ├── update_region.rb │ │ │ │ │ ├── update_sandbox_state_dto.rb │ │ │ │ │ ├── url.rb │ │ │ │ │ ├── user.rb │ │ │ │ │ ├── user_home_dir_response.rb │ │ │ │ │ ├── user_public_key.rb │ │ │ │ │ ├── volume_dto.rb │ │ │ │ │ 
├── volume_state.rb │ │ │ │ │ ├── webhook_app_portal_access.rb │ │ │ │ │ ├── webhook_controller_get_status200_response.rb │ │ │ │ │ ├── webhook_event.rb │ │ │ │ │ ├── webhook_initialization_status.rb │ │ │ │ │ ├── windows_response.rb │ │ │ │ │ ├── work_dir_response.rb │ │ │ │ │ ├── workspace.rb │ │ │ │ │ └── workspace_port_preview_url.rb │ │ │ │ └── version.rb │ │ │ └── daytona_api_client.rb │ │ └── project.json │ ├── common-go/ │ │ ├── go.mod │ │ ├── go.sum │ │ └── pkg/ │ │ ├── cache/ │ │ │ ├── interface.go │ │ │ ├── map_cache.go │ │ │ └── redis_cache.go │ │ ├── errors/ │ │ │ ├── convert_openapi_error.go │ │ │ ├── http.go │ │ │ └── middleware.go │ │ ├── log/ │ │ │ ├── handlers.go │ │ │ ├── level.go │ │ │ ├── prefix_writer.go │ │ │ ├── read_multiplex_log.go │ │ │ └── writer.go │ │ ├── proxy/ │ │ │ ├── conn_monitor.go │ │ │ └── proxy.go │ │ ├── telemetry/ │ │ │ ├── common.go │ │ │ ├── logging.go │ │ │ ├── metrics.go │ │ │ └── tracing.go │ │ ├── timer/ │ │ │ └── timer.go │ │ └── utils/ │ │ └── exponential_backoff.go │ ├── computer-use/ │ │ ├── README.md │ │ ├── computer_use_test.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── main.go │ │ ├── pkg/ │ │ │ └── computeruse/ │ │ │ ├── computeruse.go │ │ │ ├── display.go │ │ │ ├── keyboard.go │ │ │ ├── mouse.go │ │ │ └── screenshot.go │ │ └── project.json │ ├── opencode-plugin/ │ │ ├── .gitignore │ │ ├── .npmignore │ │ ├── .opencode/ │ │ │ └── plugin/ │ │ │ ├── daytona/ │ │ │ │ ├── core/ │ │ │ │ │ ├── logger.ts │ │ │ │ │ ├── project-data-storage.ts │ │ │ │ │ ├── session-manager.ts │ │ │ │ │ ├── toast.ts │ │ │ │ │ └── types.ts │ │ │ │ ├── git/ │ │ │ │ │ ├── host-git-manager.ts │ │ │ │ │ ├── index.ts │ │ │ │ │ ├── sandbox-git-manager.ts │ │ │ │ │ └── session-git-manager.ts │ │ │ │ ├── index.ts │ │ │ │ ├── plugins/ │ │ │ │ │ ├── custom-tools.ts │ │ │ │ │ ├── session-events.ts │ │ │ │ │ └── system-transform.ts │ │ │ │ ├── tools/ │ │ │ │ │ ├── bash.ts │ │ │ │ │ ├── edit.ts │ │ │ │ │ ├── get-preview-url.ts │ │ │ │ │ ├── glob.ts │ │ │ │ 
│ ├── grep.ts │ │ │ │ │ ├── ls.ts │ │ │ │ │ ├── lsp.ts │ │ │ │ │ ├── multiedit.ts │ │ │ │ │ ├── patch.ts │ │ │ │ │ ├── read.ts │ │ │ │ │ └── write.ts │ │ │ │ └── tools.ts │ │ │ └── index.ts │ │ ├── README.md │ │ ├── package.json │ │ ├── project.json │ │ ├── tsconfig.json │ │ └── tsconfig.lib.json │ ├── runner-api-client/ │ │ ├── LICENSE │ │ ├── package.json │ │ ├── project.json │ │ ├── src/ │ │ │ ├── .gitignore │ │ │ ├── .npmignore │ │ │ ├── .openapi-generator/ │ │ │ │ ├── FILES │ │ │ │ └── VERSION │ │ │ ├── .openapi-generator-ignore │ │ │ ├── api/ │ │ │ │ ├── default-api.ts │ │ │ │ ├── sandbox-api.ts │ │ │ │ ├── snapshots-api.ts │ │ │ │ └── toolbox-api.ts │ │ │ ├── api.ts │ │ │ ├── base.ts │ │ │ ├── common.ts │ │ │ ├── configuration.ts │ │ │ ├── git_push.sh │ │ │ ├── index.ts │ │ │ └── models/ │ │ │ ├── build-snapshot-request-dto.ts │ │ │ ├── create-backup-dto.ts │ │ │ ├── create-sandbox-dto.ts │ │ │ ├── dto-volume-dto.ts │ │ │ ├── enums-backup-state.ts │ │ │ ├── enums-sandbox-state.ts │ │ │ ├── error-response.ts │ │ │ ├── index.ts │ │ │ ├── inspect-snapshot-in-registry-request.ts │ │ │ ├── is-recoverable-dto.ts │ │ │ ├── is-recoverable-response.ts │ │ │ ├── pull-snapshot-request-dto.ts │ │ │ ├── recover-sandbox-dto.ts │ │ │ ├── registry-dto.ts │ │ │ ├── resize-sandbox-dto.ts │ │ │ ├── runner-info-response-dto.ts │ │ │ ├── runner-metrics.ts │ │ │ ├── runner-service-info.ts │ │ │ ├── sandbox-info-response.ts │ │ │ ├── snapshot-digest-response.ts │ │ │ ├── snapshot-exists-response.ts │ │ │ ├── snapshot-info-response.ts │ │ │ ├── start-sandbox-response.ts │ │ │ ├── tag-image-request-dto.ts │ │ │ └── update-network-settings-dto.ts │ │ ├── tsconfig.json │ │ └── tsconfig.lib.json │ ├── sdk-go/ │ │ ├── LICENSE │ │ ├── README.md │ │ ├── go.mod │ │ ├── go.sum │ │ ├── pkg/ │ │ │ ├── common/ │ │ │ │ └── url_helpers.go │ │ │ ├── daytona/ │ │ │ │ ├── VERSION │ │ │ │ ├── client.go │ │ │ │ ├── client_test.go │ │ │ │ ├── code_interpreter.go │ │ │ │ ├── computer_use.go │ │ │ │ ├── 
filesystem.go │ │ │ │ ├── git.go │ │ │ │ ├── image.go │ │ │ │ ├── lsp_server.go │ │ │ │ ├── object_storage.go │ │ │ │ ├── otel.go │ │ │ │ ├── process.go │ │ │ │ ├── process_test.go │ │ │ │ ├── pty_handle.go │ │ │ │ ├── sandbox.go │ │ │ │ ├── snapshot.go │ │ │ │ ├── version.go │ │ │ │ └── volume.go │ │ │ ├── errors/ │ │ │ │ └── errors.go │ │ │ ├── options/ │ │ │ │ ├── client.go │ │ │ │ ├── code_interpreter.go │ │ │ │ ├── filesystem.go │ │ │ │ ├── git.go │ │ │ │ ├── image.go │ │ │ │ └── process.go │ │ │ └── types/ │ │ │ └── types.go │ │ ├── project.json │ │ └── templates/ │ │ ├── example.gotxt │ │ ├── file.gotxt │ │ ├── func.gotxt │ │ └── type.gotxt │ ├── sdk-python/ │ │ ├── LICENSE │ │ ├── README.md │ │ ├── package.json │ │ ├── project.json │ │ ├── pydoc-markdown.yml │ │ ├── pyproject.toml │ │ ├── scripts/ │ │ │ ├── add-api-clients.sh │ │ │ ├── build-sdk.sh │ │ │ ├── chart_data_extractor_wrapper.py │ │ │ ├── docs-code-block-indentation.sh │ │ │ └── docs-reorder-sections.sh │ │ └── src/ │ │ └── daytona/ │ │ ├── __init__.py │ │ ├── _async/ │ │ │ ├── __init__.py │ │ │ ├── code_interpreter.py │ │ │ ├── computer_use.py │ │ │ ├── daytona.py │ │ │ ├── filesystem.py │ │ │ ├── git.py │ │ │ ├── lsp_server.py │ │ │ ├── object_storage.py │ │ │ ├── process.py │ │ │ ├── sandbox.py │ │ │ ├── snapshot.py │ │ │ └── volume.py │ │ ├── _sync/ │ │ │ ├── __init__.py │ │ │ ├── code_interpreter.py │ │ │ ├── computer_use.py │ │ │ ├── daytona.py │ │ │ ├── filesystem.py │ │ │ ├── git.py │ │ │ ├── lsp_server.py │ │ │ ├── object_storage.py │ │ │ ├── process.py │ │ │ ├── sandbox.py │ │ │ ├── snapshot.py │ │ │ └── volume.py │ │ ├── _utils/ │ │ │ ├── __init__.py │ │ │ ├── deprecation.py │ │ │ ├── docs_ignore.py │ │ │ ├── enum.py │ │ │ ├── environment.py │ │ │ ├── errors.py │ │ │ ├── otel_decorator.py │ │ │ ├── stream.py │ │ │ ├── timeout.py │ │ │ └── types.py │ │ ├── code_toolbox/ │ │ │ ├── __init__.py │ │ │ ├── sandbox_js_code_toolbox.py │ │ │ ├── sandbox_python_code_toolbox.py │ │ │ └── 
sandbox_ts_code_toolbox.py │ │ ├── common/ │ │ │ ├── __init__.py │ │ │ ├── charts.py │ │ │ ├── code_interpreter.py │ │ │ ├── computer_use.py │ │ │ ├── daytona.py │ │ │ ├── errors.py │ │ │ ├── filesystem.py │ │ │ ├── git.py │ │ │ ├── image.py │ │ │ ├── lsp_server.py │ │ │ ├── process.py │ │ │ ├── protocols.py │ │ │ ├── pty.py │ │ │ ├── sandbox.py │ │ │ ├── snapshot.py │ │ │ └── volume.py │ │ ├── handle/ │ │ │ ├── async_pty_handle.py │ │ │ └── pty_handle.py │ │ ├── internal/ │ │ │ ├── __init__.py │ │ │ └── toolbox_api_client_proxy.py │ │ └── py.typed │ ├── sdk-ruby/ │ │ ├── .gitignore │ │ ├── .rubocop.yml │ │ ├── .ruby-version │ │ ├── CODE_OF_CONDUCT.md │ │ ├── Gemfile │ │ ├── README.md │ │ ├── Rakefile │ │ ├── bin/ │ │ │ ├── console │ │ │ └── setup │ │ ├── daytona.gemspec │ │ ├── lib/ │ │ │ ├── daytona/ │ │ │ │ ├── code_interpreter.rb │ │ │ │ ├── code_toolbox/ │ │ │ │ │ ├── sandbox_js_code_toolbox.rb │ │ │ │ │ ├── sandbox_python_code_toolbox.rb │ │ │ │ │ └── sandbox_ts_code_toolbox.rb │ │ │ │ ├── common/ │ │ │ │ │ ├── charts.rb │ │ │ │ │ ├── code_interpreter.rb │ │ │ │ │ ├── code_language.rb │ │ │ │ │ ├── daytona.rb │ │ │ │ │ ├── file_system.rb │ │ │ │ │ ├── git.rb │ │ │ │ │ ├── image.rb │ │ │ │ │ ├── process.rb │ │ │ │ │ ├── pty.rb │ │ │ │ │ ├── resources.rb │ │ │ │ │ ├── response.rb │ │ │ │ │ └── snapshot.rb │ │ │ │ ├── computer_use.rb │ │ │ │ ├── config.rb │ │ │ │ ├── daytona.rb │ │ │ │ ├── file_system.rb │ │ │ │ ├── git.rb │ │ │ │ ├── lsp_server.rb │ │ │ │ ├── object_storage.rb │ │ │ │ ├── otel.rb │ │ │ │ ├── process.rb │ │ │ │ ├── sandbox.rb │ │ │ │ ├── sdk/ │ │ │ │ │ └── version.rb │ │ │ │ ├── sdk.rb │ │ │ │ ├── snapshot_service.rb │ │ │ │ ├── util.rb │ │ │ │ ├── volume.rb │ │ │ │ └── volume_service.rb │ │ │ └── daytona.rb │ │ ├── project.json │ │ ├── scripts/ │ │ │ └── generate-docs.rb │ │ └── sig/ │ │ └── daytona/ │ │ └── sdk.rbs │ ├── sdk-typescript/ │ │ ├── LICENSE │ │ ├── README.md │ │ ├── hooks/ │ │ │ └── typedoc-custom.mjs │ │ ├── jest.config.js │ │ ├── 
package.json │ │ ├── project.json │ │ ├── src/ │ │ │ ├── CodeInterpreter.ts │ │ │ ├── ComputerUse.ts │ │ │ ├── Daytona.ts │ │ │ ├── FileSystem.ts │ │ │ ├── Git.ts │ │ │ ├── Image.ts │ │ │ ├── LspServer.ts │ │ │ ├── ObjectStorage.ts │ │ │ ├── Process.ts │ │ │ ├── PtyHandle.ts │ │ │ ├── Sandbox.ts │ │ │ ├── Snapshot.ts │ │ │ ├── Volume.ts │ │ │ ├── code-toolbox/ │ │ │ │ ├── SandboxJsCodeToolbox.ts │ │ │ │ ├── SandboxPythonCodeToolbox.ts │ │ │ │ └── SandboxTsCodeToolbox.ts │ │ │ ├── errors/ │ │ │ │ └── DaytonaError.ts │ │ │ ├── index.ts │ │ │ ├── types/ │ │ │ │ ├── Charts.ts │ │ │ │ ├── CodeInterpreter.ts │ │ │ │ ├── ExecuteResponse.ts │ │ │ │ └── Pty.ts │ │ │ └── utils/ │ │ │ ├── ArtifactParser.ts │ │ │ ├── Binary.ts │ │ │ ├── FileTransfer.ts │ │ │ ├── Import.ts │ │ │ ├── Multipart.ts │ │ │ ├── Runtime.ts │ │ │ ├── Stream.ts │ │ │ ├── WebSocket.ts │ │ │ └── otel.decorator.ts │ │ ├── tsconfig.json │ │ ├── tsconfig.lib.json │ │ ├── tsconfig.spec.json │ │ └── typedoc.json │ ├── toolbox-api-client/ │ │ ├── LICENSE │ │ ├── package.json │ │ ├── project.json │ │ ├── src/ │ │ │ ├── .gitignore │ │ │ ├── .npmignore │ │ │ ├── .openapi-generator/ │ │ │ │ ├── FILES │ │ │ │ └── VERSION │ │ │ ├── .openapi-generator-ignore │ │ │ ├── api/ │ │ │ │ ├── computer-use-api.ts │ │ │ │ ├── file-system-api.ts │ │ │ │ ├── git-api.ts │ │ │ │ ├── info-api.ts │ │ │ │ ├── interpreter-api.ts │ │ │ │ ├── lsp-api.ts │ │ │ │ ├── port-api.ts │ │ │ │ ├── process-api.ts │ │ │ │ └── server-api.ts │ │ │ ├── api.ts │ │ │ ├── base.ts │ │ │ ├── common.ts │ │ │ ├── configuration.ts │ │ │ ├── git_push.sh │ │ │ ├── index.ts │ │ │ └── models/ │ │ │ ├── command.ts │ │ │ ├── completion-context.ts │ │ │ ├── completion-item.ts │ │ │ ├── completion-list.ts │ │ │ ├── computer-use-start-response.ts │ │ │ ├── computer-use-status-response.ts │ │ │ ├── computer-use-stop-response.ts │ │ │ ├── create-context-request.ts │ │ │ ├── create-session-request.ts │ │ │ ├── display-info-response.ts │ │ │ ├── display-info.ts │ │ │ ├── 
execute-request.ts │ │ │ ├── execute-response.ts │ │ │ ├── file-info.ts │ │ │ ├── file-status.ts │ │ │ ├── files-download-request.ts │ │ │ ├── git-add-request.ts │ │ │ ├── git-branch-request.ts │ │ │ ├── git-checkout-request.ts │ │ │ ├── git-clone-request.ts │ │ │ ├── git-commit-info.ts │ │ │ ├── git-commit-request.ts │ │ │ ├── git-commit-response.ts │ │ │ ├── git-git-delete-branch-request.ts │ │ │ ├── git-repo-request.ts │ │ │ ├── git-status.ts │ │ │ ├── index.ts │ │ │ ├── initialize-request.ts │ │ │ ├── interpreter-context.ts │ │ │ ├── is-port-in-use-response.ts │ │ │ ├── keyboard-hotkey-request.ts │ │ │ ├── keyboard-press-request.ts │ │ │ ├── keyboard-type-request.ts │ │ │ ├── list-branch-response.ts │ │ │ ├── list-contexts-response.ts │ │ │ ├── list-recordings-response.ts │ │ │ ├── lsp-completion-params.ts │ │ │ ├── lsp-document-request.ts │ │ │ ├── lsp-location.ts │ │ │ ├── lsp-position.ts │ │ │ ├── lsp-range.ts │ │ │ ├── lsp-server-request.ts │ │ │ ├── lsp-symbol.ts │ │ │ ├── match.ts │ │ │ ├── mouse-click-request.ts │ │ │ ├── mouse-click-response.ts │ │ │ ├── mouse-drag-request.ts │ │ │ ├── mouse-drag-response.ts │ │ │ ├── mouse-move-request.ts │ │ │ ├── mouse-position-response.ts │ │ │ ├── mouse-scroll-request.ts │ │ │ ├── port-list.ts │ │ │ ├── position.ts │ │ │ ├── process-errors-response.ts │ │ │ ├── process-logs-response.ts │ │ │ ├── process-restart-response.ts │ │ │ ├── process-status-response.ts │ │ │ ├── process-status.ts │ │ │ ├── pty-create-request.ts │ │ │ ├── pty-create-response.ts │ │ │ ├── pty-list-response.ts │ │ │ ├── pty-resize-request.ts │ │ │ ├── pty-session-info.ts │ │ │ ├── recording.ts │ │ │ ├── replace-request.ts │ │ │ ├── replace-result.ts │ │ │ ├── screenshot-response.ts │ │ │ ├── scroll-response.ts │ │ │ ├── search-files-response.ts │ │ │ ├── session-execute-request.ts │ │ │ ├── session-execute-response.ts │ │ │ ├── session-send-input-request.ts │ │ │ ├── session.ts │ │ │ ├── start-recording-request.ts │ │ │ ├── status.ts │ │ │ ├── 
stop-recording-request.ts │ │ │ ├── user-home-dir-response.ts │ │ │ ├── window-info.ts │ │ │ ├── windows-response.ts │ │ │ └── work-dir-response.ts │ │ ├── tsconfig.json │ │ └── tsconfig.lib.json │ ├── toolbox-api-client-go/ │ │ ├── .gitignore │ │ ├── .openapi-generator/ │ │ │ ├── FILES │ │ │ └── VERSION │ │ ├── .openapi-generator-ignore │ │ ├── LICENSE │ │ ├── api/ │ │ │ └── openapi.yaml │ │ ├── api_computer_use.go │ │ ├── api_file_system.go │ │ ├── api_git.go │ │ ├── api_info.go │ │ ├── api_interpreter.go │ │ ├── api_lsp.go │ │ ├── api_port.go │ │ ├── api_process.go │ │ ├── api_server.go │ │ ├── client.go │ │ ├── configuration.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── model_command.go │ │ ├── model_completion_context.go │ │ ├── model_completion_item.go │ │ ├── model_completion_list.go │ │ ├── model_computer_use_start_response.go │ │ ├── model_computer_use_status_response.go │ │ ├── model_computer_use_stop_response.go │ │ ├── model_create_context_request.go │ │ ├── model_create_session_request.go │ │ ├── model_display_info.go │ │ ├── model_display_info_response.go │ │ ├── model_execute_request.go │ │ ├── model_execute_response.go │ │ ├── model_file_info.go │ │ ├── model_file_status.go │ │ ├── model_files_download_request.go │ │ ├── model_git_add_request.go │ │ ├── model_git_branch_request.go │ │ ├── model_git_checkout_request.go │ │ ├── model_git_clone_request.go │ │ ├── model_git_commit_info.go │ │ ├── model_git_commit_request.go │ │ ├── model_git_commit_response.go │ │ ├── model_git_git_delete_branch_request.go │ │ ├── model_git_repo_request.go │ │ ├── model_git_status.go │ │ ├── model_initialize_request.go │ │ ├── model_interpreter_context.go │ │ ├── model_is_port_in_use_response.go │ │ ├── model_keyboard_hotkey_request.go │ │ ├── model_keyboard_press_request.go │ │ ├── model_keyboard_type_request.go │ │ ├── model_list_branch_response.go │ │ ├── model_list_contexts_response.go │ │ ├── model_list_recordings_response.go │ │ ├── model_lsp_completion_params.go │ │ 
├── model_lsp_document_request.go │ │ ├── model_lsp_location.go │ │ ├── model_lsp_position.go │ │ ├── model_lsp_range.go │ │ ├── model_lsp_server_request.go │ │ ├── model_lsp_symbol.go │ │ ├── model_match.go │ │ ├── model_mouse_click_request.go │ │ ├── model_mouse_click_response.go │ │ ├── model_mouse_drag_request.go │ │ ├── model_mouse_drag_response.go │ │ ├── model_mouse_move_request.go │ │ ├── model_mouse_position_response.go │ │ ├── model_mouse_scroll_request.go │ │ ├── model_port_list.go │ │ ├── model_position.go │ │ ├── model_process_errors_response.go │ │ ├── model_process_logs_response.go │ │ ├── model_process_restart_response.go │ │ ├── model_process_status.go │ │ ├── model_process_status_response.go │ │ ├── model_pty_create_request.go │ │ ├── model_pty_create_response.go │ │ ├── model_pty_list_response.go │ │ ├── model_pty_resize_request.go │ │ ├── model_pty_session_info.go │ │ ├── model_recording.go │ │ ├── model_replace_request.go │ │ ├── model_replace_result.go │ │ ├── model_screenshot_response.go │ │ ├── model_scroll_response.go │ │ ├── model_search_files_response.go │ │ ├── model_session.go │ │ ├── model_session_execute_request.go │ │ ├── model_session_execute_response.go │ │ ├── model_session_send_input_request.go │ │ ├── model_start_recording_request.go │ │ ├── model_status.go │ │ ├── model_stop_recording_request.go │ │ ├── model_user_home_dir_response.go │ │ ├── model_window_info.go │ │ ├── model_windows_response.go │ │ ├── model_work_dir_response.go │ │ ├── openapitools.json │ │ ├── project.json │ │ ├── response.go │ │ └── utils.go │ ├── toolbox-api-client-python/ │ │ ├── .gitignore │ │ ├── .openapi-generator/ │ │ │ ├── FILES │ │ │ └── VERSION │ │ ├── .openapi-generator-ignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── daytona_toolbox_api_client/ │ │ │ ├── __init__.py │ │ │ ├── api/ │ │ │ │ ├── __init__.py │ │ │ │ ├── computer_use_api.py │ │ │ │ ├── file_system_api.py │ │ │ │ ├── git_api.py │ │ │ │ ├── info_api.py │ │ │ │ ├── interpreter_api.py │ 
│ │ │ ├── lsp_api.py │ │ │ │ ├── port_api.py │ │ │ │ ├── process_api.py │ │ │ │ └── server_api.py │ │ │ ├── api_client.py │ │ │ ├── api_response.py │ │ │ ├── configuration.py │ │ │ ├── exceptions.py │ │ │ ├── models/ │ │ │ │ ├── __init__.py │ │ │ │ ├── command.py │ │ │ │ ├── completion_context.py │ │ │ │ ├── completion_item.py │ │ │ │ ├── completion_list.py │ │ │ │ ├── computer_use_start_response.py │ │ │ │ ├── computer_use_status_response.py │ │ │ │ ├── computer_use_stop_response.py │ │ │ │ ├── create_context_request.py │ │ │ │ ├── create_session_request.py │ │ │ │ ├── display_info.py │ │ │ │ ├── display_info_response.py │ │ │ │ ├── execute_request.py │ │ │ │ ├── execute_response.py │ │ │ │ ├── file_info.py │ │ │ │ ├── file_status.py │ │ │ │ ├── files_download_request.py │ │ │ │ ├── git_add_request.py │ │ │ │ ├── git_branch_request.py │ │ │ │ ├── git_checkout_request.py │ │ │ │ ├── git_clone_request.py │ │ │ │ ├── git_commit_info.py │ │ │ │ ├── git_commit_request.py │ │ │ │ ├── git_commit_response.py │ │ │ │ ├── git_git_delete_branch_request.py │ │ │ │ ├── git_repo_request.py │ │ │ │ ├── git_status.py │ │ │ │ ├── initialize_request.py │ │ │ │ ├── interpreter_context.py │ │ │ │ ├── is_port_in_use_response.py │ │ │ │ ├── keyboard_hotkey_request.py │ │ │ │ ├── keyboard_press_request.py │ │ │ │ ├── keyboard_type_request.py │ │ │ │ ├── list_branch_response.py │ │ │ │ ├── list_contexts_response.py │ │ │ │ ├── list_recordings_response.py │ │ │ │ ├── lsp_completion_params.py │ │ │ │ ├── lsp_document_request.py │ │ │ │ ├── lsp_location.py │ │ │ │ ├── lsp_position.py │ │ │ │ ├── lsp_range.py │ │ │ │ ├── lsp_server_request.py │ │ │ │ ├── lsp_symbol.py │ │ │ │ ├── match.py │ │ │ │ ├── mouse_click_request.py │ │ │ │ ├── mouse_click_response.py │ │ │ │ ├── mouse_drag_request.py │ │ │ │ ├── mouse_drag_response.py │ │ │ │ ├── mouse_move_request.py │ │ │ │ ├── mouse_position_response.py │ │ │ │ ├── mouse_scroll_request.py │ │ │ │ ├── port_list.py │ │ │ │ ├── position.py │ │ │ │ 
├── process_errors_response.py │ │ │ │ ├── process_logs_response.py │ │ │ │ ├── process_restart_response.py │ │ │ │ ├── process_status.py │ │ │ │ ├── process_status_response.py │ │ │ │ ├── pty_create_request.py │ │ │ │ ├── pty_create_response.py │ │ │ │ ├── pty_list_response.py │ │ │ │ ├── pty_resize_request.py │ │ │ │ ├── pty_session_info.py │ │ │ │ ├── recording.py │ │ │ │ ├── replace_request.py │ │ │ │ ├── replace_result.py │ │ │ │ ├── screenshot_response.py │ │ │ │ ├── scroll_response.py │ │ │ │ ├── search_files_response.py │ │ │ │ ├── session.py │ │ │ │ ├── session_execute_request.py │ │ │ │ ├── session_execute_response.py │ │ │ │ ├── session_send_input_request.py │ │ │ │ ├── start_recording_request.py │ │ │ │ ├── status.py │ │ │ │ ├── stop_recording_request.py │ │ │ │ ├── user_home_dir_response.py │ │ │ │ ├── window_info.py │ │ │ │ ├── windows_response.py │ │ │ │ └── work_dir_response.py │ │ │ ├── py.typed │ │ │ └── rest.py │ │ ├── project.json │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ ├── setup.cfg │ │ ├── setup.py │ │ ├── test-requirements.txt │ │ └── tox.ini │ ├── toolbox-api-client-python-async/ │ │ ├── .gitignore │ │ ├── .openapi-generator/ │ │ │ ├── FILES │ │ │ └── VERSION │ │ ├── .openapi-generator-ignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── daytona_toolbox_api_client_async/ │ │ │ ├── __init__.py │ │ │ ├── api/ │ │ │ │ ├── __init__.py │ │ │ │ ├── computer_use_api.py │ │ │ │ ├── file_system_api.py │ │ │ │ ├── git_api.py │ │ │ │ ├── info_api.py │ │ │ │ ├── interpreter_api.py │ │ │ │ ├── lsp_api.py │ │ │ │ ├── port_api.py │ │ │ │ ├── process_api.py │ │ │ │ └── server_api.py │ │ │ ├── api_client.py │ │ │ ├── api_response.py │ │ │ ├── configuration.py │ │ │ ├── exceptions.py │ │ │ ├── models/ │ │ │ │ ├── __init__.py │ │ │ │ ├── command.py │ │ │ │ ├── completion_context.py │ │ │ │ ├── completion_item.py │ │ │ │ ├── completion_list.py │ │ │ │ ├── computer_use_start_response.py │ │ │ │ ├── computer_use_status_response.py │ │ │ │ ├── 
computer_use_stop_response.py │ │ │ │ ├── create_context_request.py │ │ │ │ ├── create_session_request.py │ │ │ │ ├── display_info.py │ │ │ │ ├── display_info_response.py │ │ │ │ ├── execute_request.py │ │ │ │ ├── execute_response.py │ │ │ │ ├── file_info.py │ │ │ │ ├── file_status.py │ │ │ │ ├── files_download_request.py │ │ │ │ ├── git_add_request.py │ │ │ │ ├── git_branch_request.py │ │ │ │ ├── git_checkout_request.py │ │ │ │ ├── git_clone_request.py │ │ │ │ ├── git_commit_info.py │ │ │ │ ├── git_commit_request.py │ │ │ │ ├── git_commit_response.py │ │ │ │ ├── git_git_delete_branch_request.py │ │ │ │ ├── git_repo_request.py │ │ │ │ ├── git_status.py │ │ │ │ ├── initialize_request.py │ │ │ │ ├── interpreter_context.py │ │ │ │ ├── is_port_in_use_response.py │ │ │ │ ├── keyboard_hotkey_request.py │ │ │ │ ├── keyboard_press_request.py │ │ │ │ ├── keyboard_type_request.py │ │ │ │ ├── list_branch_response.py │ │ │ │ ├── list_contexts_response.py │ │ │ │ ├── list_recordings_response.py │ │ │ │ ├── lsp_completion_params.py │ │ │ │ ├── lsp_document_request.py │ │ │ │ ├── lsp_location.py │ │ │ │ ├── lsp_position.py │ │ │ │ ├── lsp_range.py │ │ │ │ ├── lsp_server_request.py │ │ │ │ ├── lsp_symbol.py │ │ │ │ ├── match.py │ │ │ │ ├── mouse_click_request.py │ │ │ │ ├── mouse_click_response.py │ │ │ │ ├── mouse_drag_request.py │ │ │ │ ├── mouse_drag_response.py │ │ │ │ ├── mouse_move_request.py │ │ │ │ ├── mouse_position_response.py │ │ │ │ ├── mouse_scroll_request.py │ │ │ │ ├── port_list.py │ │ │ │ ├── position.py │ │ │ │ ├── process_errors_response.py │ │ │ │ ├── process_logs_response.py │ │ │ │ ├── process_restart_response.py │ │ │ │ ├── process_status.py │ │ │ │ ├── process_status_response.py │ │ │ │ ├── pty_create_request.py │ │ │ │ ├── pty_create_response.py │ │ │ │ ├── pty_list_response.py │ │ │ │ ├── pty_resize_request.py │ │ │ │ ├── pty_session_info.py │ │ │ │ ├── recording.py │ │ │ │ ├── replace_request.py │ │ │ │ ├── replace_result.py │ │ │ │ ├── 
screenshot_response.py │ │ │ │ ├── scroll_response.py │ │ │ │ ├── search_files_response.py │ │ │ │ ├── session.py │ │ │ │ ├── session_execute_request.py │ │ │ │ ├── session_execute_response.py │ │ │ │ ├── session_send_input_request.py │ │ │ │ ├── start_recording_request.py │ │ │ │ ├── status.py │ │ │ │ ├── stop_recording_request.py │ │ │ │ ├── user_home_dir_response.py │ │ │ │ ├── window_info.py │ │ │ │ ├── windows_response.py │ │ │ │ └── work_dir_response.py │ │ │ ├── py.typed │ │ │ └── rest.py │ │ ├── project.json │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ ├── setup.cfg │ │ ├── setup.py │ │ ├── test-requirements.txt │ │ └── tox.ini │ └── toolbox-api-client-ruby/ │ ├── .gitignore │ ├── .openapi-generator/ │ │ ├── FILES │ │ └── VERSION │ ├── .openapi-generator-ignore │ ├── .rspec │ ├── .rubocop.yml │ ├── Gemfile │ ├── Rakefile │ ├── daytona_toolbox_api_client.gemspec │ ├── fix-gemspec.sh │ ├── lib/ │ │ ├── daytona_toolbox_api_client/ │ │ │ ├── api/ │ │ │ │ ├── computer_use_api.rb │ │ │ │ ├── file_system_api.rb │ │ │ │ ├── git_api.rb │ │ │ │ ├── info_api.rb │ │ │ │ ├── interpreter_api.rb │ │ │ │ ├── lsp_api.rb │ │ │ │ ├── port_api.rb │ │ │ │ ├── process_api.rb │ │ │ │ └── server_api.rb │ │ │ ├── api_client.rb │ │ │ ├── api_error.rb │ │ │ ├── configuration.rb │ │ │ ├── models/ │ │ │ │ ├── command.rb │ │ │ │ ├── completion_context.rb │ │ │ │ ├── completion_item.rb │ │ │ │ ├── completion_list.rb │ │ │ │ ├── computer_use_start_response.rb │ │ │ │ ├── computer_use_status_response.rb │ │ │ │ ├── computer_use_stop_response.rb │ │ │ │ ├── create_context_request.rb │ │ │ │ ├── create_session_request.rb │ │ │ │ ├── display_info.rb │ │ │ │ ├── display_info_response.rb │ │ │ │ ├── execute_request.rb │ │ │ │ ├── execute_response.rb │ │ │ │ ├── file_info.rb │ │ │ │ ├── file_status.rb │ │ │ │ ├── files_download_request.rb │ │ │ │ ├── git_add_request.rb │ │ │ │ ├── git_branch_request.rb │ │ │ │ ├── git_checkout_request.rb │ │ │ │ ├── git_clone_request.rb │ │ │ │ ├── 
git_commit_info.rb │ │ │ │ ├── git_commit_request.rb │ │ │ │ ├── git_commit_response.rb │ │ │ │ ├── git_git_delete_branch_request.rb │ │ │ │ ├── git_repo_request.rb │ │ │ │ ├── git_status.rb │ │ │ │ ├── initialize_request.rb │ │ │ │ ├── interpreter_context.rb │ │ │ │ ├── is_port_in_use_response.rb │ │ │ │ ├── keyboard_hotkey_request.rb │ │ │ │ ├── keyboard_press_request.rb │ │ │ │ ├── keyboard_type_request.rb │ │ │ │ ├── list_branch_response.rb │ │ │ │ ├── list_contexts_response.rb │ │ │ │ ├── list_recordings_response.rb │ │ │ │ ├── lsp_completion_params.rb │ │ │ │ ├── lsp_document_request.rb │ │ │ │ ├── lsp_location.rb │ │ │ │ ├── lsp_position.rb │ │ │ │ ├── lsp_range.rb │ │ │ │ ├── lsp_server_request.rb │ │ │ │ ├── lsp_symbol.rb │ │ │ │ ├── match.rb │ │ │ │ ├── mouse_click_request.rb │ │ │ │ ├── mouse_click_response.rb │ │ │ │ ├── mouse_drag_request.rb │ │ │ │ ├── mouse_drag_response.rb │ │ │ │ ├── mouse_move_request.rb │ │ │ │ ├── mouse_position_response.rb │ │ │ │ ├── mouse_scroll_request.rb │ │ │ │ ├── port_list.rb │ │ │ │ ├── position.rb │ │ │ │ ├── process_errors_response.rb │ │ │ │ ├── process_logs_response.rb │ │ │ │ ├── process_restart_response.rb │ │ │ │ ├── process_status.rb │ │ │ │ ├── process_status_response.rb │ │ │ │ ├── pty_create_request.rb │ │ │ │ ├── pty_create_response.rb │ │ │ │ ├── pty_list_response.rb │ │ │ │ ├── pty_resize_request.rb │ │ │ │ ├── pty_session_info.rb │ │ │ │ ├── recording.rb │ │ │ │ ├── replace_request.rb │ │ │ │ ├── replace_result.rb │ │ │ │ ├── screenshot_response.rb │ │ │ │ ├── scroll_response.rb │ │ │ │ ├── search_files_response.rb │ │ │ │ ├── session.rb │ │ │ │ ├── session_execute_request.rb │ │ │ │ ├── session_execute_response.rb │ │ │ │ ├── session_send_input_request.rb │ │ │ │ ├── start_recording_request.rb │ │ │ │ ├── status.rb │ │ │ │ ├── stop_recording_request.rb │ │ │ │ ├── user_home_dir_response.rb │ │ │ │ ├── window_info.rb │ │ │ │ ├── windows_response.rb │ │ │ │ └── work_dir_response.rb │ │ │ └── version.rb │ │ 
└── daytona_toolbox_api_client.rb │ └── project.json ├── nx.json ├── openapitools.json ├── package.json ├── poetry.lock ├── project.json ├── pyproject.toml ├── scripts/ │ └── setup-proxy-dns.sh ├── tsconfig.base.json └── tsconfig.json ================================================ FILE CONTENTS ================================================ ================================================ FILE: .devcontainer/Dockerfile ================================================
FROM buildpack-deps:jammy-curl

ARG TARGETARCH

# common tools
RUN apt update && export DEBIAN_FRONTEND=noninteractive \
    && apt -y install --no-install-recommends apt-utils vim htop telnet socat expect-dev tini psmisc libgit2-dev \
    python3 python3-pip libx11-dev libxtst-dev libxext-dev libxrandr-dev libxinerama-dev libxi-dev \
    libx11-6 libxrandr2 libxext6 libxrender1 libxfixes3 libxss1 libxtst6 libxi6 \
    xvfb x11vnc novnc xfce4 xfce4-terminal dbus-x11 dnsmasq gettext-base

# build tools
RUN apt update && export DEBIAN_FRONTEND=noninteractive \
    && apt -y install --no-install-recommends openjdk-11-jdk protobuf-compiler libprotobuf-dev

# Mount-s3 (AWS S3 FUSE driver)
RUN /bin/bash -c 'apt update && export DEBIAN_FRONTEND=noninteractive \
    && apt -y install --no-install-recommends fuse libfuse2 \
    && MOUNT_S3_ARCH=$([ "$TARGETARCH" = "amd64" ] && echo "x86_64" || echo "arm64") \
    && wget https://s3.amazonaws.com/mountpoint-s3-release/1.20.0/${MOUNT_S3_ARCH}/mount-s3-1.20.0-${MOUNT_S3_ARCH}.deb \
    && apt install -y ./mount-s3-1.20.0-${MOUNT_S3_ARCH}.deb \
    && rm -f mount-s3-1.20.0-${MOUNT_S3_ARCH}.deb'

# Telepresence
RUN curl -fL https://app.getambassador.io/download/tel2oss/releases/download/v2.17.0/telepresence-linux-${TARGETARCH} -o /usr/local/bin/telepresence && \
    chmod a+x /usr/local/bin/telepresence

# Resolve *.proxy.localhost to the loopback interface via dnsmasq
RUN echo "address=/proxy.localhost/127.0.0.1" >> /etc/dnsmasq.conf

# Start dnsmasq, point the container's resolver at it, then idle so the container stays up
CMD ["sh", "-c", "service dnsmasq start && echo 'nameserver 127.0.0.1' > /etc/resolv.conf && tail -f /dev/null"]
================================================ FILE: .devcontainer/buildkitd.toml ================================================ insecure-entitlements = [ "network.host", "security.insecure" ] [registry."registry:5000"] http = true insecure = true ================================================ FILE: .devcontainer/devcontainer.build.json ================================================ { // Duplicate of devcontainer.json but without docker compose // Docker compose is not supported for building and pushing the devcontainer image "name": "Daytona", "build": { "dockerfile": "Dockerfile", "context": "." }, "workspaceFolder": "/workspaces/daytona", // Configure tool-specific properties. "containerEnv": { "COREPACK_ENABLE_DOWNLOAD_PROMPT": "0" }, "remoteEnv": { "NX_DAEMON": "true", "NODE_ENV": "development", "POETRY_VIRTUALENVS_IN_PROJECT": "true" }, "customizations": { // Configure properties specific to VS Code. "vscode": { // Add the IDs of extensions you want installed when the container is created. 
"extensions": [ "dbaeumer.vscode-eslint", "esbenp.prettier-vscode", "nrwl.angular-console", "astro-build.astro-vscode", "unifiedjs.vscode-mdx", "timonwong.shellcheck", "foxundermoon.shell-format", "cschlosser.doxdocgen", "ms-python.python", "ms-toolsai.jupyter", "bradlc.vscode-tailwindcss", "shopify.ruby-lsp", "castwide.solargraph" ], "settings": { "editor.defaultFormatter": "esbenp.prettier-vscode", "python.defaultInterpreterPath": "${containerWorkspaceFolder}/.venv/bin/python", "python.terminal.activateEnvironment": true, "python.terminal.activateEnvInCurrentTerminal": true } } }, "features": { "ghcr.io/devcontainers/features/common-utils:2.5.3": { "installZsh": "true", "username": "daytona", "uid": "1000", "gid": "1000", "upgradePackages": "false" }, "ghcr.io/devcontainers/features/docker-in-docker:2.12.2": { "version": "24.0.7", "moby": false, "dockerDashComposeVersion": "v2" }, "ghcr.io/devcontainers/features/go:1.3.2": { "version": "1.23.5", "golangciLintVersion": "1.63.4" }, "ghcr.io/devcontainers/features/node:1.6.2": { "version": "22.14.0", "installYarnUsingApt": false }, "ghcr.io/devcontainers/features/ruby:1": { "version": "3.4.5" }, "./tools-feature": { "pipPackages": ["poetry==2.1.3"], "goTools": ["github.com/swaggo/swag/cmd/swag@v1.16.4", "github.com/mitranim/gow@latest"] } }, "onCreateCommand": { // "install-deps": "git config --global --add safe.directory ${containerWorkspaceFolder} && yarn", "env": "test -f .env.local || touch .env.local" }, "postStartCommand": "yarn && poetry lock && poetry install && bundle install", "postAttachCommand": "", "forwardPorts": [5556, "pgadmin:80", "registry-ui:5100", "maildev:1080", "minio:9000", "minio:9001", "jaeger:16686"], // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. 
"remoteUser": "daytona" } ================================================ FILE: .devcontainer/devcontainer.json ================================================ { "name": "Daytona", "dockerComposeFile": "docker-compose.yaml", "service": "app", "workspaceFolder": "/workspaces/daytona", // Configure tool-specific properties. "containerEnv": { "COREPACK_ENABLE_DOWNLOAD_PROMPT": "0" }, "remoteEnv": { "NX_DAEMON": "true", "NODE_ENV": "development", "POETRY_VIRTUALENVS_IN_PROJECT": "true", "RUBYLIB": "${containerWorkspaceFolder}/libs/sdk-ruby/lib:${containerWorkspaceFolder}/libs/api-client-ruby/lib:${containerWorkspaceFolder}/libs/toolbox-api-client-ruby/lib", "BUNDLE_GEMFILE": "${containerWorkspaceFolder}/Gemfile" }, "customizations": { // Configure properties specific to VS Code. "vscode": { // Add the IDs of extensions you want installed when the container is created. "extensions": [ "dbaeumer.vscode-eslint", "esbenp.prettier-vscode", "nrwl.angular-console", "astro-build.astro-vscode", "unifiedjs.vscode-mdx", "timonwong.shellcheck", "foxundermoon.shell-format", "cschlosser.doxdocgen", "ms-python.python", "ms-toolsai.jupyter", "bradlc.vscode-tailwindcss", "shopify.ruby-lsp", "castwide.solargraph" ], "settings": { "editor.defaultFormatter": "esbenp.prettier-vscode", "python.defaultInterpreterPath": "${containerWorkspaceFolder}/.venv/bin/python", "python.terminal.activateEnvironment": true, "python.terminal.activateEnvInCurrentTerminal": true } } }, "features": { "ghcr.io/devcontainers/features/common-utils:2.5.3": { "installZsh": "true", "username": "daytona", "uid": "1000", "gid": "1000", "upgradePackages": "false" }, "ghcr.io/devcontainers/features/docker-in-docker:2.12.2": { "version": "28.4.0", "moby": false, "dockerDashComposeVersion": "v2" }, "ghcr.io/devcontainers/features/go:1.3.2": { "version": "1.25.4", "golangciLintVersion": "2.6.2" }, "ghcr.io/devcontainers/features/node:1.6.2": { "version": "22.14.0", "installYarnUsingApt": false }, 
"ghcr.io/devcontainers/features/ruby:1": { "version": "3.4.5" }, "./tools-feature": { "pipPackages": [ "poetry==2.1.3" ], "goTools": [ "github.com/swaggo/swag/cmd/swag@v1.16.4", "github.com/mitranim/gow@latest", "github.com/princjef/gomarkdoc/cmd/gomarkdoc@v1.1.0" ] } }, "onCreateCommand": { // "install-deps": "git config --global --add safe.directory ${containerWorkspaceFolder} && yarn", "env": "test -f .env.local || touch .env.local" }, "postStartCommand": "yarn && poetry lock && poetry install && bundle install && docker buildx create --name builder --driver-opt network=host --config .devcontainer/buildkitd.toml --driver docker-container --use", "postAttachCommand": "", "forwardPorts": [ 5556, "pgadmin:80", "registry-ui:5100", "maildev:1080", "minio:9000", "minio:9001", "jaeger:16686" ], // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. "remoteUser": "daytona" } ================================================ FILE: .devcontainer/dex/config.yaml ================================================ # config.yaml issuer: http://localhost:5556/dex storage: type: memory web: http: 0.0.0.0:5556 allowedOrigins: ['*'] allowedHeaders: ['x-requested-with'] staticClients: - id: daytona redirectURIs: - 'http://localhost:3000' - 'http://localhost:3000/api/oauth2-redirect.html' - 'http://localhost:3009/callback' - 'http://proxy.localhost:4000/callback' name: 'Daytona' public: true enablePasswordDB: true staticPasswords: - email: 'dev@daytona.io' # password generated with: # echo password | htpasswd -BinC 10 admin | cut -d: -f2 hash: '$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W' username: 'admin' userID: '1234' ================================================ FILE: .devcontainer/docker-compose.yaml ================================================ version: '3.8' services: app: build: context: . 
dockerfile: Dockerfile privileged: true volumes: - ..:/workspaces/daytona dex: image: dexidp/dex:v2.42.0 volumes: - ./dex/config.yaml:/etc/dex/config.yaml command: ['dex', 'serve', '/etc/dex/config.yaml'] network_mode: service:app db: image: postgres:18 environment: - POSTGRES_PASSWORD=pass - POSTGRES_USER=user - POSTGRES_DB=application_ctx pgadmin: image: dpage/pgadmin4:9.2.0 entrypoint: ['sh', '-c', 'chmod 600 /pgpass && exec /entrypoint.sh'] environment: PGADMIN_DEFAULT_EMAIL: dev@daytona.io PGADMIN_DEFAULT_PASSWORD: pgadmin PGADMIN_CONFIG_SERVER_MODE: 'False' PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: 'False' user: root volumes: - ./pgadmin4/servers.json:/pgadmin4/servers.json - ./pgadmin4/pgpass:/pgpass depends_on: - db redis: image: redis:latest registry-ui: image: joxit/docker-registry-ui:main restart: always environment: - SINGLE_REGISTRY=true - REGISTRY_TITLE=Docker Registry UI - DELETE_IMAGES=true - SHOW_CONTENT_DIGEST=true - NGINX_PROXY_PASS_URL=http://registry:5000 - SHOW_CATALOG_NB_TAGS=true - CATALOG_MIN_BRANCHES=1 - CATALOG_MAX_BRANCHES=1 - TAGLIST_PAGE_SIZE=100 - REGISTRY_SECURED=false - CATALOG_ELEMENTS_LIMIT=1000 registry: image: registry:2.8.2 restart: always environment: REGISTRY_HTTP_HEADERS_Access-Control-Allow-Origin: '[http://registry-ui.example.com]' REGISTRY_HTTP_HEADERS_Access-Control-Allow-Methods: '[HEAD,GET,OPTIONS,DELETE]' REGISTRY_HTTP_HEADERS_Access-Control-Allow-Credentials: '[true]' REGISTRY_HTTP_HEADERS_Access-Control-Allow-Headers: '[Authorization,Accept,Cache-Control]' REGISTRY_HTTP_HEADERS_Access-Control-Expose-Headers: '[Docker-Content-Digest]' REGISTRY_STORAGE_DELETE_ENABLED: 'true' volumes: - registry:/var/lib/registry maildev: image: maildev/maildev minio: image: minio/minio:latest environment: - MINIO_ROOT_USER=minioadmin - MINIO_ROOT_PASSWORD=minioadmin - MINIO_IDENTITY_STS_EXPIRY="24h" volumes: - minio_data:/data command: server /data --console-address ":9001" jaeger: image: jaegertracing/all-in-one:1.74.0 otel-collector: 
image: otel/opentelemetry-collector-contrib:0.138.0 volumes: - ./otel/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml volumes: registry: {} minio_data: {} ================================================ FILE: .devcontainer/otel/otel-collector-config.yaml ================================================ receivers: otlp: protocols: grpc: endpoint: 0.0.0.0:4317 http: endpoint: 0.0.0.0:4318 processors: batch: exporters: # Logs to console debug: verbosity: detailed # Metrics to Prometheus endpoint prometheus: endpoint: 0.0.0.0:9090 namespace: otel # Traces to Jaeger otlp/jaeger: endpoint: jaeger:4317 tls: insecure: true service: pipelines: traces: receivers: [otlp] processors: [batch] exporters: [otlp/jaeger] metrics: receivers: [otlp] processors: [batch] exporters: [prometheus] logs: receivers: [otlp] processors: [batch] exporters: [debug] ================================================ FILE: .devcontainer/pgadmin4/pgpass ================================================ db:5432:*:user:pass ================================================ FILE: .devcontainer/pgadmin4/servers.json ================================================ { "Servers": { "1": { "Name": "Daytona", "Group": "Servers", "Host": "db", "Port": 5432, "MaintenanceDB": "postgres", "Username": "user", "PassFile": "/pgpass" } } } ================================================ FILE: .devcontainer/tools-feature/devcontainer-feature.json ================================================ { "name": "Development tools", "id": "tools", "version": "1.0.0", "description": "Installs development tools.", "options": { "pipPackages": { "type": "array", "description": "List of pip packages to install", "items": { "type": "string" } }, "goTools": { "type": "array", "description": "List of Go tools to install", "items": { "type": "string" } } }, "installsAfter": [ "ghcr.io/devcontainers/features/go", "ghcr.io/devcontainers/features/python", "ghcr.io/devcontainers/features/ruby", 
"ghcr.io/devcontainers/features/docker-in-docker" ] } ================================================ FILE: .devcontainer/tools-feature/install.sh ================================================ #!/bin/bash # Copyright 2025 Daytona Platforms Inc. # SPDX-License-Identifier: AGPL-3.0 set -e echo "Installing Python packages and Go tools..." USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}" # Determine the appropriate non-root user if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then USERNAME="" POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do if id -u "${CURRENT_USER}" > /dev/null 2>&1; then USERNAME=${CURRENT_USER} break fi done if [ "${USERNAME}" = "" ]; then USERNAME=root fi elif [ "${USERNAME}" = "none" ] || ! id -u "${USERNAME}" > /dev/null 2>&1; then USERNAME=root fi export GOROOT="${TARGET_GOROOT:-"/usr/local/go"}" export GOPATH="${TARGET_GOPATH:-"/go"}" export GOCACHE=/tmp/gotools/cache sudo -E -u "${USERNAME}" bash -c ' export PATH=$GOROOT/bin:$PATH export HOME=/home/${USER} # Install pip packages if [ -n "$PIPPACKAGES" ]; then echo "Installing pip packages: $PIPPACKAGES" IFS=',' read -ra PACKAGES <<< "${PIPPACKAGES}" pip3 install --no-cache-dir "${PACKAGES[@]}" else echo "No pip packages specified. Skipping." fi # Install Go tools if [ -n "$GOTOOLS" ]; then echo "Installing Go tools: $GOTOOLS" IFS=',' read -ra TOOLS <<< "${GOTOOLS}" for tool in "${TOOLS[@]}"; do go install $tool done else echo "No Go tools specified. Skipping." fi ' # Set insecure registry cat > /etc/docker/daemon.json <&2 exit 1 fi if [ "${{ inputs.install-swag }}" = "true" ] && [ "${{ inputs.install-go }}" != "true" ]; then echo "Error: install-swag is 'true' but install-go is not 'true'. Please enable install-go when using install-swag." 
>&2 exit 1 fi - uses: actions/setup-go@v5 if: inputs.install-go == 'true' with: go-version-file: go.work cache: ${{ inputs.go-cache }} cache-dependency-path: | **/go.sum - uses: actions/setup-java@v4 if: inputs.install-java == 'true' with: java-version: 21 distribution: 'temurin' - uses: actions/setup-python@v5 if: inputs.install-python == 'true' with: python-version: '3.12' - uses: ruby/setup-ruby@v1 if: inputs.install-ruby == 'true' with: ruby-version: '3.4.5' bundler-cache: false - name: Bundle install if: inputs.install-ruby == 'true' shell: bash run: | bundle install - uses: actions/setup-node@v4 if: inputs.install-node == 'true' with: node-version: 22 - name: System dependencies shell: bash run: | sudo apt-get update && sudo apt-get install -y gcc libx11-dev libxtst-dev if [[ "${{ inputs.install-node }}" == 'true' || "${{ inputs.run-yarn-install }}" == 'true' ]]; then corepack enable fi - name: Install Poetry if: inputs.install-poetry == 'true' && inputs.install-python == 'true' shell: bash run: | python3 -m pip install --upgrade pip python3 -m pip install "poetry==2.1.3" - name: Install swag if: inputs.install-swag == 'true' && inputs.install-go == 'true' shell: bash run: go install github.com/swaggo/swag/cmd/swag@v1.16.4 - name: Get yarn cache directory if: inputs.run-yarn-install == 'true' id: yarn-cache shell: bash run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT - uses: actions/cache@v4 if: inputs.run-yarn-install == 'true' with: path: ${{ steps.yarn-cache.outputs.dir }} key: yarn-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('yarn.lock') }} restore-keys: yarn-${{ runner.os }}-${{ runner.arch }}- - name: Yarn install if: inputs.run-yarn-install == 'true' shell: bash run: yarn install --immutable - name: Cache Poetry virtualenv if: inputs.run-poetry-install == 'true' uses: actions/cache@v4 with: path: .venv key: poetry-venv-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('poetry.lock') }} restore-keys: poetry-venv-${{ runner.os 
}}-${{ runner.arch }}- - name: Poetry install if: inputs.run-poetry-install == 'true' shell: bash run: | poetry lock poetry install - name: Go work sync if: inputs.run-go-work-sync == 'true' shell: bash run: | GONOSUMDB=github.com/daytonaio/daytona go work sync go env -w GOFLAGS="-buildvcs=false" ================================================ FILE: .github/pull_request_template.md ================================================ ## Description Please include a summary of the change or the feature being introduced. Include relevant motivation and context. List any dependencies that are required for this change. ## Documentation - [ ] This change requires a documentation update - [ ] I have made corresponding changes to the documentation ## Related Issue(s) This PR addresses issue #X ## Screenshots If relevant, please add screenshots. ## Notes Please add any relevant notes if necessary. ================================================ FILE: .github/workflows/build_devcontainer.yaml ================================================ name: 'Build devcontainer image' on: push: branches: - main jobs: build: runs-on: ubuntu-latest steps: - name: Checkout (GitHub) uses: actions/checkout@v4 - name: Set up QEMU for multi-architecture builds uses: docker/setup-qemu-action@v3 - name: Setup Docker buildx for multi-architecture builds uses: docker/setup-buildx-action@v3 with: use: true - name: Login to GitHub Container Registry uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and release devcontainer Multi-Platform uses: devcontainers/ci@v0.3 env: # see: https://github.com/devcontainers/ci/issues/191#issuecomment-1603857155 BUILDX_NO_DEFAULT_ATTESTATIONS: true with: imageName: ghcr.io/daytonaio/daytona-devcontainer cacheFrom: ghcr.io/daytonaio/daytona-devcontainer platform: linux/amd64,linux/arm64 imageTag: main,latest configFile: .devcontainer/devcontainer.build.json 
================================================ FILE: .github/workflows/default_image_publish.yaml ================================================ name: Default Snapshot Images Publish on: workflow_dispatch: inputs: version: description: Version to release (e.g., "0.1.2") required: true default: '0.0.0-dev' push_latest: description: Push latest tags (disable for rc/pre-release) type: boolean default: true env: VERSION: ${{ inputs.version }} BUILDX_NO_DEFAULT_ATTESTATIONS: 1 REGISTRY_IMAGE: daytonaio/sandbox jobs: docker_build: strategy: fail-fast: false matrix: include: - runner: [self-hosted, Linux, ARM64, github-actions-runner-arm] platform: linux/arm64 - runner: [self-hosted, Linux, X64, github-actions-runner-amd64] platform: linux/amd64 runs-on: ${{ matrix.runner }} steps: - name: Prepare run: | platform=${{ matrix.platform }} echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV - name: Checkout code uses: actions/checkout@v4 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Log in to Docker Hub uses: docker/login-action@v3 with: registry: docker.io username: daytonaio password: ${{ secrets.DOCKER_TOKEN }} - name: Build and push sandbox by digest id: build-sandbox uses: docker/build-push-action@v6 with: context: ./images/sandbox file: ./images/sandbox/Dockerfile platforms: ${{ matrix.platform }} tags: ${{ env.REGISTRY_IMAGE }} outputs: type=image,push-by-digest=true,name-canonical=true,push=true cache-from: type=registry,ref=daytonaio/sandbox:buildcache-${{ env.PLATFORM_PAIR }} cache-to: type=registry,ref=daytonaio/sandbox:buildcache-${{ env.PLATFORM_PAIR }},mode=max - name: Export sandbox digest run: | mkdir -p ${{ runner.temp }}/digests/sandbox digest="${{ steps.build-sandbox.outputs.digest }}" touch "${{ runner.temp }}/digests/sandbox/${digest#sha256:}" - name: Build and push sandbox-slim by digest id: build-sandbox-slim uses: docker/build-push-action@v6 with: context: ./images/sandbox-slim file: ./images/sandbox-slim/Dockerfile 
platforms: ${{ matrix.platform }} tags: ${{ env.REGISTRY_IMAGE }} outputs: type=image,push-by-digest=true,name-canonical=true,push=true cache-from: type=registry,ref=daytonaio/sandbox:buildcache-slim-${{ env.PLATFORM_PAIR }} cache-to: type=registry,ref=daytonaio/sandbox:buildcache-slim-${{ env.PLATFORM_PAIR }},mode=max - name: Export sandbox-slim digest run: | mkdir -p ${{ runner.temp }}/digests/sandbox-slim digest="${{ steps.build-sandbox-slim.outputs.digest }}" touch "${{ runner.temp }}/digests/sandbox-slim/${digest#sha256:}" - name: Upload sandbox digest uses: actions/upload-artifact@v4 with: name: digests-default-${{ env.PLATFORM_PAIR }} path: ${{ runner.temp }}/digests/sandbox/* if-no-files-found: error retention-days: 1 - name: Upload sandbox-slim digest uses: actions/upload-artifact@v4 with: name: digests-slim-${{ env.PLATFORM_PAIR }} path: ${{ runner.temp }}/digests/sandbox-slim/* if-no-files-found: error retention-days: 1 docker_manifest: needs: docker_build runs-on: [self-hosted, Linux, X64, github-actions-runner-amd64] steps: - name: Download sandbox digests uses: actions/download-artifact@v4 with: path: ${{ runner.temp }}/digests/sandbox pattern: digests-default-* merge-multiple: true - name: Download sandbox-slim digests uses: actions/download-artifact@v4 with: path: ${{ runner.temp }}/digests/sandbox-slim pattern: digests-slim-* merge-multiple: true - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Log in to Docker Hub uses: docker/login-action@v3 with: registry: docker.io username: daytonaio password: ${{ secrets.DOCKER_TOKEN }} - name: Create and push sandbox manifest working-directory: ${{ runner.temp }}/digests/sandbox run: | TAGS="-t ${{ env.REGISTRY_IMAGE }}:${{ env.VERSION }}" if [ "${{ inputs.push_latest }}" = "true" ]; then TAGS="$TAGS -t ${{ env.REGISTRY_IMAGE }}:latest" fi docker buildx imagetools create $TAGS \ $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) - name: Create and push sandbox-slim manifest 
working-directory: ${{ runner.temp }}/digests/sandbox-slim run: | TAGS="-t ${{ env.REGISTRY_IMAGE }}:${{ env.VERSION }}-slim" if [ "${{ inputs.push_latest }}" = "true" ]; then TAGS="$TAGS -t ${{ env.REGISTRY_IMAGE }}:latest-slim" fi docker buildx imagetools create $TAGS \ $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) - name: Inspect images run: | docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ env.VERSION }} docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ env.VERSION }}-slim ================================================ FILE: .github/workflows/pr_checks.yaml ================================================ name: '[PR] Validate code' on: pull_request: branches: - main permissions: contents: read concurrency: # New commit on branch cancels running workflows of the same branch group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true env: POETRY_VIRTUALENVS_IN_PROJECT: true jobs: go-work: name: Go work runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version-file: go.work cache: true cache-dependency-path: '**/go.sum' - name: go work run: | GONOSUMDB=github.com/daytonaio/daytona go work sync git diff --exit-code -- 'go.work*' '*/go.mod' '*/go.sum' || (echo "Go workspace files are not up to date! Please run 'go work sync' and commit the changes." 
&& exit 1) cli-docs: name: CLI docs runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-node@v4 with: node-version: 22 - run: corepack enable - name: Get yarn cache directory id: yarn-cache run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT - uses: actions/cache@v4 with: path: ${{ steps.yarn-cache.outputs.dir }} key: yarn-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('yarn.lock') }} restore-keys: yarn-${{ runner.os }}-${{ runner.arch }}- - run: yarn install --immutable - name: Check CLI reference docs are up to date run: | cd apps/docs node tools/update-cli-reference.js --local git diff --exit-code -- src/content/docs/en/tools/cli.mdx || (echo "CLI reference docs on the docs site are out of sync! Please run 'cd apps/docs && node tools/update-cli-reference.js --local' and commit the changes." && exit 1) golangci: name: Go lint runs-on: ubuntu-latest strategy: matrix: working-directory: [apps/daemon, apps/runner, apps/cli, apps/proxy, libs/sdk-go, apps/otel-collector/exporter] steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version-file: go.work cache: true cache-dependency-path: '**/go.sum' - name: golangci-lint uses: golangci/golangci-lint-action@v9 with: version: v2.6.2 working-directory: ${{ matrix.working-directory }} args: --timeout=5m ./... - name: format run: | cd ${{ matrix.working-directory }} go fmt ./... git diff --exit-code '**/*.go' || (echo "Code is not formatted! Please run 'go fmt ./...' and commit" && exit 1) lint-computer-use: name: Go lint (Computer Use) runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version-file: go.work cache: true cache-dependency-path: '**/go.sum' - name: format run: | sudo apt-get update && sudo apt-get install -y gcc libx11-dev libxtst-dev cd libs/computer-use go fmt ./... git diff --exit-code '**/*.go' || (echo "Code is not formatted! Please run 'go fmt ./...' 
and commit" && exit 1) - name: golangci-lint uses: golangci/golangci-lint-action@v9 with: version: v2.6.2 working-directory: libs/computer-use args: --timeout=5m ./... lint-python: name: Python lint runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: '3.12' - name: Install Dependencies run: | corepack enable python -m pip install --upgrade pip pip install "poetry==2.1.3" poetry install - name: Get yarn cache directory id: yarn-cache run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT - uses: actions/cache@v4 with: path: ${{ steps.yarn-cache.outputs.dir }} key: yarn-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('yarn.lock') }} restore-keys: yarn-${{ runner.os }}-${{ runner.arch }}- - run: yarn install --immutable - name: Lint Python Code run: | source "$(poetry env info --path)/bin/activate" yarn lint:py format-lint-api-clients: name: Format, lint and generate API clients runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: ./.github/actions/setup-toolchain with: run-go-work-sync: 'false' - name: generate-api-clients run: | source "$(poetry env info --path)/bin/activate" go install github.com/princjef/gomarkdoc/cmd/gomarkdoc@v1.1.0 echo -e 'DEFAULT_PACKAGE_VERSION=0.0.0-dev\nDEFAULT_GEM_VERSION=0.0.0.pre.dev\n\nPYPI_PKG_VERSION=\nNPM_PKG_VERSION=\nNPM_TAG=\nPYPI_TOKEN=\nNPM_TOKEN=' > .env mkdir -p dist/apps/api yarn generate:api-client yarn lint:fix yarn format poetry lock yarn docs GONOSUMDB=github.com/daytonaio/daytona go work sync git diff --exit-code || (echo "Code not formatted or linting errors! 
Hint: 'yarn generate:api-client', 'yarn lint:fix', 'yarn docs' and commit" && exit 1) build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: ./.github/actions/setup-toolchain - name: Build all run: | source "$(poetry env info --path)/bin/activate" VERSION=0.0.0-dev yarn build --nxBail=true license-check: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Check AGPL License Headers uses: apache/skywalking-eyes/header@main with: token: ${{ github.token }} config: .licenserc.yaml mode: 'check' - name: Check Apache License Headers uses: apache/skywalking-eyes/header@main with: token: ${{ github.token }} config: .licenserc-clients.yaml mode: 'check' ================================================ FILE: .github/workflows/prepare-release.yaml ================================================ name: Prepare Release on: workflow_dispatch: inputs: version: description: Version to release (start with "v") required: true default: 'v0.0.0-dev' env: VERSION: ${{ inputs.version }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} jobs: prepare: runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v4 with: fetch-tags: true fetch-depth: 0 - uses: ./.github/actions/setup-toolchain - uses: dev-hanz-ops/install-gh-cli-action@v0.2.1 - name: Configure git run: | git config --global --add safe.directory $GITHUB_WORKSPACE git config --global user.name "Daytona Release Bot" git config --global user.email "daytona-release@users.noreply.github.com" - name: Create release branch run: | git checkout -b prepare-release-${{ inputs.version }} - name: Prepare release run: | VERSION=${{ inputs.version }} yarn prepare-release - name: Push branch and create PR run: | git push origin prepare-release-${{ inputs.version }} gh pr create \ --title "chore: prepare release ${{ inputs.version }}" \ --body "Automated PR to prepare release ${{ inputs.version }}" \ --base main \ --head prepare-release-${{ inputs.version }} 
================================================ FILE: .github/workflows/release.yaml ================================================ name: Release on: workflow_dispatch: inputs: version: description: Version to release (start with "v") required: true default: 'v0.0.0-dev' env: VERSION: ${{ inputs.version }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} BUILDX_NO_DEFAULT_ATTESTATIONS: 1 DAYTONA_API_URL: ${{ secrets.DAYTONA_API_URL }} DAYTONA_AUTH0_DOMAIN: ${{ secrets.DAYTONA_AUTH0_DOMAIN }} DAYTONA_AUTH0_CLIENT_ID: ${{ secrets.DAYTONA_AUTH0_CLIENT_ID }} DAYTONA_AUTH0_CALLBACK_PORT: ${{ secrets.DAYTONA_AUTH0_CALLBACK_PORT }} DAYTONA_AUTH0_CLIENT_SECRET: ${{ secrets.DAYTONA_AUTH0_CLIENT_SECRET }} DAYTONA_AUTH0_AUDIENCE: ${{ secrets.DAYTONA_AUTH0_AUDIENCE }} POETRY_VIRTUALENVS_IN_PROJECT: true GONOSUMDB: github.com/daytonaio/daytona jobs: publish: runs-on: [self-hosted, Linux, X64, github-actions-runner-amd64] steps: - name: Checkout code uses: actions/checkout@v4 with: fetch-tags: true fetch-depth: 0 - uses: ./.github/actions/setup-toolchain with: run-go-work-sync: 'false' - uses: dev-hanz-ops/install-gh-cli-action@v0.2.1 - name: Configure git run: | git config --global --add safe.directory $GITHUB_WORKSPACE git config --global user.name "Daytona Release Bot" git config --global user.email "daytona-release@users.noreply.github.com" - name: Tag Go modules run: | git tag libs/api-client-go/${{ inputs.version }} git tag libs/toolbox-api-client-go/${{ inputs.version }} git tag libs/sdk-go/${{ inputs.version }} git push origin libs/api-client-go/${{ inputs.version }} libs/toolbox-api-client-go/${{ inputs.version }} libs/sdk-go/${{ inputs.version }} - name: Go work sync run: | GONOSUMDB=github.com/daytonaio/daytona go work sync go env -w GOFLAGS="-buildvcs=false" # Write version to required folders so nx release can run - name: Write package.json to required folders run: | mkdir dist echo '{ "name": "api", "version": "0.0.0" }' > dist/package.json echo '{ "name": "api", 
"version": "0.0.0" }' > apps/api/package.json echo '{ "name": "dashboard", "version": "0.0.0" }' > apps/dashboard/package.json echo '{ "name": "docs", "version": "0.0.0" }' > apps/docs/package.json echo '{ "name": "runner", "version": "0.0.0" }' > apps/runner/package.json - name: Create release run: yarn nx release ${{ inputs.version }} --skip-publish --verbose build_projects: needs: publish runs-on: [self-hosted, Linux, X64, github-actions-runner-amd64] steps: - name: Checkout code uses: actions/checkout@v4 with: fetch-tags: true fetch-depth: 0 - uses: ./.github/actions/setup-toolchain - uses: dev-hanz-ops/install-gh-cli-action@v0.2.1 - name: Install gettext run: sudo apt-get install -y gettext - name: Configure git run: | git config --global --add safe.directory $GITHUB_WORKSPACE git config --global user.name "Daytona Release Bot" git config --global user.email "daytona-release@users.noreply.github.com" - name: Build projects run: | source "$(poetry env info --path)/bin/activate" yarn build:production yarn nx build-amd64 runner --configuration=production --nxBail=true - name: Build runner .deb package run: VERSION="${VERSION#v}" yarn nx package-deb runner - name: Build CLI run: | cd ./apps/cli GOOS=linux GOARCH=amd64 ./hack/build.sh --skip-env-file GOOS=linux GOARCH=arm64 ./hack/build.sh --skip-env-file GOOS=darwin GOARCH=amd64 ./hack/build.sh --skip-env-file GOOS=darwin GOARCH=arm64 ./hack/build.sh --skip-env-file GOOS=windows GOARCH=amd64 ./hack/build.sh --skip-env-file GOOS=windows GOARCH=arm64 ./hack/build.sh --skip-env-file cd ../.. 
- name: Upload runner to release assets run: | gh release upload ${{ inputs.version }} dist/apps/runner-amd64#daytona-runner-${{ inputs.version }}-amd64 --clobber - name: Upload runner .deb to release assets run: | DEB_VERSION="${VERSION#v}" gh release upload ${{ inputs.version }} "dist/apps/runner-deb/daytona-runner_${DEB_VERSION}_amd64.deb#daytona-runner-${DEB_VERSION}-amd64.deb" --clobber - name: Upload daemon to release assets run: | gh release upload ${{ inputs.version }} dist/apps/daemon-amd64#daytona-daemon-${{ inputs.version }}-amd64 --clobber - name: Upload CLI to release assets run: | gh release upload ${{ inputs.version }} dist/apps/cli/daytona-linux-amd64#daytona-cli-${{ inputs.version }}-linux-amd64 --clobber gh release upload ${{ inputs.version }} dist/apps/cli/daytona-linux-arm64#daytona-cli-${{ inputs.version }}-linux-arm64 --clobber gh release upload ${{ inputs.version }} dist/apps/cli/daytona-darwin-amd64#daytona-cli-${{ inputs.version }}-darwin-amd64 --clobber gh release upload ${{ inputs.version }} dist/apps/cli/daytona-darwin-arm64#daytona-cli-${{ inputs.version }}-darwin-arm64 --clobber gh release upload ${{ inputs.version }} dist/apps/cli/daytona-windows-amd64.exe#daytona-cli-${{ inputs.version }}-windows-amd64.exe --clobber gh release upload ${{ inputs.version }} dist/apps/cli/daytona-windows-arm64.exe#daytona-cli-${{ inputs.version }}-windows-arm64.exe --clobber - name: Upload computer-use artifact uses: actions/upload-artifact@v4 with: name: computer-use-amd64 path: dist/libs/computer-use-amd64 retention-days: 1 overwrite: true - name: Upload runner artifact uses: actions/upload-artifact@v4 with: name: runner-amd64 path: dist/apps/runner-amd64 retention-days: 1 overwrite: true # Separately build docker images for AMD64 and ARM64 docker_build: needs: build_projects runs-on: ${{ matrix.runner }} strategy: matrix: include: - runner: [self-hosted, Linux, X64, github-actions-runner-amd64] arch: amd64 - runner: [self-hosted, Linux, ARM64, 
github-actions-runner-arm] arch: arm64 steps: - name: Checkout code uses: actions/checkout@v4 with: fetch-tags: true fetch-depth: 0 - name: Download computer-use artifact uses: actions/download-artifact@v4 with: name: computer-use-amd64 path: dist/libs/ - name: Download runner artifact uses: actions/download-artifact@v4 with: name: runner-amd64 path: dist/apps/ - name: Check artifacts run: | ls -la dist/libs/ ls -la dist/apps/ - uses: actions/setup-go@v5 with: go-version-file: go.work - uses: actions/setup-node@v4 with: node-version: 22 - name: Generate go.work.sum run: GONOSUMDB=github.com/daytonaio/daytona go work sync - name: Log in to the Container registry uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1 with: registry: docker.io username: daytonaio password: ${{ secrets.DOCKER_TOKEN }} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Project dependencies run: corepack enable - name: Get yarn cache directory id: yarn-cache run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT - uses: actions/cache@v4 with: path: ${{ steps.yarn-cache.outputs.dir }} key: yarn-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('yarn.lock') }} restore-keys: yarn-${{ runner.os }}-${{ runner.arch }}- - run: yarn install --immutable - name: Publish docker images run: | VERSION=${{ inputs.version }} ARCH="-${{ matrix.arch }}" yarn docker:production # Push combined manifest docker_push_manifest: needs: docker_build runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v4 - uses: actions/setup-node@v4 with: node-version: 22 - name: Project dependencies run: corepack enable - name: Get yarn cache directory id: yarn-cache run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT - uses: actions/cache@v4 with: path: ${{ steps.yarn-cache.outputs.dir }} key: yarn-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('yarn.lock') }} restore-keys: yarn-${{ runner.os }}-${{ runner.arch }}- - run: yarn install 
--immutable - name: Log in to the Container registry uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1 with: registry: docker.io username: daytonaio password: ${{ secrets.DOCKER_TOKEN }} - name: Push manifest run: | VERSION=${{ inputs.version }} yarn push-manifest sync_gosum: needs: build_projects runs-on: [self-hosted, Linux, X64, github-actions-runner-amd64] steps: - name: Checkout code uses: actions/checkout@v4 with: fetch-tags: true fetch-depth: 0 - uses: actions/setup-go@v5 with: go-version-file: go.work - uses: actions/setup-node@v4 with: node-version: 22 - uses: dev-hanz-ops/install-gh-cli-action@v0.2.1 - name: Configure git run: | git config --global user.name "Daytona Release Bot" git config --global user.email "daytona-release@users.noreply.github.com" - name: Project dependencies run: corepack enable - name: Get yarn cache directory id: yarn-cache run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT - uses: actions/cache@v4 with: path: ${{ steps.yarn-cache.outputs.dir }} key: yarn-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('yarn.lock') }} restore-keys: yarn-${{ runner.os }}-${{ runner.arch }}- - run: yarn install --immutable - name: Sync go.sum and create PR run: | git checkout -b sync-gosum-${{ inputs.version }} GONOSUMDB=github.com/daytonaio/daytona go work sync if git diff --quiet -- **/go.sum; then echo "No go.sum changes detected; skipping commit and PR creation." 
else git add **/go.sum git commit -s -m "chore: sync go.sum for ${{ inputs.version }}" git push origin sync-gosum-${{ inputs.version }} gh pr create \ --title "chore: sync go.sum for ${{ inputs.version }}" \ --body "Automated PR to sync go.sum after release ${{ inputs.version }}" \ --base main \ --head sync-gosum-${{ inputs.version }} fi env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} ================================================ FILE: .github/workflows/sdk_publish.yaml ================================================ name: SDK and CLI Publish on: workflow_dispatch: inputs: version: description: Version to release (start with "v") required: true default: 'v0.0.0-dev' pypi_pkg_version: description: 'PyPI package version (default: version)' required: false npm_pkg_version: description: 'NPM package version (default: version)' required: false npm_tag: description: 'NPM tag (default: latest)' required: false default: 'latest' rubygems_pkg_version: description: 'RubyGems package version (default: version)' required: false env: VERSION: ${{ inputs.version }} PYPI_PKG_VERSION: ${{ inputs.pypi_pkg_version || inputs.version}} NPM_PKG_VERSION: ${{ inputs.npm_pkg_version || inputs.version}} RUBYGEMS_PKG_VERSION: ${{ inputs.rubygems_pkg_version || inputs.version}} NPM_TAG: ${{ inputs.npm_tag }} NPM_TOKEN: ${{ secrets.NPM_TOKEN }} PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} RUBYGEMS_API_KEY: ${{ secrets.RUBYGEMS_API_KEY }} POETRY_VIRTUALENVS_IN_PROJECT: true jobs: publish: runs-on: [self-hosted, Linux, X64, github-actions-runner-amd64] steps: - name: Checkout code uses: actions/checkout@v4 with: fetch-tags: true fetch-depth: 0 - uses: ./.github/actions/setup-toolchain with: install-swag: 'false' - uses: dev-hanz-ops/install-gh-cli-action@v0.2.1 - name: Configure git run: | git config --global --add safe.directory $GITHUB_WORKSPACE git config --global user.name "Daytona Release Bot" git config --global user.email "daytona-release@users.noreply.github.com" - name: Configure RubyGems 
credentials run: | mkdir -p ~/.gem echo "---" > ~/.gem/credentials echo ":rubygems_api_key: $RUBYGEMS_API_KEY" >> ~/.gem/credentials chmod 0600 ~/.gem/credentials - name: Publish projects run: | source "$(poetry env info --path)/bin/activate" yarn publish update-homebrew-tap: if: ${{ inputs.npm_tag == 'latest' }} needs: publish runs-on: [self-hosted, Linux, X64, github-actions-runner-amd64] name: Update Homebrew CLI tap steps: - name: Update version run: | curl -f -X POST -H "Accept: application/vnd.github+json" \ -H "Authorization: Bearer ${{ secrets.GITHUBBOT_TOKEN }}" \ https://api.github.com/repos/daytonaio/homebrew-cli/dispatches \ -d "{\"event_type\": \"update-version\", \"client_payload\": {\"version\": \"${{ env.VERSION }}\"}}" ================================================ FILE: .github/workflows/translate.yaml ================================================ name: GT Translate on: push: branches: [main] jobs: translate: runs-on: ubuntu-latest if: ${{ github.event.head_commit.author.name != 'github-actions[bot]' && !contains(github.event.head_commit.message, 'gt-translate/') }} permissions: contents: write pull-requests: write steps: - uses: actions/checkout@v4 - uses: generaltranslation/translate@v0 with: gt_api_key: ${{ secrets.GT_API_KEY }} gt_project_id: ${{ secrets.GT_PROJECT_ID }} config: 'apps/docs/gt.config.json' inline: true timeout: 3600 pr_branch: 'gt-translate/${{ github.ref_name }}' ================================================ FILE: .gitignore ================================================ # See http://help.github.com/ignore-files/ for more about ignoring files. 
# compiled output dist tmp out-tsc # dependencies node_modules package-lock.json # IDEs and editors /.idea .project .classpath .c9/ *.launch .settings/ *.sublime-workspace .cursor # IDE - VSCode .vscode/* !.vscode/settings.json !.vscode/tasks.json !.vscode/launch.json !.vscode/extensions.json # misc /.sass-cache /connect.lock /coverage /libpeerconnection.log npm-debug.log yarn-error.log testem.log /typings # System Files .DS_Store Thumbs.db .nx/cache .nx/workspace-data vite.config.*.timestamp* vitest.config.*.timestamp* # yarn # https://yarnpkg.com/getting-started/qa#which-files-should-be-gitignored .pnp.* .yarn/* !.yarn/patches !.yarn/plugins !.yarn/releases !.yarn/sdks !.yarn/versions .eslintcache .tmp/ .env.local # Python package metadata *.egg-info/ *.egg *.pyc __pycache__/ # Ruby gem artifacts *.gem **/.bundle/ **/vendor/bundle **/Gemfile.lock !/Gemfile.lock examples/**/*.png **/.venv **/poetry.lock !/poetry.lock .astro .github/instructions/nx.instructions.md go.work.sum .claude/worktrees ================================================ FILE: .golangci.yaml ================================================ version: '2' run: build-tags: - testing linters: settings: errcheck: exclude-functions: - (*github.com/gin-gonic/gin.Context).AbortWithError - (*github.com/gin-gonic/gin.Context).Error - io.Copy - syscall.Syscall - (github.com/gliderlabs/ssh.Session).Exit - (io.Writer).Write exclusions: generated: lax presets: - comments - common-false-positives - legacy - std-error-handling paths: - third_party$ - builtin$ - examples$ formatters: exclusions: generated: lax paths: - third_party$ - builtin$ - examples$ ================================================ FILE: .husky/.gitignore ================================================ _ ================================================ FILE: .husky/pre-commit ================================================ #!/bin/sh yarn lint-staged ================================================ FILE: .licenserc-clients.yaml 
================================================ header: license: spdx-id: Apache-2.0 copyright-owner: Daytona Platforms Inc. content: | Copyright Daytona Platforms Inc. SPDX-License-Identifier: Apache-2.0 pattern: | Copyright (\d{4} )?Daytona Platforms Inc\. SPDX-License-Identifier: Apache-2\.0 paths: - 'libs/**/*.go' - 'libs/**/*.sh' - 'libs/**/*.js' - 'libs/**/*.ts' - 'libs/**/*.tsx' - 'libs/**/*.py' - 'guides/**/*.py' - 'guides/**/*.ts' - 'guides/**/*.go' - 'guides/**/*.js' paths-ignore: - 'libs/analytics-api-client/**' - 'libs/api-client/**' - 'libs/runner-api-client/**' - 'libs/api-client-go/**' - 'libs/api-client-python/**' - 'libs/api-client-python-async/**' - 'libs/api-client-ruby/**' - 'apps/docs/**' - 'libs/computer-use/**' - 'hack/**' - 'libs/toolbox-api-client/**' - 'libs/toolbox-api-client-python/**' - 'libs/toolbox-api-client-python-async/**' - 'libs/toolbox-api-client-ruby/**' - 'libs/toolbox-api-client-go/**' comment: on-failure ================================================ FILE: .licenserc.yaml ================================================ header: license: spdx-id: AGPL-3.0 copyright-owner: Daytona Platforms Inc. content: | Copyright Daytona Platforms Inc. SPDX-License-Identifier: AGPL-3.0 pattern: | Copyright (\d{4} )?Daytona Platforms Inc\. 
SPDX-License-Identifier: AGPL-3\.0 paths: - '**/*.go' - '**/*.sh' - '**/*.js' - '**/*.ts' - '**/*.tsx' - '**/*.py' paths-ignore: - 'libs/**' - '!libs/computer-use/**' - 'apps/api/src/generate-openapi.ts' - 'apps/runner/pkg/api/docs/docs.go' - 'examples/**' - 'apps/docs/**' - 'hack/**' - 'guides/**' - 'apps/daemon/pkg/toolbox/docs/docs.go' - 'apps/dashboard/public/mockServiceWorker.js' comment: on-failure ================================================ FILE: .markdownlint-cli2.jsonc ================================================ { "$schema": "https://raw.githubusercontent.com/DavidAnson/markdownlint-cli2/24eb4dce508ab81398d14d75179123fca425f12d/schema/markdownlint-cli2-config-schema.json", "config": { "no-emphasis-as-heading": false, "line-length": false, "no-inline-html": false, "first-line-h1": false, "no-bare-urls": false, "no-duplicate-heading": false, "emphasis-style": { "style": "underscore", }, "ol-prefix": false, "fenced-code-language": false, "single-title": false, "heading-increment": false, "table-column-count": false, "table-pipe-style": false, "no-empty-links": false, }, "ignores": [ "**/node_modules/**", "**/apps/docs/**", "**/.venv/**", "**/libs/toolbox-api-client-go/**" ], } ================================================ FILE: .npmrc ================================================ # Expose Astro dependencies for \`pnpm\` users shamefully-hoist=true ================================================ FILE: .nxignore ================================================ .claude/ examples/ ================================================ FILE: .prettierignore ================================================ # Add files here to ignore them from prettier formatting /dist /coverage /.nx/cache /.nx/workspace-data *.md libs/*api-client*/** ================================================ FILE: .prettierrc ================================================ { "singleQuote": true, "semi": false, "tabWidth": 2, "printWidth": 120 } 
================================================ FILE: .rubocop.yml ================================================ AllCops: NewCops: enable TargetRubyVersion: 3.2 Layout/LineLength: AllowedPatterns: ['\A\s*#'] AllowCopDirectives: true Style/Documentation: Enabled: false Style/AccessorGrouping: EnforcedStyle: separated # Relax some metrics for examples Metrics/MethodLength: Max: 25 Exclude: - 'examples/**/*' Metrics/AbcSize: Max: 25 Exclude: - 'examples/**/*' Metrics/CyclomaticComplexity: Exclude: - 'examples/**/*' Metrics/PerceivedComplexity: Exclude: - 'examples/**/*' Metrics/BlockLength: Exclude: - 'examples/**/*' Metrics/ClassLength: Exclude: - 'examples/**/*' # Examples can use any style for string concatenation Style/StringConcatenation: Exclude: - 'examples/**/*' # Examples don't need execute permissions Lint/ScriptPermission: Exclude: - 'examples/**/*' ================================================ FILE: .verdaccio/config.yml ================================================ # path to a directory with all packages storage: ../tmp/local-registry/storage # a list of other known repositories we can talk to uplinks: npmjs: url: https://registry.npmjs.org/ maxage: 60m packages: '**': # give all users (including non-authenticated users) full access # because it is a local registry access: $all publish: $all unpublish: $all # if package is not available locally, proxy requests to npm registry proxy: npmjs # log settings log: type: stdout format: pretty level: warn publish: allow_offline: true # set offline to true to allow publish offline ================================================ FILE: .vscode/launch.json ================================================ { // Use IntelliSense to learn about possible attributes. // Hover to view descriptions of existing attributes. 
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", "configurations": [ { "command": "yarn serve", "name": "Debug", "request": "launch", "type": "node-terminal" }, { "command": "yarn serve:skip-runner", "name": "Debug - Skip Runner", "request": "launch", "type": "node-terminal" }, { "command": "yarn serve:skip-proxy", "name": "Debug - Skip Proxy", "request": "launch", "type": "node-terminal" }, { "name": "Runner", "type": "go", "request": "launch", "mode": "debug", "program": "${workspaceFolder}/apps/runner/cmd/runner", "console": "integratedTerminal", "envFile": "${workspaceFolder}/apps/runner/.env", "output": "${workspaceFolder}/dist/apps/runner", "preLaunchTask": "debug-build-runner" }, { "name": "Daemon", "type": "go", "request": "launch", "mode": "debug", "program": "${workspaceFolder}/apps/daemon/cmd/daemon", "console": "integratedTerminal", "output": "${workspaceFolder}/dist/apps/daemon" }, { "name": "Proxy", "type": "go", "request": "launch", "mode": "debug", "program": "${workspaceFolder}/apps/proxy/cmd/proxy", "console": "integratedTerminal", "envFile": [ "${workspaceFolder}/apps/proxy/.env", "${workspaceFolder}/apps/proxy/.env.local" ], "output": "${workspaceFolder}/dist/apps/proxy" } ] } ================================================ FILE: .vscode/settings.json ================================================ { "files.watcherExclude": { "**/libs/api-client*/**": true } } ================================================ FILE: .vscode/tasks.json ================================================ { "version": "2.0.0", "tasks": [ { "label": "debug-build-runner", "type": "shell", "command": "yarn nx build runner --output-style=stream", "group": { "kind": "build", "isDefault": true }, "presentation": { "reveal": "silent" }, "problemMatcher": ["$go"] } ] } ================================================ FILE: .yarnrc.yml ================================================ enableInlineHunks: true nodeLinker: 
node-modules ================================================ FILE: CODE_OF_CONDUCT.md ================================================ # Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. 
Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at codeofconduct@daytona.io. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. 
Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. [homepage]: https://www.contributor-covenant.org [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq [translations]: https://www.contributor-covenant.org/translations ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing to Daytona The team at Daytona welcomes contributions from the community. There are many ways to get involved! Thanks for taking the time to contribute! 
❤️ > And if you like the project but don't have time to contribute, that's perfectly okay. There are other simple ways to support the project and show your appreciation, which we would greatly appreciate: > > - Star the project > - Tweet about it > - Contribute to our [Docs](https://github.com/daytonaio/docs/) > - Refer this project in your project's readme > - Mention the project at local meetups and tell your friends/colleagues ## Code of Conduct This project and everyone participating in it is governed by the [Daytona Code of Conduct](https://github.com/daytonaio/daytona?tab=coc-ov-file#readme). By participating, you are expected to uphold this code. Please report unacceptable behavior to [info@daytona.io](mailto:info@daytona.io). ## Provide Feedback You might find things that can be improved while you are using Daytona. You can help by [submitting an issue](https://github.com/daytonaio/daytona/issues/new) when: - A new feature or an enhancement to an existing feature will improve the utility or usability of Daytona. - Daytona crashes, or you encounter a bug that can only be resolved by restarting Daytona. - An error occurs that is unrecoverable, causes Sandbox integrity problems or loss, or generally prevents you from using a Sandbox. Before creating a new issue, please confirm that an existing issue doesn't already exist. We will then take care of the issue as soon as possible. ## Participate in the Community You can engage with our community by: - Helping other users on [Daytona Community Slack](https://go.daytona.io/slack). 
- Improving [documentation](https://github.com/daytonaio/docs/) - Participating in general discussions about development and DevOps - Authoring new Daytona Plugins and sharing those Plugins - Authoring new dev containers and sharing examples ## Contributing Code You can contribute to Daytona by: - Enhancing current functionality - Fixing bugs - Adding new features and capabilities Before starting your contribution, especially for core features, we encourage you to reach out to us on [Slack](https://go.daytona.io/slack). This allows us to ensure that your proposed feature aligns with the project's roadmap and goals. Developers are the key to making Daytona the best tool it can be, and we value input from the community. We look forward to working with you to improve Daytona and make development environments as easy as possible for developers everywhere. ### Steps to Contribute Code Follow these steps to ensure your contribution goes smoothly. 1. Read and follow the steps outlined in the [Daytona Contributing Policy](README.md#contributing). 1. Configure your development environment by following the guide below. 1. [Fork](https://help.github.com/articles/working-with-forks/) the GitHub Repository allowing you to make the changes in your own copy of the repository. 1. Create a [GitHub issue](https://github.com/daytonaio/daytona/issues) if one doesn't exist already. 1. [Prepare your changes](/PREPARING_YOUR_CHANGES.md) and ensure your commits are descriptive. The document contains an optional commit template, if desired. 1. Ensure that you sign off on all your commits to comply with the DCO v1.1. We have more details in [Prepare your changes](/PREPARING_YOUR_CHANGES.md). 1. Ensure that you generate new docs after making command-related changes, by running `./hack/generate-cli-docs.sh` in the daytona root directory. 1. Ensure that you generate a new API client after making changes related to the API spec, by running `./hack/swagger.sh` in the daytona root directory. 
1. Ensure that you are using `yarn` as the package manager for any Node.js dependencies. 1. Ensure that you have no lint errors. We use `golangci-lint` as our linter which you can install by following instructions found [here](https://golangci-lint.run/welcome/install/#local-installation) (or simply open Daytona in a Dev Container). You can check for linting errors by running `golangci-lint run` in the root of the project. 1. Create a pull request on GitHub. If you're new to GitHub, read about [pull requests](https://help.github.com/articles/about-pull-requests/). You are welcome to submit your pull request for commentary or review before it is complete by creating a [draft pull request](https://help.github.com/en/articles/about-pull-requests#draft-pull-requests). Please include specific questions or items you'd like feedback on. 1. A member of the Daytona team will review your PR within three business days (excluding any holidays) and either merge, comment, and/or assign someone for review. 1. Work with the reviewer to complete a code review. For each change, create a new commit and push it to make changes to your pull request. When necessary, the reviewer can trigger CI to run tests prior to merging. 1. Once you believe your pull request is ready to be reviewed, ensure the pull request is no longer a draft by [marking it ready for review](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-stage-of-a-pull-request). 1. The reviewer will look over your contribution and either approve it or provide comments letting you know if there is anything left to do. We try to give you the opportunity to make the required changes yourself, but in some cases, we may perform the changes ourselves if it makes sense to (minor changes or for urgent issues). We do our best to review PRs promptly, but complex changes could require more time. 1. 
After completing your review, a Daytona team member will trigger merge to run all tests. Upon passing, your change will be merged into `main`, and your pull requests will be closed. All merges to `main` create a new release, and all final changes are attributed to you. Note: In some cases, we might decide that a PR should be closed without merging. We'll make sure to provide clear reasoning when this happens. ### Coding Style and Conventions To make the code base consistent, we follow a few guidelines and conventions listed below. It is possible that the code base does not currently comply with all these guidelines. While working on a PR, if you see something that can be refactored to comply, go ahead, but keep in mind that we are not looking for massive PRs that only address that. API and service method conventions: 1. Avoid using model names in service methods - e.g. `Create` instead of `CreateSandbox`, `Find` instead of `FindSandbox` 1. Use appropriate verbs in the UI - e.g. `Create API Key` instead of `Generate API Key` since the method is called `Create` 1. Refer to the table below for a connection between API and service methods | HTTP Method | Controller / Service / Store | | ----------- | ---------------------------- | | POST | Create or Update | | DELETE | Delete | | PUT | Save | | GET | Find or List | #### What Does Contributing Mean for You? Here is what being a contributor means for you: - License all your contributions to the project under the AGPL 3.0 License or the Apache 2.0 License - Have the legal rights to license your contributions yourself, or get permission to license them from your employers, clients, or others who may have them For more information, see the [README](README.md) and feel free to reach out to us on [Slack](https://go.daytona.io/slack). ================================================ FILE: COPYRIGHT ================================================ Copyrights in the Daytona software are retained by their contributors. 
No copyright assignment is required to contribute to Daytona software. When we refer to the 'Daytona software authors', we mean anyone who has contributed to this repository. Unless otherwise noted, such as with a LICENSE file in a directory, or a specific copyright notice on a file, all files in this repository are licensed under the AGPL 3.0 license. ================================================ FILE: Gemfile ================================================ # frozen_string_literal: true source 'https://rubygems.org' # Local gems from libs gem 'daytona_api_client', '>= 0.0.0.pre.dev', path: 'libs/api-client-ruby' gem 'daytona_toolbox_api_client', '>= 0.0.0.pre.dev', path: 'libs/toolbox-api-client-ruby' gem 'daytona', '>= 0.0.0.pre.dev', path: 'libs/sdk-ruby' # Shared development dependencies group :development do gem 'pry', '~> 0.15' gem 'pry-byebug' gem 'rake', '~> 13.0' gem 'rspec', '~> 3.6', '>= 3.6.0' gem 'rubocop', '1.84.2' gem 'rubocop-rake', '~> 0.7' gem 'solargraph', '~> 0.57' gem 'webmock', '~> 3.25' gem 'yard-markdown', '~> 0.5.0' end ================================================ FILE: Gemfile.lock ================================================ PATH remote: libs/api-client-ruby specs: daytona_api_client (0.0.0.pre.dev) typhoeus (~> 1.0, >= 1.0.1) PATH remote: libs/sdk-ruby specs: daytona (0.0.0.pre.dev) aws-sdk-s3 (~> 1.0) daytona_api_client (= 0.0.0.pre.dev) daytona_toolbox_api_client (= 0.0.0.pre.dev) dotenv (~> 2.0) opentelemetry-exporter-otlp (~> 0.29) opentelemetry-exporter-otlp-metrics (~> 0.1) opentelemetry-metrics-sdk (~> 0.2) opentelemetry-sdk (~> 1.4) toml (~> 0.3) websocket-client-simple (~> 0.6) PATH remote: libs/toolbox-api-client-ruby specs: daytona_toolbox_api_client (0.0.0.pre.dev) typhoeus (~> 1.0, >= 1.0.1) GEM remote: https://rubygems.org/ specs: addressable (2.8.8) public_suffix (>= 2.0.2, < 8.0) ast (2.4.3) aws-eventstream (1.4.0) aws-partitions (1.1209.0) aws-sdk-core (3.241.4) aws-eventstream (~> 1, >= 1.3.0) aws-partitions 
(~> 1, >= 1.992.0) aws-sigv4 (~> 1.9) base64 bigdecimal jmespath (~> 1, >= 1.6.1) logger aws-sdk-kms (1.121.0) aws-sdk-core (~> 3, >= 3.241.4) aws-sigv4 (~> 1.5) aws-sdk-s3 (1.212.0) aws-sdk-core (~> 3, >= 3.241.4) aws-sdk-kms (~> 1) aws-sigv4 (~> 1.5) aws-sigv4 (1.12.1) aws-eventstream (~> 1, >= 1.0.2) backport (1.2.0) base64 (0.3.0) benchmark (0.5.0) bigdecimal (4.0.1) byebug (13.0.0) reline (>= 0.6.0) coderay (1.1.3) crack (1.0.1) bigdecimal rexml csv (3.3.5) diff-lcs (1.6.2) dotenv (2.8.1) ethon (0.15.0) ffi (>= 1.15.0) event_emitter (0.2.6) ffi (1.17.3-aarch64-linux-gnu) ffi (1.17.3-aarch64-linux-musl) ffi (1.17.3-arm-linux-gnu) ffi (1.17.3-arm-linux-musl) ffi (1.17.3-arm64-darwin) ffi (1.17.3-x86_64-darwin) ffi (1.17.3-x86_64-linux-gnu) ffi (1.17.3-x86_64-linux-musl) google-protobuf (4.33.5) bigdecimal rake (>= 13) google-protobuf (4.33.5-aarch64-linux-gnu) bigdecimal rake (>= 13) google-protobuf (4.33.5-aarch64-linux-musl) bigdecimal rake (>= 13) google-protobuf (4.33.5-arm64-darwin) bigdecimal rake (>= 13) google-protobuf (4.33.5-x86_64-darwin) bigdecimal rake (>= 13) google-protobuf (4.33.5-x86_64-linux-gnu) bigdecimal rake (>= 13) google-protobuf (4.33.5-x86_64-linux-musl) bigdecimal rake (>= 13) googleapis-common-protos-types (1.22.0) google-protobuf (~> 4.26) hashdiff (1.2.1) io-console (0.8.2) jaro_winkler (1.6.1) jmespath (1.6.2) json (2.18.0) kramdown (2.5.2) rexml (>= 3.4.4) kramdown-parser-gfm (1.1.0) kramdown (~> 2.0) language_server-protocol (3.17.0.5) lint_roller (1.1.0) logger (1.7.0) method_source (1.1.0) mutex_m (0.3.0) nokogiri (1.19.1-aarch64-linux-gnu) racc (~> 1.4) nokogiri (1.19.1-aarch64-linux-musl) racc (~> 1.4) nokogiri (1.19.1-arm-linux-gnu) racc (~> 1.4) nokogiri (1.19.1-arm-linux-musl) racc (~> 1.4) nokogiri (1.19.1-arm64-darwin) racc (~> 1.4) nokogiri (1.19.1-x86_64-darwin) racc (~> 1.4) nokogiri (1.19.1-x86_64-linux-gnu) racc (~> 1.4) nokogiri (1.19.1-x86_64-linux-musl) racc (~> 1.4) observer (0.1.2) open3 (0.2.1) 
opentelemetry-api (1.7.0) opentelemetry-common (0.23.0) opentelemetry-api (~> 1.0) opentelemetry-exporter-otlp (0.31.1) google-protobuf (>= 3.18) googleapis-common-protos-types (~> 1.3) opentelemetry-api (~> 1.1) opentelemetry-common (~> 0.20) opentelemetry-sdk (~> 1.10) opentelemetry-semantic_conventions opentelemetry-exporter-otlp-metrics (0.6.1) google-protobuf (>= 3.18, < 5.0) googleapis-common-protos-types (~> 1.3) opentelemetry-api (~> 1.1) opentelemetry-common (~> 0.20) opentelemetry-metrics-api (~> 0.2) opentelemetry-metrics-sdk (~> 0.5) opentelemetry-sdk (~> 1.2) opentelemetry-semantic_conventions opentelemetry-metrics-api (0.4.0) opentelemetry-api (~> 1.0) opentelemetry-metrics-sdk (0.11.2) opentelemetry-api (~> 1.1) opentelemetry-metrics-api (~> 0.2) opentelemetry-sdk (~> 1.2) opentelemetry-registry (0.4.0) opentelemetry-api (~> 1.1) opentelemetry-sdk (1.10.0) opentelemetry-api (~> 1.1) opentelemetry-common (~> 0.20) opentelemetry-registry (~> 0.2) opentelemetry-semantic_conventions opentelemetry-semantic_conventions (1.36.0) opentelemetry-api (~> 1.0) ostruct (0.6.3) parallel (1.27.0) parser (3.3.10.1) ast (~> 2.4.1) racc parslet (2.0.0) prism (1.8.0) pry (0.16.0) coderay (~> 1.1) method_source (~> 1.0) reline (>= 0.6.0) pry-byebug (3.12.0) byebug (~> 13.0) pry (>= 0.13, < 0.17) public_suffix (7.0.2) racc (1.8.1) rainbow (3.1.1) rake (13.3.1) rbs (3.10.2) logger regexp_parser (2.11.3) reline (0.6.3) io-console (~> 0.5) reverse_markdown (3.0.2) nokogiri rexml (3.4.4) rspec (3.13.2) rspec-core (~> 3.13.0) rspec-expectations (~> 3.13.0) rspec-mocks (~> 3.13.0) rspec-core (3.13.6) rspec-support (~> 3.13.0) rspec-expectations (3.13.5) diff-lcs (>= 1.2.0, < 2.0) rspec-support (~> 3.13.0) rspec-mocks (3.13.7) diff-lcs (>= 1.2.0, < 2.0) rspec-support (~> 3.13.0) rspec-support (3.13.6) rubocop (1.84.2) json (~> 2.3) language_server-protocol (~> 3.17.0.2) lint_roller (~> 1.1.0) parallel (~> 1.10) parser (>= 3.3.0.2) rainbow (>= 2.2.2, < 4.0) regexp_parser (>= 
2.9.3, < 3.0) rubocop-ast (>= 1.49.0, < 2.0) ruby-progressbar (~> 1.7) unicode-display_width (>= 2.4.0, < 4.0) rubocop-ast (1.49.0) parser (>= 3.3.7.2) prism (~> 1.7) rubocop-rake (0.7.1) lint_roller (~> 1.1) rubocop (>= 1.72.1) ruby-progressbar (1.13.0) solargraph (0.58.2) ast (~> 2.4.3) backport (~> 1.2) benchmark (~> 0.4) bundler (>= 2.0) diff-lcs (~> 1.4) jaro_winkler (~> 1.6, >= 1.6.1) kramdown (~> 2.3) kramdown-parser-gfm (~> 1.1) logger (~> 1.6) observer (~> 0.1) open3 (~> 0.2.1) ostruct (~> 0.6) parser (~> 3.0) prism (~> 1.4) rbs (>= 3.6.1, <= 4.0.0.dev.4) reverse_markdown (~> 3.0) rubocop (~> 1.76) thor (~> 1.0) tilt (~> 2.0) yard (~> 0.9, >= 0.9.24) yard-activesupport-concern (~> 0.0) yard-solargraph (~> 0.1) thor (1.5.0) tilt (2.7.0) toml (0.3.0) parslet (>= 1.8.0, < 3.0.0) typhoeus (1.5.0) ethon (>= 0.9.0, < 0.16.0) unicode-display_width (3.2.0) unicode-emoji (~> 4.1) unicode-emoji (4.2.0) webmock (3.26.1) addressable (>= 2.8.0) crack (>= 0.3.2) hashdiff (>= 0.4.0, < 2.0.0) websocket (1.2.11) websocket-client-simple (0.9.0) base64 event_emitter mutex_m websocket yard (0.9.38) yard-activesupport-concern (0.0.1) yard (>= 0.8) yard-markdown (0.5.0) csv yard yard-solargraph (0.1.0) yard (~> 0.9) PLATFORMS aarch64-linux aarch64-linux-gnu aarch64-linux-musl arm-linux-gnu arm-linux-musl arm64-darwin x86_64-darwin x86_64-linux-gnu x86_64-linux-musl DEPENDENCIES daytona (>= 0.0.0.pre.dev)! daytona_api_client (>= 0.0.0.pre.dev)! daytona_toolbox_api_client (>= 0.0.0.pre.dev)! pry (~> 0.15) pry-byebug rake (~> 13.0) rspec (~> 3.6, >= 3.6.0) rubocop (= 1.84.2) rubocop-rake (~> 0.7) solargraph (~> 0.57) webmock (~> 3.25) yard-markdown (~> 0.5.0) BUNDLED WITH 2.6.9 ================================================ FILE: LICENSE ================================================ GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. 
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. 
It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. 
Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. 
However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. 
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. 
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. 
Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>. ================================================ FILE: NOTICE ================================================ Daytona Copyright the various Daytona software authors. The initial developer of the software is Daytona Platforms, Inc. (https://daytona.io). Copyright 2025 Daytona Platforms, Inc. All Rights Reserved. ================================================ FILE: PACKAGING.md ================================================ # Packaging Guidelines for Daytona The Daytona team appreciates any efforts to make our software more accessible to users on various platforms. While we encourage packaging and distribution of our open-source project, we have some important guidelines, particularly regarding naming. 
## Critical Naming Guideline **Important**: While you are free to package and distribute our software, you **MUST NOT** name your package `daytona` or, in any way, suggest that the package you distribute is an official distribution of `daytona`. This restriction is to prevent confusion and maintain the integrity of our project identity. - Acceptable: "unofficial-daytona-package", "unofficial-daytona-distribution", etc. - Not Acceptable: "daytona", "official-daytona", etc. ## General Guidelines 1. **License Compliance**: Ensure that the AGPL 3.0/Apache 2.0 license is included with the package and that all copyright notices are preserved. 2. **Version Accuracy**: Use the exact version number of Daytona that you are packaging. Do not modify the version number or add custom suffixes without explicit permission. 3. **Dependencies**: Include all necessary dependencies as specified in our project documentation. Do not add extra dependencies without consulting the project maintainers. 4. **Modifications**: If you need to make any modifications to the source code for packaging purposes, please document these changes clearly and consider submitting them as pull requests to the main project. 5. **Standard Note**: Please include the following standard note in your package description or metadata: ``` This package contains an unofficial distribution of Daytona, an open source project developed by Daytona Platforms Inc. This package is not officially supported or endorsed by the Daytona project. For the official version, please visit https://github.com/daytonaio/daytona. ``` ## Feedback and Questions If you have any questions about packaging Daytona or need clarification on these guidelines, especially regarding naming conventions, please open an issue in our GitHub repository. We appreciate your contribution to making Daytona more accessible to users across different platforms, while respecting our project's identity! 
================================================ FILE: PREPARING_YOUR_CHANGES.md ================================================ # Preparing Your Changes This document contains information related to preparing changes for a pull request. Here's a quick checklist for a good PR, more details below: 1. A discussion around the change on [Slack](https://go.daytona.io/slack) or in an issue. 1. A GitHub Issue with a good description associated with the PR 1. One feature/change per PR 1. One commit per PR 1. PR rebased on main (git rebase, not git pull) 1. Good descriptive commit message, with link to issue 1. No changes to code not directly related to your PR 1. Includes functional/integration test 1. Includes documentation ## Commit Message Format We do not require a particular commit message format of any kind, but we do require that individual commits be descriptive, relative to size and impact. For example, if a descriptive title covers what the commit does in practice, then an additional description below the title is not required. However, if the commit has an out-sized impact relative to other commits, its description will need to reflect that. Reviewers may ask you to amend your commits if they are not descriptive enough. Since the descriptiveness of a commit is subjective, please feel free to talk to us on [Slack](https://go.daytona.io/slack) if you have any questions. ### Optional Commit Template If you would like an optional commit template, see the following: ```text ``` ## Squashed Commits We require that you squash all changes to a single commit. You can do this with the `git rebase -i HEAD~X` command where X is the number of commits you want to squash. See the [Git Documentation](https://git-scm.com/book/en/v2/Git-Branching-Rebasing) for more details. ## Developer's Certificate of Origin Any contributions to Daytona must only contain code that can legally be contributed to Daytona, and which the Daytona project can distribute under its license. 
Prior to contributing to Daytona please read the [Developer's Certificate of Origin](https://developercertificate.org/) and sign-off all commits with the `--signoff` option provided by `git commit`. For example: ``` git commit --signoff --message "This is the commit message" ``` This option adds a `Signed-off-by` trailer at the end of the commit log message. ## DCO Policy on Real Names The DCO is a representation by someone stating they have the right to contribute the code they have proposed and is important for legal purposes. We have adopted the CNCF DCO Guidelines (https://github.com/cncf/foundation/blob/main/dco-guidelines.md). Which for simplicity we will include here in full: ### DCO Guidelines v1.1 The DCO is a representation by someone stating they have the right to contribute the code they have proposed for acceptance into a project: https://developercertificate.org That representation is important for legal purposes and was the community-developed outcome after a $1 billion [lawsuit](https://en.wikipedia.org/wiki/SCO%E2%80%93Linux_disputes) by SCO against IBM. The representation is designed to prevent issues but also keep the burden on contributors low. It has proven very adaptable to other projects, is built into git itself (and now also GitHub), and is in use by thousands of projects to avoid more burdensome requirements to contribute (such as a CLA). ### DCO and Real Names The DCO requires the use of a real name that can be used to identify someone in case there is an issue about a contribution they made. **A real name does not require a legal name, nor a birth name, nor any name that appears on an official ID (e.g. a passport). Your real name is the name you convey to people in the community for them to use to identify you as you. 
The key concern is that your identification is sufficient enough to contact you if an issue were to arise in the future about your contribution.** Your real name should not be an anonymous id or false name that misrepresents who you are. ================================================ FILE: PUBLISHING.md ================================================ # Publishing Daytona SDKs This document describes how to publish the Daytona SDKs (Python, TypeScript, and Ruby) to their respective package registries. ## Table of Contents - [Prerequisites](#prerequisites) - [Python SDK (PyPI)](#python-sdk-pypi) - [TypeScript SDK (npm)](#typescript-sdk-npm) - [Ruby SDK (RubyGems)](#ruby-sdk-rubygems) - [Automated Publishing (CI/CD)](#automated-publishing-cicd) - [Version Management](#version-management) ## Prerequisites Before publishing any SDK, ensure you have: 1. **Maintainer Access**: Write access to the Daytona repository 2. **Package Registry Credentials**: - PyPI: Token with upload permissions - npm: Token with publish permissions - RubyGems: API key with push permissions 3. **Local Development Setup**: - All dependencies installed (`yarn install`) - SDKs built successfully - Tests passing ## Python SDK (PyPI) ### Using Nx ```bash # From repository root export PYPI_TOKEN="your-pypi-token" export PYPI_PKG_VERSION="X.Y.Z" # pre-release format example: "X.Y.Za1" yarn nx publish sdk-python ``` **Note**: [Guide](https://packaging.python.org/en/latest/discussions/versioning/) for versioning Python packages. ## TypeScript SDK (npm) ### Using Nx ```bash # From repository root export NPM_TOKEN="your-npm-token" export NPM_PKG_VERSION="X.Y.Z" # pre-release format example: "X.Y.Z-alpha.1" export NPM_TAG="latest" # or "beta", "alpha", etc. yarn nx publish sdk-typescript ``` **Note**: NPM packages must have [SemVer-aligned formats](https://semver.org/). 
## Ruby SDK (RubyGems) ### Using Nx ```bash # From repository root export RUBYGEMS_API_KEY="your-rubygems-api-key" export RUBYGEMS_PKG_VERSION="X.Y.Z" # pre-release format example: "X.Y.Z.alpha.1" yarn nx publish sdk-ruby ``` **Note**: [Guide](https://guides.rubygems.org/patterns/#prerelease-gems) for versioning Ruby gems. ## Automated Publishing (CI/CD) ### GitHub Actions Workflow The repository includes a GitHub Actions workflow for automated publishing: `.github/workflows/sdk_publish.yaml` #### Triggering a Release 1. Go to **Actions** → **SDK and CLI Publish** in the GitHub repository 2. Click **Run workflow** 3. Fill in the parameters: - **version**: The version to release (e.g., `v0.126.0`) - **pypi_pkg_version**: (Optional) Override PyPI version - **npm_pkg_version**: (Optional) Override npm version - **rubygems_pkg_version**: (Optional) Override RubyGems version - **npm_tag**: npm dist-tag (default: `latest`) #### Required Secrets Ensure these secrets are configured in GitHub repository settings: - `PYPI_TOKEN`: PyPI API token - `NPM_TOKEN`: npm access token - `RUBYGEMS_API_KEY`: RubyGems API key - `GITHUBBOT_TOKEN`: GitHub token for Homebrew tap updates ### What the Workflow Does 1. Checks out the code 2. Sets up all required environments (Go, Java, Python, Node.js, Ruby) 3. Installs dependencies 4. Configures credentials for all package registries 5. Runs `yarn publish` which uses Nx to publish all SDKs in the correct order 6. Updates the Homebrew tap (for the CLI) ## Version Management ### Version Format `MAJOR.MINOR.PATCH` releases follow semantics: - **MAJOR**: Breaking changes - **MINOR**: New features (backward compatible) - **PATCH**: Bug fixes (backward compatible) Prerelease formats depend on SDK language: 1. For **Typescript** (npm) follow semantic versioning ([SemVer](https://semver.org/)): `MAJOR.MINOR.PATCH` For pre-releases, use: - `0.126.0-alpha.1` - Alpha release - `0.126.0-beta.1` - Beta release - `0.126.0-rc.1` - Release candidate 2. 
For **Python** (PyPI) follow Python packages versioning [guide](https://packaging.python.org/en/latest/discussions/versioning/): For pre-releases, use: - `1.2.0a1` - Alpha release - `1.2.0b1` - Beta release - `1.2.0rc1` - Release candidate 3. For **Ruby** (gem) follow Ruby gems versioning [guide](https://guides.rubygems.org/patterns/#prerelease-gems): For pre-releases, use: - `0.126.0.alpha.1` - Alpha release - `0.126.0.beta.1` - Beta release - `0.126.0.rc.1` - Release candidate ### Checking Published Versions #### PyPI ```bash pip index versions daytona # or curl -s https://pypi.org/pypi/daytona/json | jq -r .info.version ``` #### npm ```bash npm view @daytonaio/sdk version # or npm info @daytonaio/sdk ``` #### RubyGems ```bash gem search daytona --remote --exact # or gem info daytona --remote ``` ## References - [Semantic Versioning](https://semver.org/) - [Python packages versioning](https://packaging.python.org/en/latest/discussions/versioning/) - [Ruby gems versioning guide](https://guides.rubygems.org/patterns/#prerelease-gems) ================================================ FILE: README.md ================================================
[![Documentation](https://img.shields.io/github/v/release/daytonaio/docs?label=Docs&color=23cc71)](https://www.daytona.io/docs) ![License](https://img.shields.io/badge/License-AGPL--3-blue) [![Go Report Card](https://goreportcard.com/badge/github.com/daytonaio/daytona)](https://goreportcard.com/report/github.com/daytonaio/daytona) [![Issues - daytona](https://img.shields.io/github/issues/daytonaio/daytona)](https://github.com/daytonaio/daytona/issues) ![GitHub Release](https://img.shields.io/github/v/release/daytonaio/daytona)
 
Daytona logo

Run AI Code.
Secure and Elastic Infrastructure for Running Your AI-Generated Code.

Documentation · Report Bug · Request Feature · Join our Slack · Connect on X

Daytona  - Secure and elastic infra for running your AI-generated code. | Product Hunt Daytona  - Secure and elastic infra for running your AI-generated code. | Product Hunt

--- ## Installation ### Python SDK ```bash pip install daytona ``` ### TypeScript SDK ```bash npm install @daytonaio/sdk ``` --- ## Features - **Lightning-Fast Infrastructure**: Sub-90ms Sandbox creation from code to execution. - **Separated & Isolated Runtime**: Execute AI-generated code with zero risk to your infrastructure. - **Massive Parallelization for Concurrent AI Workflows**: Fork Sandbox filesystem and memory state (Coming soon!) - **Programmatic Control**: File, Git, LSP, and Execute API - **Unlimited Persistence**: Your Sandboxes can live forever - **OCI/Docker Compatibility**: Use any OCI/Docker image to create a Sandbox --- ## Quick Start 1. Create an account at https://app.daytona.io 1. Generate a [new API key](https://app.daytona.io/dashboard/keys) 1. Follow the [Getting Started docs](https://www.daytona.io/docs/getting-started/) to start using the Daytona SDK ## Creating your first Sandbox ### Python SDK ```py from daytona import Daytona, DaytonaConfig, CreateSandboxBaseParams # Initialize the Daytona client daytona = Daytona(DaytonaConfig(api_key="YOUR_API_KEY")) # Create the Sandbox instance sandbox = daytona.create(CreateSandboxBaseParams(language="python")) # Run code securely inside the Sandbox response = sandbox.process.code_run('print("Sum of 3 and 4 is " + str(3 + 4))') if response.exit_code != 0: print(f"Error running code: {response.exit_code} {response.result}") else: print(response.result) # Clean up the Sandbox daytona.delete(sandbox) ``` ### Typescript SDK ```jsx import { Daytona } from '@daytonaio/sdk' async function main() { // Initialize the Daytona client const daytona = new Daytona({ apiKey: 'YOUR_API_KEY', }) let sandbox try { // Create the Sandbox instance sandbox = await daytona.create({ language: 'typescript', }) // Run code securely inside the Sandbox const response = await sandbox.process.codeRun('console.log("Sum of 3 and 4 is " + (3 + 4))') if (response.exitCode !== 0) { console.error('Error running code:', 
response.exitCode, response.result) } else { console.log(response.result) } } catch (error) { console.error('Sandbox flow error:', error) } finally { if (sandbox) await daytona.delete(sandbox) } } main().catch(console.error) ``` ### Go SDK ```go package main import ( "context" "fmt" "log" "time" "github.com/daytonaio/daytona/libs/sdk-go/pkg/daytona" "github.com/daytonaio/daytona/libs/sdk-go/pkg/types" ) func main() { // Initialize the Daytona client with DAYTONA_API_KEY in env // Alternative is to use daytona.NewClientWithConfig(...) for more specific config client, err := daytona.NewClient() if err != nil { log.Fatalf("Failed to create client: %v", err) } ctx := context.Background() // Create the Sandbox instance params := types.SnapshotParams{ SandboxBaseParams: types.SandboxBaseParams{ Language: types.CodeLanguagePython, }, } sandbox, err := client.Create(ctx, params, daytona.WithTimeout(90*time.Second)) if err != nil { log.Fatalf("Failed to create sandbox: %v", err) } // Run code securely inside the Sandbox response, err := sandbox.Process.ExecuteCommand(ctx, `python3 -c "print('Sum of 3 and 4 is', 3 + 4)"`) if err != nil { log.Fatalf("Failed to execute command: %v", err) } if response.ExitCode != 0 { fmt.Printf("Error running code: %d %s\n", response.ExitCode, response.Result) } else { fmt.Println(response.Result) } // Clean up the Sandbox if err := sandbox.Delete(ctx); err != nil { log.Fatalf("Failed to delete sandbox: %v", err) } } ``` --- ## Contributing Daytona is Open Source under the [GNU AFFERO GENERAL PUBLIC LICENSE](LICENSE), and is the [copyright of its contributors](NOTICE). If you would like to contribute to the software, read the Developer Certificate of Origin Version 1.1 (https://developercertificate.org/). Afterwards, navigate to the [contributing guide](CONTRIBUTING.md) to get started. 
================================================ FILE: SECURITY.md ================================================ # Security Policy ## Reporting a Vulnerability At Daytona, we take security seriously. If you believe you have found a security vulnerability in any Daytona-owned repository or service, please report it responsibly. **Please do NOT report security vulnerabilities through public GitHub issues.** Instead, please email us at: **security@daytona.io** You can also report vulnerabilities privately through [GitHub's security advisory feature](https://github.com/daytonaio/daytona/security/advisories/new). Please include: - Description of the vulnerability - Steps to reproduce - Impact assessment - Any relevant screenshots or proof-of-concept We will acknowledge receipt within 2 business days and provide an initial assessment within 5 business days. ## Scope The following assets and areas are in scope for vulnerability reports: - **Daytona platform** — app.daytona.io, including the web application and management interfaces - **API and SDK** — all documented and undocumented API endpoints, client SDKs - **Sandbox runtime isolation** — escape from sandbox to host, cross-tenant access, isolation boundary bypasses - **Authentication and authorization** — SSO, API key management, session handling, privilege escalation across accounts or organizations - **Secrets management** — scoped secret injection, unauthorized access to secrets, leakage across sandbox boundaries - **Public GitHub repositories** — any repository under the [daytonaio](https://github.com/daytonaio) organization ## Excluded Submission Types The following categories are excluded from this program. Reports in these categories will be closed without further assessment unless they demonstrate impact beyond what is described. 1. **In-sandbox privilege escalation, root access, or capability use** — Daytona sandboxes provide full root access within user-namespace isolation by design. 
Findings that chain to host escape or cross-sandbox access remain in scope. 2. **Findings within the reporter's own sandbox** that do not demonstrate impact beyond that sandbox's isolation boundary. 3. **Denial of service** — DoS, DDoS, resource exhaustion, volumetric testing, or network flooding. 4. **Rate limiting observations** that do not demonstrate resource exhaustion, financial impact, or abuse potential. 5. **Social engineering** — phishing, vishing, pretexting, or any form of social engineering targeting Daytona employees or users. 6. **Physical security testing** of offices, data centers, or personnel. 7. **Marketing and documentation sites** — findings against daytona.io or docs.daytona.io, excluding subdomain takeover vulnerabilities. 8. **Third-party services** — vulnerabilities in services or platforms not owned or operated by Daytona. 9. **Known public files or directories** — e.g., robots.txt, .well-known, or other intentionally public resources. 10. **DNSSEC or TLS cipher suite configuration suggestions** without a demonstrated exploit path. 11. **Missing Secure/HTTPOnly flags** on non-sensitive cookies. 12. **CSRF on unauthenticated or public-facing forms.** 13. **Outdated browsers and platforms** — vulnerabilities only affecting unpatched or end-of-life software. 14. **Automated scan output** — reports generated solely by automated tools without validated proof of impact. 15. **Best practice recommendations** without demonstrable security impact. 16. **Spam or service degradation** — testing that results in sending unsolicited messages or degradation of service to other users. ## Supported Versions We accept vulnerability reports for the latest stable release of Daytona. ## Safe Harbor Daytona supports safe harbor for security researchers who act in good faith and in accordance with this policy. 
We will not pursue legal action against researchers who: - Make a good-faith effort to avoid privacy violations, data destruction, and service disruption - Only access data to the extent necessary to demonstrate the vulnerability - Do not exfiltrate, retain, or disclose any user data encountered during research - Report findings promptly through the channels listed above - Do not disclose findings publicly before coordinated resolution (see Disclosure Timeline below) - Comply with all applicable laws If legal action is initiated by a third party against a researcher for activities conducted in accordance with this policy, we will take steps to make it known that the research was authorized. This safe harbor applies to all Daytona services and assets listed in the Scope section. ## Disclosure Timeline We follow a coordinated disclosure process: - **90 days** — We target remediation within 90 days of a validated report. Complex issues may require additional time, and we will communicate timelines transparently. - **30 days post-patch** — After a fix is released, we ask that researchers wait 30 days before public disclosure to allow users to update. - **No response** — If we fail to acknowledge or respond to a report within 90 days, the researcher may proceed with public disclosure after providing 14 days advance written notice to security@daytona.io. ## Rewards We offer rewards from $100 to $1,000 for valid, original findings that demonstrate real security impact. Severity, exploitability, and report quality are all considered. Duplicate reports are credited to the first submission. 
================================================ FILE: apps/api/Dockerfile ================================================ FROM node:24-slim AS daytona ENV CI=true # Install dependencies (apt instead of apk) RUN apt-get update && apt-get install -y --no-install-recommends bash curl && \ rm -rf /var/lib/apt/lists/* RUN npm install -g corepack && corepack enable WORKDIR /daytona # Yarn caching layer COPY package.json yarn.lock .yarnrc.yml ./ RUN yarn install --immutable # Nx + TS config COPY nx.json tsconfig.base.json ./ # App source COPY apps/api/ apps/api/ COPY apps/dashboard/ apps/dashboard/ # Lib dependencies COPY libs/runner-api-client/ libs/runner-api-client/ COPY libs/api-client/ libs/api-client/ COPY libs/analytics-api-client/ libs/analytics-api-client/ COPY libs/toolbox-api-client/ libs/toolbox-api-client/ COPY libs/sdk-typescript/ libs/sdk-typescript/ ENV NX_DAEMON=false RUN yarn nx build api --configuration=production --nxBail=true RUN VITE_BASE_API_URL=%DAYTONA_BASE_API_URL% yarn nx build dashboard --configuration=production --nxBail=true --output-style=stream ARG VERSION=0.0.1 ENV VERSION=${VERSION} HEALTHCHECK CMD [ "curl", "-f", "http://localhost:3000/api/config" ] ENTRYPOINT ["node", "dist/apps/api/main.js"] ================================================ FILE: apps/api/eslint.config.mjs ================================================ import baseConfig from '../../eslint.config.mjs' export default [ ...baseConfig, { files: ['**/*.ts'], rules: { 'no-restricted-syntax': [ 'error', { selector: 'Decorator[expression.callee.name="InjectRepository"] > CallExpression > Identifier[name="Sandbox"]', message: 'Do not use @InjectRepository(Sandbox). Use the custom SandboxRepository instead.', }, ], }, }, ] ================================================ FILE: apps/api/jest.config.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export default { displayName: 'daytona', preset: '../../jest.preset.js', testEnvironment: 'node', transform: { '^.+\\.[tj]s$': ['ts-jest', { tsconfig: '<rootDir>/tsconfig.spec.json' }], }, moduleFileExtensions: ['ts', 'js', 'html'], coverageDirectory: '../../coverage/apps/daytona', } ================================================ FILE: apps/api/project.json ================================================ { "name": "api", "$schema": "../../node_modules/nx/schemas/project-schema.json", "sourceRoot": "apps/api/src", "projectType": "application", "tags": [], "targets": { "build": { "executor": "@nx/webpack:webpack", "options": { "outputPath": "dist/apps/api", "deleteOutputPath": false, "main": "apps/api/src/main.ts", "tsConfig": "apps/api/tsconfig.app.json", "generatePackageJson": true, "target": "node", "compiler": "tsc", "sourceMap": true, "webpackConfig": "apps/api/webpack.config.js", "assets": [ { "input": "apps/api/src/assets", "glob": "**/*", "output": "./assets/" } ] }, "configurations": { "production": { "optimization": true, "extractLicenses": true, "inspect": false } } }, "openapi": { "executor": "nx:run-commands", "cache": true, "inputs": [ "{projectRoot}/src/**/*.ts", "!{projectRoot}/src/**/*.spec.ts", "{projectRoot}/tsconfig.app.json", { "dependentTasksOutputFiles": "**/*", "transitive": true } ], "outputs": ["{workspaceRoot}/dist/apps/api/openapi.json", "{workspaceRoot}/dist/apps/api/openapi.3.1.0.json"], "options": { "commands": [ "mkdir -p dist/apps/api", "yarn ts-node apps/api/src/generate-openapi.ts -o dist/apps/api/openapi.json" ], "parallel": false, "env": { "TS_NODE_PROJECT": "apps/api/tsconfig.app.json", "NODE_OPTIONS": "--require tsconfig-paths/register", "SKIP_CONNECTIONS": "true" } } }, "serve": { "executor": "@nx/js:node", "defaultConfiguration": "development", "dependsOn": ["build"], "options": { "buildTarget": "api:build", "runBuildTargetDependencies": false, "watch": true }, "configurations": { 
"development": { "buildTarget": "api:build:development" }, "production": { "buildTarget": "api:build:production" } } }, "format": { "executor": "nx:run-commands", "options": { "command": "cd {projectRoot} && prettier --write \"**/*.{ts,json,mjs}\" --config ../../.prettierrc" } }, "test": { "options": { "passWithNoTests": true } }, "check-version-env": {}, "docker": { "options": { "target": "daytona" }, "dependsOn": [ { "target": "build-amd64", "projects": "runner" } ] }, "push-manifest": {}, "lint": { "executor": "nx:run-commands", "options": { "command": "bash apps/api/scripts/validate-migration-paths.sh $(find apps/api/src/migrations -name '*-migration.ts' -type f)" } } } } ================================================ FILE: apps/api/scripts/validate-migration-paths.sh ================================================ #!/bin/bash # Copyright Daytona Platforms Inc. # SPDX-License-Identifier: AGPL-3.0 # Fails if any migration file is placed directly under # apps/api/src/migrations/ instead of pre-deploy/ or post-deploy/. # Legacy migrations (timestamp <= LEGACY_CUTOFF) are excluded. # Exit on error set -e LEGACY_CUTOFF=1770880371265 forbidden=() for f in "$@"; do rel="$(realpath --relative-to="$PWD" "$f")" case "$rel" in apps/api/src/migrations/pre-deploy/*-migration.ts) ;; apps/api/src/migrations/post-deploy/*-migration.ts) ;; apps/api/src/migrations/*/*-migration.ts) forbidden+=("$rel") ;; apps/api/src/migrations/*-migration.ts) timestamp=$(basename "$rel" | grep -oP '^\d+') if [ -n "$timestamp" ] && [ "$timestamp" -le "$LEGACY_CUTOFF" ]; then continue fi forbidden+=("$rel") ;; esac done if [ ${#forbidden[@]} -gt 0 ]; then echo "Migration files must be placed in one of:" >&2 echo " - apps/api/src/migrations/pre-deploy/" >&2 echo " - apps/api/src/migrations/post-deploy/" >&2 echo "" >&2 echo "Invalid paths:" >&2 for p in "${forbidden[@]}"; do echo " - $p" >&2 done echo "" >&2 echo "See apps/api/src/migrations/README.md for more information." 
>&2 exit 1 fi ================================================ FILE: apps/api/src/admin/admin.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { AdminRunnerController } from './controllers/runner.controller' import { AdminSandboxController } from './controllers/sandbox.controller' import { SandboxModule } from '../sandbox/sandbox.module' import { RegionModule } from '../region/region.module' import { OrganizationModule } from '../organization/organization.module' @Module({ imports: [SandboxModule, RegionModule, OrganizationModule], controllers: [AdminRunnerController, AdminSandboxController], }) export class AdminModule {} ================================================ FILE: apps/api/src/admin/controllers/runner.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Body, Controller, Delete, Get, HttpCode, NotFoundException, Param, ParseUUIDPipe, Patch, Post, Query, UseGuards, } from '@nestjs/common' import { ApiBearerAuth, ApiOAuth2, ApiOperation, ApiParam, ApiQuery, ApiResponse, ApiTags } from '@nestjs/swagger' import { AdminCreateRunnerDto } from '../dto/create-runner.dto' import { Audit, MASKED_AUDIT_VALUE, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } from '../../audit/enums/audit-target.enum' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { SystemActionGuard } from '../../auth/system-action.guard' import { RequiredApiRole } from '../../common/decorators/required-role.decorator' import { RegionService } from '../../region/services/region.service' import { CreateRunnerResponseDto } from '../../sandbox/dto/create-runner-response.dto' import { RunnerFullDto } from '../../sandbox/dto/runner-full.dto' 
import { RunnerDto } from '../../sandbox/dto/runner.dto' import { RunnerService } from '../../sandbox/services/runner.service' import { SystemRole } from '../../user/enums/system-role.enum' @ApiTags('admin') @Controller('admin/runners') @UseGuards(CombinedAuthGuard, SystemActionGuard) @RequiredApiRole([SystemRole.ADMIN]) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class AdminRunnerController { constructor( private readonly runnerService: RunnerService, private readonly regionService: RegionService, ) {} @Post() @HttpCode(201) @ApiOperation({ summary: 'Create runner', operationId: 'adminCreateRunner', }) @ApiResponse({ status: 201, type: CreateRunnerResponseDto, }) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.RUNNER, targetIdFromResult: (result: RunnerDto) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ domain: req.body?.domain, apiUrl: req.body?.apiUrl, proxyUrl: req.body?.proxyUrl, regionId: req.body?.regionId, name: req.body?.name, apiKey: MASKED_AUDIT_VALUE, apiVersion: req.body?.apiVersion, }), }, }) async create(@Body() createRunnerDto: AdminCreateRunnerDto): Promise { const region = await this.regionService.findOne(createRunnerDto.regionId) if (!region) { throw new NotFoundException('Region not found') } const { runner, apiKey } = await this.runnerService.create({ domain: createRunnerDto.domain, apiUrl: createRunnerDto.apiUrl, proxyUrl: createRunnerDto.proxyUrl, regionId: createRunnerDto.regionId, name: createRunnerDto.name, apiKey: createRunnerDto.apiKey, apiVersion: createRunnerDto.apiVersion, cpu: createRunnerDto.cpu, memoryGiB: createRunnerDto.memoryGiB, diskGiB: createRunnerDto.diskGiB, }) return CreateRunnerResponseDto.fromRunner(runner, apiKey) } @Get(':id') @HttpCode(200) @ApiOperation({ summary: 'Get runner by ID', operationId: 'adminGetRunnerById', }) @ApiResponse({ status: 200, type: RunnerFullDto, }) @ApiParam({ name: 'id', description: 'Runner ID', type: String, }) async 
getRunnerById(@Param('id', ParseUUIDPipe) id: string): Promise { return this.runnerService.findOneFullOrFail(id) } @Get() @HttpCode(200) @ApiOperation({ summary: 'List all runners', operationId: 'adminListRunners', }) @ApiResponse({ status: 200, type: [RunnerFullDto], }) @ApiQuery({ name: 'regionId', description: 'Filter runners by region ID', type: String, required: false, }) async findAll(@Query('regionId') regionId?: string): Promise { if (regionId) { return this.runnerService.findAllByRegionFull(regionId) } return this.runnerService.findAllFull() } @Patch(':id/scheduling') @HttpCode(200) @ApiOperation({ summary: 'Update runner scheduling status', operationId: 'adminUpdateRunnerScheduling', }) @ApiResponse({ status: 204, }) @Audit({ action: AuditAction.UPDATE_SCHEDULING, targetType: AuditTarget.RUNNER, targetIdFromRequest: (req) => req.params.id, requestMetadata: { body: (req: TypedRequest<{ unschedulable: boolean }>) => ({ unschedulable: req.body?.unschedulable, }), }, }) async updateSchedulingStatus( @Param('id', ParseUUIDPipe) id: string, @Body('unschedulable') unschedulable: boolean, ): Promise { await this.runnerService.updateSchedulingStatus(id, unschedulable) } @Delete(':id') @HttpCode(204) @ApiOperation({ summary: 'Delete runner', operationId: 'adminDeleteRunner', }) @ApiResponse({ status: 204, }) @ApiParam({ name: 'id', description: 'Runner ID', type: String, }) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.RUNNER, targetIdFromRequest: (req) => req.params.id, }) async delete(@Param('id', ParseUUIDPipe) id: string): Promise { return this.runnerService.remove(id) } } ================================================ FILE: apps/api/src/admin/controllers/sandbox.controller.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Controller, HttpCode, NotFoundException, Param, Post, UseGuards } from '@nestjs/common' import { ApiBearerAuth, ApiOAuth2, ApiOperation, ApiParam, ApiResponse, ApiTags } from '@nestjs/swagger' import { Audit } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } from '../../audit/enums/audit-target.enum' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { SystemActionGuard } from '../../auth/system-action.guard' import { RequiredApiRole } from '../../common/decorators/required-role.decorator' import { OrganizationService } from '../../organization/services/organization.service' import { SandboxDto } from '../../sandbox/dto/sandbox.dto' import { SandboxService } from '../../sandbox/services/sandbox.service' import { SystemRole } from '../../user/enums/system-role.enum' @ApiTags('admin') @Controller('admin/sandbox') @UseGuards(CombinedAuthGuard, SystemActionGuard) @RequiredApiRole([SystemRole.ADMIN]) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class AdminSandboxController { constructor( private readonly sandboxService: SandboxService, private readonly organizationService: OrganizationService, ) {} @Post(':sandboxId/recover') @HttpCode(200) @ApiOperation({ summary: 'Recover sandbox from error state as an admin', operationId: 'adminRecoverSandbox', }) @ApiParam({ name: 'sandboxId', description: 'ID of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Recovery initiated', type: SandboxDto, }) @Audit({ action: AuditAction.RECOVER, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, targetIdFromResult: (result: SandboxDto) => result?.id, }) async recoverSandbox(@Param('sandboxId') sandboxId: string): Promise { const organization = await this.organizationService.findBySandboxId(sandboxId) if (!organization) { throw new 
NotFoundException('Sandbox not found') } const recoveredSandbox = await this.sandboxService.recover(sandboxId, organization) return this.sandboxService.toSandboxDto(recoveredSandbox) } } ================================================ FILE: apps/api/src/admin/dto/create-runner.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { IsNumber, IsOptional, IsString } from 'class-validator' import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { CreateRunnerDto } from '../../sandbox/dto/create-runner.dto' @ApiSchema({ name: 'AdminCreateRunner' }) export class AdminCreateRunnerDto extends CreateRunnerDto { @IsString() @ApiProperty() apiKey: string @IsString() @ApiProperty({ description: 'The api version of the runner to create', pattern: '^(0|2)$', example: '2', }) apiVersion: '0' | '2' @ApiProperty({ required: false, description: 'The domain of the runner', example: 'runner1.example.com', }) @IsString() @IsOptional() domain?: string @IsString() @ApiProperty({ description: 'The API URL of the runner', example: 'https://api.runner1.example.com', required: false, }) @IsOptional() apiUrl?: string @IsString() @ApiProperty({ description: 'The proxy URL of the runner', example: 'https://proxy.runner1.example.com', required: false, }) @IsOptional() proxyUrl?: string @IsNumber() @ApiProperty({ description: 'The CPU capacity of the runner', example: 8, required: false, }) @IsOptional() cpu?: number @IsNumber() @ApiProperty({ description: 'The memory capacity of the runner in GiB', example: 16, required: false, }) @IsOptional() memoryGiB?: number @IsNumber() @ApiProperty({ description: 'The disk capacity of the runner in GiB', example: 100, required: false, }) @IsOptional() diskGiB?: number } ================================================ FILE: apps/api/src/analytics/analytics.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { TypeOrmModule } from '@nestjs/typeorm' import { AnalyticsService } from './services/analytics.service' @Module({ imports: [TypeOrmModule.forFeature([AnalyticsService])], providers: [AnalyticsService], exports: [AnalyticsService], }) export class AnalyticsModule {} ================================================ FILE: apps/api/src/analytics/services/analytics.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger } from '@nestjs/common' import { OnEvent } from '@nestjs/event-emitter' import { SandboxEvents } from '../../sandbox/constants/sandbox-events.constants' import { SandboxCreatedEvent } from '../../sandbox/events/sandbox-create.event' import { SandboxDesiredStateUpdatedEvent } from '../../sandbox/events/sandbox-desired-state-updated.event' import { SandboxDestroyedEvent } from '../../sandbox/events/sandbox-destroyed.event' import { SandboxPublicStatusUpdatedEvent } from '../../sandbox/events/sandbox-public-status-updated.event' import { SandboxStartedEvent } from '../../sandbox/events/sandbox-started.event' import { SandboxStateUpdatedEvent } from '../../sandbox/events/sandbox-state-updated.event' import { SandboxStoppedEvent } from '../../sandbox/events/sandbox-stopped.event' import { PostHog } from 'posthog-node' import { OnAsyncEvent } from '../../common/decorators/on-async-event.decorator' import { Organization } from '../../organization/entities/organization.entity' import { OrganizationEvents } from '../../organization/constants/organization-events.constant' import { TypedConfigService } from '../../config/typed-config.service' @Injectable() export class AnalyticsService { private readonly logger = new Logger(AnalyticsService.name) private readonly posthog?: PostHog constructor(private readonly configService: TypedConfigService) { if 
(!this.configService.get('posthog.apiKey')) { return } if (!this.configService.get('posthog.host')) { return } // Initialize PostHog client this.posthog = new PostHog(this.configService.get('posthog.apiKey'), { host: this.configService.get('posthog.host'), }) } @OnEvent(SandboxEvents.CREATED) async handleSandboxCreatedEvent(event: SandboxCreatedEvent) { this.logger.debug(`Sandbox created: ${JSON.stringify(event)}`) } @OnEvent(SandboxEvents.STARTED) async handleSandboxStartedEvent(event: SandboxStartedEvent) { this.logger.debug(`Sandbox started: ${JSON.stringify(event)}`) } @OnEvent(SandboxEvents.STOPPED) async handleSandboxStoppedEvent(event: SandboxStoppedEvent) { this.logger.debug(`Sandbox stopped: ${JSON.stringify(event)}`) } @OnEvent(SandboxEvents.DESTROYED) async handleSandboxDestroyedEvent(event: SandboxDestroyedEvent) { this.logger.debug(`Sandbox destroyed: ${JSON.stringify(event)}`) } @OnEvent(SandboxEvents.PUBLIC_STATUS_UPDATED) async handleSandboxPublicStatusUpdatedEvent(event: SandboxPublicStatusUpdatedEvent) { this.logger.debug(`Sandbox public status updated: ${JSON.stringify(event)}`) } @OnEvent(SandboxEvents.DESIRED_STATE_UPDATED) async handleSandboxDesiredStateUpdatedEvent(event: SandboxDesiredStateUpdatedEvent) { this.logger.debug(`Sandbox desired state updated: ${JSON.stringify(event)}`) } @OnEvent(SandboxEvents.STATE_UPDATED) async handleSandboxStateUpdatedEvent(event: SandboxStateUpdatedEvent) { this.logger.debug(`Sandbox state updated: ${JSON.stringify(event)}`) } @OnAsyncEvent({ event: OrganizationEvents.CREATED, }) async handlePersonalOrganizationCreatedEvent(payload: Organization) { if (!payload.personal) { return } if (!this.posthog) { return } this.posthog.groupIdentify({ groupType: 'organization', groupKey: payload.id, properties: { name: `Personal - ${payload.createdBy}`, created_at: payload.createdAt, created_by: payload.createdBy, personal: payload.personal, environment: this.configService.get('posthog.environment'), }, }) } } 
================================================
FILE: apps/api/src/api-key/api-key.controller.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { Controller, Post, Get, Delete, Param, Body, UseGuards, ForbiddenException, HttpCode } from '@nestjs/common'
import { ApiKeyService } from './api-key.service'
import { CreateApiKeyDto } from './dto/create-api-key.dto'
import { ApiHeader, ApiOAuth2, ApiOperation, ApiResponse, ApiTags, ApiBearerAuth } from '@nestjs/swagger'
import { ApiKeyResponseDto } from './dto/api-key-response.dto'
import { ApiKeyListDto } from './dto/api-key-list.dto'
import { CombinedAuthGuard } from '../auth/combined-auth.guard'
import { CustomHeaders } from '../common/constants/header.constants'
import { AuthContext } from '../common/decorators/auth-context.decorator'
import { AuthContext as IAuthContext } from '../common/interfaces/auth-context.interface'
import { OrganizationAuthContext } from '../common/interfaces/auth-context.interface'
import { OrganizationMemberRole } from '../organization/enums/organization-member-role.enum'
import { OrganizationResourcePermission } from '../organization/enums/organization-resource-permission.enum'
import { OrganizationResourceActionGuard } from '../organization/guards/organization-resource-action.guard'
import { SystemRole } from '../user/enums/system-role.enum'
import { Audit, TypedRequest } from '../audit/decorators/audit.decorator'
import { AuditAction } from '../audit/enums/audit-action.enum'
import { AuditTarget } from '../audit/enums/audit-target.enum'
import { ApiKey } from './api-key.entity'
import { AuthenticatedRateLimitGuard } from '../common/guards/authenticated-rate-limit.guard'

/**
 * Organization-scoped API key management. Keys are identified by name within
 * (organizationId, userId); org owners and system admins may also see/delete
 * keys of other members.
 */
@ApiTags('api-keys')
@Controller('api-keys')
@ApiHeader(CustomHeaders.ORGANIZATION_ID)
@UseGuards(CombinedAuthGuard, OrganizationResourceActionGuard, AuthenticatedRateLimitGuard)
@ApiOAuth2(['openid', 'profile', 'email'])
@ApiBearerAuth()
export class ApiKeyController {
  constructor(private readonly apiKeyService: ApiKeyService) {}

  @Post()
  @ApiOperation({
    summary: 'Create API key',
    operationId: 'createApiKey',
  })
  @ApiResponse({
    status: 201,
    description: 'API key created successfully.',
    type: ApiKeyResponseDto,
  })
  @Audit({
    action: AuditAction.CREATE,
    targetType: AuditTarget.API_KEY,
    targetIdFromResult: (result: ApiKeyResponseDto) => result?.name,
    requestMetadata: {
      body: (req: TypedRequest<CreateApiKeyDto>) => ({
        name: req.body?.name,
        permissions: req.body?.permissions,
        expiresAt: req.body?.expiresAt,
      }),
    },
  })
  async createApiKey(
    @AuthContext() authContext: OrganizationAuthContext,
    @Body() createApiKeyDto: CreateApiKeyDto,
  ): Promise<ApiKeyResponseDto> {
    // A member may only assign permissions they themselves hold.
    this.validateRequestedApiKeyPermissions(authContext, createApiKeyDto.permissions)

    const { apiKey, value } = await this.apiKeyService.createApiKey(
      authContext.organizationId,
      authContext.userId,
      createApiKeyDto.name,
      createApiKeyDto.permissions,
      createApiKeyDto.expiresAt,
    )

    // The plaintext value is only returned once, at creation time.
    return ApiKeyResponseDto.fromApiKey(apiKey, value)
  }

  @Get()
  @ApiOperation({
    summary: 'List API keys',
    operationId: 'listApiKeys',
  })
  @ApiResponse({
    status: 200,
    description: 'API keys retrieved successfully.',
    type: [ApiKeyListDto],
  })
  @ApiResponse({
    status: 500,
    description: 'Error fetching API keys.',
  })
  async getApiKeys(@AuthContext() authContext: OrganizationAuthContext): Promise<ApiKeyListDto[]> {
    let apiKeys: ApiKey[] = []
    // Admins and org owners see every key in the organization; everyone else
    // sees only their own.
    if (authContext.role === SystemRole.ADMIN || authContext.organizationUser?.role === OrganizationMemberRole.OWNER) {
      apiKeys = await this.apiKeyService.getApiKeys(authContext.organizationId)
    } else {
      apiKeys = await this.apiKeyService.getApiKeys(authContext.organizationId, authContext.userId)
    }

    return apiKeys.map((apiKey) => ApiKeyListDto.fromApiKey(apiKey))
  }

  @Get('current')
  @ApiOperation({
    summary: "Get current API key's details",
    operationId: 'getCurrentApiKey',
  })
  @ApiResponse({
    status: 200,
    description: 'API key retrieved successfully.',
    type: ApiKeyListDto,
  })
  async getCurrentApiKey(@AuthContext() authContext: IAuthContext): Promise<ApiKeyListDto> {
    // Only meaningful for API-key auth; OAuth sessions carry no apiKey.
    if (!authContext.apiKey) {
      throw new ForbiddenException('Authenticate with an API key to use this endpoint')
    }
    return ApiKeyListDto.fromApiKey(authContext.apiKey)
  }

  @Get(':name')
  @ApiOperation({
    summary: 'Get API key',
    operationId: 'getApiKey',
  })
  @ApiResponse({
    status: 200,
    description: 'API key retrieved successfully.',
    type: ApiKeyListDto,
  })
  async getApiKey(
    @AuthContext() authContext: OrganizationAuthContext,
    @Param('name') name: string,
  ): Promise<ApiKeyListDto> {
    const apiKey = await this.apiKeyService.getApiKeyByName(authContext.organizationId, authContext.userId, name)
    return ApiKeyListDto.fromApiKey(apiKey)
  }

  @Delete(':name')
  @ApiOperation({
    summary: 'Delete API key',
    operationId: 'deleteApiKey',
  })
  @ApiResponse({
    status: 204,
    description: 'API key deleted successfully.',
  })
  @HttpCode(204)
  @Audit({
    action: AuditAction.DELETE,
    targetType: AuditTarget.API_KEY,
    targetIdFromRequest: (req) => req.params.name,
  })
  async deleteApiKey(@AuthContext() authContext: OrganizationAuthContext, @Param('name') name: string) {
    await this.apiKeyService.deleteApiKey(authContext.organizationId, authContext.userId, name)
  }

  @Delete(':userId/:name')
  @ApiOperation({
    summary: 'Delete API key for user',
    operationId: 'deleteApiKeyForUser',
  })
  @ApiResponse({
    status: 204,
    description: 'API key deleted successfully.',
  })
  @HttpCode(204)
  @Audit({
    action: AuditAction.DELETE,
    targetType: AuditTarget.API_KEY,
    targetIdFromRequest: (req) => req.params.name,
    requestMetadata: {
      params: (req) => ({
        userId: req.params.userId,
      }),
    },
  })
  async deleteApiKeyForUser(
    @AuthContext() authContext: OrganizationAuthContext,
    @Param('userId') userId: string,
    @Param('name') name: string,
  ) {
    // Deleting another member's key requires admin or owner privileges.
    if (
      userId !== authContext.userId &&
      authContext.role !== SystemRole.ADMIN &&
      authContext.organizationUser?.role !== OrganizationMemberRole.OWNER
    ) {
      throw new ForbiddenException('Incorrect user ID provided')
    }

    await this.apiKeyService.deleteApiKey(authContext.organizationId, userId, name)
  }

  // Throws ForbiddenException when the caller requests permissions they do
  // not hold. Admins and org owners may assign anything.
  private validateRequestedApiKeyPermissions(
    authContext: OrganizationAuthContext,
    requestedPermissions: OrganizationResourcePermission[],
  ): void {
    if (authContext.role === SystemRole.ADMIN) {
      return
    }

    if (!authContext.organizationUser) {
      throw new ForbiddenException(`Insufficient permissions for assigning: ${requestedPermissions.join(', ')}`)
    }

    if (authContext.organizationUser.role === OrganizationMemberRole.OWNER) {
      return
    }

    const organizationUserPermissions = new Set(
      authContext.organizationUser.assignedRoles.flatMap((role) => role.permissions),
    )
    const forbiddenPermissions = requestedPermissions.filter(
      (permission) => !organizationUserPermissions.has(permission),
    )
    if (forbiddenPermissions.length) {
      throw new ForbiddenException(`Insufficient permissions for assigning: ${forbiddenPermissions.join(', ')}`)
    }
  }
}

================================================
FILE: apps/api/src/api-key/api-key.entity.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { Column, Entity, Index, PrimaryColumn } from 'typeorm'
import { OrganizationResourcePermission } from '../organization/enums/organization-resource-permission.enum'

// Composite primary key: (organizationId, userId, name). Only a hash of the
// key value is stored, plus a short prefix/suffix for masked display.
@Entity()
@Index('api_key_org_user_idx', ['organizationId', 'userId'])
export class ApiKey {
  @PrimaryColumn({
    type: 'uuid',
  })
  organizationId: string

  @PrimaryColumn()
  userId: string

  @PrimaryColumn()
  name: string

  // Unique lookup hash; '' default presumably covers pre-migration rows —
  // NOTE(review): confirm against the migration history.
  @Column({ unique: true, default: '' })
  keyHash: string

  @Column({
    default: '',
  })
  keyPrefix: string

  @Column({
    default: '',
  })
  keySuffix: string

  @Column({
    type: 'enum',
    enum: OrganizationResourcePermission,
    array: true,
  })
  permissions: OrganizationResourcePermission[]

  @Column()
  createdAt: Date

  @Column({ nullable: true })
  lastUsedAt?: Date

  @Column({ nullable: true })
  expiresAt?: Date
}

================================================
FILE: apps/api/src/api-key/api-key.module.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { ApiKeyController } from './api-key.controller' import { ApiKeyService } from './api-key.service' import { ApiKey } from './api-key.entity' import { TypeOrmModule } from '@nestjs/typeorm' import { OrganizationModule } from '../organization/organization.module' import { RedisLockProvider } from '../sandbox/common/redis-lock.provider' @Module({ imports: [OrganizationModule, TypeOrmModule.forFeature([ApiKey])], controllers: [ApiKeyController], providers: [ApiKeyService, RedisLockProvider], exports: [ApiKeyService], }) export class ApiKeyModule {} ================================================ FILE: apps/api/src/api-key/api-key.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ConflictException, Injectable, Logger, NotFoundException } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { EntityManager, Repository, ArrayOverlap } from 'typeorm' import { ApiKey } from './api-key.entity' import { OrganizationResourcePermission } from '../organization/enums/organization-resource-permission.enum' import { RedisLockProvider } from '../sandbox/common/redis-lock.provider' import { OnAsyncEvent } from '../common/decorators/on-async-event.decorator' import { OrganizationEvents } from '../organization/constants/organization-events.constant' import { OrganizationResourcePermissionsUnassignedEvent } from '../organization/events/organization-resource-permissions-unassigned.event' import { InjectRedis } from '@nestjs-modules/ioredis' import Redis from 'ioredis' import { generateApiKeyHash, generateApiKeyValue } from '../common/utils/api-key' @Injectable() export class ApiKeyService { private readonly logger = new Logger(ApiKeyService.name) constructor( @InjectRepository(ApiKey) private apiKeyRepository: Repository, private readonly redisLockProvider: RedisLockProvider, 
@InjectRedis() private readonly redis: Redis, ) {} private getApiKeyPrefix(value: string): string { return value.substring(0, 3) } private getApiKeySuffix(value: string): string { return value.slice(-3) } async createApiKey( organizationId: string, userId: string, name: string, permissions: OrganizationResourcePermission[], expiresAt?: Date, apiKeyValue?: string, ): Promise<{ apiKey: ApiKey; value: string }> { const existingKey = await this.apiKeyRepository.findOne({ where: { organizationId, userId, name } }) if (existingKey) { throw new ConflictException('API key with this name already exists') } const value = apiKeyValue || generateApiKeyValue() const apiKey = await this.apiKeyRepository.save({ organizationId, userId, name, keyHash: generateApiKeyHash(value), keyPrefix: this.getApiKeyPrefix(value), keySuffix: this.getApiKeySuffix(value), permissions, createdAt: new Date(), expiresAt, }) return { apiKey, value } } async getApiKeys(organizationId: string, userId?: string): Promise { const apiKeys = await this.apiKeyRepository.find({ where: { organizationId, userId }, order: { lastUsedAt: { direction: 'DESC', nulls: 'LAST', }, createdAt: 'DESC', }, }) return apiKeys } async getApiKeyByName(organizationId: string, userId: string, name: string): Promise { const apiKey = await this.apiKeyRepository.findOne({ where: { organizationId, userId, name, }, }) if (!apiKey) { throw new NotFoundException('API key not found') } return apiKey } async getApiKeyByValue(value: string): Promise { const apiKey = await this.apiKeyRepository.findOne({ where: { keyHash: generateApiKeyHash(value), }, }) if (!apiKey) { throw new NotFoundException('API key not found') } return apiKey } async deleteApiKey(organizationId: string, userId: string, name: string): Promise { const apiKey = await this.apiKeyRepository.findOne({ where: { organizationId, userId, name } }) if (!apiKey) { throw new NotFoundException('API key not found') } await this.deleteWithEntityManager(this.apiKeyRepository.manager, 
apiKey) } async updateLastUsedAt(organizationId: string, userId: string, name: string, lastUsedAt: Date): Promise { const cooldownKey = `api-key-last-used-update-${organizationId}-${userId}-${name}` const aquired = await this.redisLockProvider.lock(cooldownKey, 10) // redis for cooldown period - 10 seconds // prevents database flooding when multiple requests are made at the same time if (!aquired) { return } await this.apiKeyRepository.update( { organizationId, userId, name, }, { lastUsedAt }, ) } private async deleteWithEntityManager(entityManager: EntityManager, apiKey: ApiKey): Promise { await entityManager.remove(apiKey) // Invalidate cache when API key is deleted await this.invalidateApiKeyCache(apiKey.keyHash) } private async invalidateApiKeyCache(keyHash: string): Promise { try { const cacheKey = `api-key:validation:${keyHash}` await this.redis.del(cacheKey) this.logger.debug(`Invalidated cache for API key: ${cacheKey}`) } catch (error) { this.logger.error('Error invalidating API key cache:', error) } } @OnAsyncEvent({ event: OrganizationEvents.PERMISSIONS_UNASSIGNED, }) async handleOrganizationResourcePermissionsUnassignedEvent( payload: OrganizationResourcePermissionsUnassignedEvent, ): Promise { const apiKeysToRevoke = await this.apiKeyRepository.find({ where: { organizationId: payload.organizationId, userId: payload.userId, permissions: ArrayOverlap(payload.unassignedPermissions), }, }) await Promise.all(apiKeysToRevoke.map((apiKey) => this.deleteWithEntityManager(payload.entityManager, apiKey))) } } ================================================ FILE: apps/api/src/api-key/dto/api-key-list.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { OrganizationResourcePermission } from '../../organization/enums/organization-resource-permission.enum' import { ApiKey } from '../api-key.entity' @ApiSchema({ name: 'ApiKeyList' }) export class ApiKeyListDto { @ApiProperty({ description: 'The name of the API key', example: 'My API Key', }) name: string @ApiProperty({ description: 'The masked API key value', example: 'bb_********************def', }) value: string @ApiProperty({ description: 'When the API key was created', example: '2024-03-14T12:00:00.000Z', }) createdAt: Date @ApiProperty({ description: 'The list of organization resource permissions assigned to the API key', enum: OrganizationResourcePermission, isArray: true, }) permissions: OrganizationResourcePermission[] @ApiProperty({ description: 'When the API key was last used', example: '2024-03-14T12:00:00.000Z', nullable: true, }) lastUsedAt?: Date @ApiProperty({ description: 'When the API key expires', example: '2024-03-14T12:00:00.000Z', nullable: true, }) expiresAt?: Date @ApiProperty({ description: 'The user ID of the user who created the API key', example: '123', }) userId: string constructor(partial: Partial) { Object.assign(this, partial) } static fromApiKey(apiKey: ApiKey): ApiKeyListDto { const maskedValue = `${apiKey.keyPrefix}********************${apiKey.keySuffix}` return new ApiKeyListDto({ name: apiKey.name, value: maskedValue, createdAt: apiKey.createdAt, permissions: apiKey.permissions, lastUsedAt: apiKey.lastUsedAt, expiresAt: apiKey.expiresAt, userId: apiKey.userId, }) } } ================================================ FILE: apps/api/src/api-key/dto/api-key-response.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { OrganizationResourcePermission } from '../../organization/enums/organization-resource-permission.enum' import { ApiKey } from '../api-key.entity' @ApiSchema({ name: 'ApiKeyResponse' }) export class ApiKeyResponseDto { @ApiProperty({ description: 'The name of the API key', example: 'My API Key', }) name: string @ApiProperty({ description: 'The API key value', example: 'bb_sk_1234567890abcdef', }) value: string @ApiProperty({ description: 'When the API key was created', example: '2024-03-14T12:00:00.000Z', }) createdAt: Date @ApiProperty({ description: 'The list of organization resource permissions assigned to the API key', enum: OrganizationResourcePermission, isArray: true, }) permissions: OrganizationResourcePermission[] @ApiProperty({ description: 'When the API key expires', example: '2025-06-09T12:00:00.000Z', nullable: true, }) expiresAt?: Date static fromApiKey(apiKey: ApiKey, value: string): ApiKeyResponseDto { return { name: apiKey.name, value, createdAt: apiKey.createdAt, permissions: apiKey.permissions, expiresAt: apiKey.expiresAt, } } } ================================================ FILE: apps/api/src/api-key/dto/create-api-key.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { IsArray, IsDate, IsEnum, IsNotEmpty, IsOptional, IsString } from 'class-validator' import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' import { OrganizationResourcePermission } from '../../organization/enums/organization-resource-permission.enum' import { Type } from 'class-transformer' @ApiSchema({ name: 'CreateApiKey' }) export class CreateApiKeyDto { @ApiProperty({ description: 'The name of the API key', example: 'My API Key', required: true, }) @IsNotEmpty() @IsString() name: string @ApiProperty({ description: 'The list of organization resource permissions explicitly assigned to the API key', enum: OrganizationResourcePermission, isArray: true, required: true, }) @IsArray() @IsEnum(OrganizationResourcePermission, { each: true }) permissions: OrganizationResourcePermission[] @ApiPropertyOptional({ description: 'When the API key expires', example: '2025-06-09T12:00:00.000Z', nullable: true, }) @IsOptional() @Type(() => Date) @IsDate() expiresAt?: Date } ================================================ FILE: apps/api/src/app.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */
import { Module, NestModule, MiddlewareConsumer, RequestMethod, ExecutionContext } from '@nestjs/common'
import { VersionHeaderMiddleware } from './common/middleware/version-header.middleware'
import { FailedAuthRateLimitMiddleware } from './common/middleware/failed-auth-rate-limit.middleware'
import { AppService } from './app.service'
import { UserModule } from './user/user.module'
import { TypeOrmModule } from '@nestjs/typeorm'
import { SandboxModule } from './sandbox/sandbox.module'
import { AuthModule } from './auth/auth.module'
import { ServeStaticModule } from '@nestjs/serve-static'
import { join } from 'path'
import { ApiKeyModule } from './api-key/api-key.module'
import { seconds, ThrottlerModule } from '@nestjs/throttler'
import { DockerRegistryModule } from './docker-registry/docker-registry.module'
import { RedisModule, getRedisConnectionToken } from '@nestjs-modules/ioredis'
import { ScheduleModule } from '@nestjs/schedule'
import { EventEmitterModule } from '@nestjs/event-emitter'
import { UsageModule } from './usage/usage.module'
import { AnalyticsModule } from './analytics/analytics.module'
import { OrganizationModule } from './organization/organization.module'
import { EmailModule } from './email/email.module'
import { TypedConfigService } from './config/typed-config.service'
import { TypedConfigModule } from './config/typed-config.module'
import { NotificationModule } from './notification/notification.module'
import { WebhookModule } from './webhook/webhook.module'
import { ObjectStorageModule } from './object-storage/object-storage.module'
import { CustomNamingStrategy } from './common/utils/naming-strategy.util'
import { MaintenanceMiddleware } from './common/middleware/maintenance.middleware'
import { AuditModule } from './audit/audit.module'
import { HealthModule } from './health/health.module'
import { OpenFeatureModule } from '@openfeature/nestjs-sdk'
import { OpenFeaturePostHogProvider } from './common/providers/openfeature-posthog.provider'
import { LoggerModule } from 'nestjs-pino'
import { getPinoTransport, swapMessageAndObject } from './common/utils/pino.util'
import { Redis } from 'ioredis'
import { ThrottlerStorageRedisService } from '@nest-lab/throttler-storage-redis'
import { RegionModule } from './region/region.module'
import { BodyParserErrorModule } from './common/modules/body-parser-error.module'
import { AdminModule } from './admin/admin.module'
import { ClickHouseModule } from './clickhouse/clickhouse.module'
import { SandboxTelemetryModule } from './sandbox-telemetry/sandbox-telemetry.module'

/**
 * Root application module: wires logging, config, persistence, caching,
 * rate limiting, static asset serving and all feature modules together.
 */
@Module({
  imports: [
    // Structured HTTP logging (pino). Transport/level come from the 'log' config tree.
    LoggerModule.forRootAsync({
      useFactory: (configService: TypedConfigService) => {
        const logConfig = configService.get('log')
        const isProduction = configService.get('production')
        return {
          pinoHttp: {
            autoLogging: logConfig.requests.enabled,
            level: logConfig.level,
            hooks: {
              // Swaps pino's (obj, msg) argument order — see pino.util.
              logMethod: swapMessageAndObject,
            },
            quietReqLogger: true,
            transport: getPinoTransport(isProduction, logConfig),
          },
        }
      },
      inject: [TypedConfigService],
    }),
    // Typed config is global so every module can inject TypedConfigService.
    TypedConfigModule.forRoot({
      isGlobal: true,
    }),
    TypeOrmModule.forRootAsync({
      inject: [TypedConfigService],
      useFactory: (configService: TypedConfigService) => {
        return {
          type: 'postgres',
          host: configService.getOrThrow('database.host'),
          port: configService.getOrThrow('database.port'),
          username: configService.getOrThrow('database.username'),
          password: configService.getOrThrow('database.password'),
          database: configService.getOrThrow('database.database'),
          autoLoadEntities: true,
          migrations: [join(__dirname, 'migrations/**/*-migration.{ts,js}')],
          // Auto-run migrations when explicitly enabled, or always outside production.
          migrationsRun: configService.get('runMigrations') || !configService.getOrThrow('production'),
          namingStrategy: new CustomNamingStrategy(),
          // 'skipConnections' defers DB connection (e.g. for CLI/tooling contexts).
          manualInitialization: configService.get('skipConnections'),
          ssl: configService.get('database.tls.enabled')
            ? {
                rejectUnauthorized: configService.get('database.tls.rejectUnauthorized'),
              }
            : undefined,
          extra: {
            // pg connection-pool tuning, passed through to the driver.
            max: configService.get('database.pool.max'),
            min: configService.get('database.pool.min'),
            idleTimeoutMillis: configService.get('database.pool.idleTimeoutMillis'),
            connectionTimeoutMillis: configService.get('database.pool.connectionTimeoutMillis'),
          },
          // Query-result cache in Redis; ignoreErrors keeps queries working if Redis is down.
          cache: {
            type: 'ioredis',
            ignoreErrors: true,
            options: configService.getRedisConfig({ keyPrefix: 'typeorm:' }),
          },
          entitySkipConstructor: true,
        }
      },
    }),
    BodyParserErrorModule,
    // Serves runner binaries from the app root (everything except /api/*).
    ServeStaticModule.forRoot({
      rootPath: join(__dirname, '..'),
      exclude: ['/api/{*path}'],
      renderPath: '/runner-amd64',
      serveStaticOptions: {
        cacheControl: false,
      },
    }),
    // Serves the dashboard SPA at the site root.
    ServeStaticModule.forRoot({
      rootPath: join(__dirname, '..', 'dashboard'),
      exclude: ['/api/{*path}'],
      renderPath: '/',
      serveStaticOptions: {
        cacheControl: false,
      },
    }),
    // Default Redis connection (db 0).
    RedisModule.forRootAsync({
      inject: [TypedConfigService],
      useFactory: (configService: TypedConfigService) => ({
        type: 'single',
        options: configService.getRedisConfig(),
      }),
    }),
    // Dedicated Redis connection (db 1) for the throttler storage backend.
    RedisModule.forRootAsync(
      {
        inject: [TypedConfigService],
        useFactory: (configService: TypedConfigService) => ({
          type: 'single',
          options: configService.getRedisConfig({ db: 1 }),
        }),
      },
      'throttler',
    ),
    ThrottlerModule.forRootAsync({
      useFactory: async (redis: Redis, configService: TypedConfigService) => {
        const rateLimit = configService.get('rateLimit')
        // Only register throttlers whose ttl AND limit are both configured.
        const throttlers = [
          { name: 'anonymous', config: rateLimit.anonymous },
          { name: 'failed-auth', config: rateLimit.failedAuth },
          { name: 'authenticated', config: rateLimit.authenticated },
          { name: 'sandbox-create', config: rateLimit.sandboxCreate },
          { name: 'sandbox-lifecycle', config: rateLimit.sandboxLifecycle },
        ]
          .filter(({ config }) => config.ttl !== undefined && config.limit !== undefined)
          .map(({ name, config }) => ({
            name,
            ttl: seconds(config.ttl),
            limit: config.limit,
          }))
        return {
          throttlers,
          storage: new ThrottlerStorageRedisService(redis),
        }
      },
      inject: [getRedisConnectionToken('throttler'), TypedConfigService],
    }),
    EventEmitterModule.forRoot({
      maxListeners: 100,
    }),
    ApiKeyModule,
    AuthModule,
    UserModule,
    SandboxModule,
    DockerRegistryModule,
    ScheduleModule.forRoot(),
    UsageModule,
    AnalyticsModule,
    OrganizationModule,
    RegionModule,
    AdminModule,
    EmailModule.forRootAsync({
      inject: [TypedConfigService],
      useFactory: (configService: TypedConfigService) => {
        return {
          host: configService.get('smtp.host'),
          port: configService.get('smtp.port'),
          user: configService.get('smtp.user'),
          password: configService.get('smtp.password'),
          secure: configService.get('smtp.secure'),
          from: configService.get('smtp.from'),
          dashboardUrl: configService.getOrThrow('dashboardUrl'),
        }
      },
    }),
    NotificationModule,
    WebhookModule,
    ObjectStorageModule,
    AuditModule,
    HealthModule,
    ClickHouseModule,
    SandboxTelemetryModule,
    // Feature flags; targeting context is derived from the authenticated request user.
    OpenFeatureModule.forRoot({
      contextFactory: (request: ExecutionContext) => {
        const req = request.switchToHttp().getRequest()
        return {
          targetingKey: req.user?.userId,
          organizationId: req.user?.organizationId,
        }
      },
      // NOTE(review): reads process.env directly because this runs at decorator
      // evaluation time, before DI/TypedConfigService is available.
      defaultProvider: new OpenFeaturePostHogProvider({
        clientOptions: {
          host: process.env.POSTHOG_HOST,
        },
        apiKey: process.env.POSTHOG_API_KEY,
      }),
    }),
  ],
  controllers: [],
  providers: [AppService],
})
export class AppModule implements NestModule {
  // Global middleware, applied to every route in registration order.
  configure(consumer: MiddlewareConsumer) {
    consumer.apply(VersionHeaderMiddleware).forRoutes({ path: '{*path}', method: RequestMethod.ALL })
    consumer.apply(FailedAuthRateLimitMiddleware).forRoutes({ path: '{*path}', method: RequestMethod.ALL })
    consumer.apply(MaintenanceMiddleware).forRoutes({ path: '{*path}', method: RequestMethod.ALL })
  }
}
================================================
FILE: apps/api/src/app.service.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, OnApplicationBootstrap, OnApplicationShutdown } from '@nestjs/common' import { DockerRegistryService } from './docker-registry/services/docker-registry.service' import { RegistryType } from './docker-registry/enums/registry-type.enum' import { OrganizationService } from './organization/services/organization.service' import { UserService } from './user/user.service' import { ApiKeyService } from './api-key/api-key.service' import { EventEmitterReadinessWatcher } from '@nestjs/event-emitter' import { SnapshotService } from './sandbox/services/snapshot.service' import { SystemRole } from './user/enums/system-role.enum' import { TypedConfigService } from './config/typed-config.service' import { SchedulerRegistry } from '@nestjs/schedule' import { RegionService } from './region/services/region.service' import { RunnerService } from './sandbox/services/runner.service' import { RunnerAdapterFactory } from './sandbox/runner-adapter/runnerAdapter' import { RegionType } from './region/enums/region-type.enum' import { RunnerState } from './sandbox/enums/runner-state.enum' export const DAYTONA_ADMIN_USER_ID = 'daytona-admin' @Injectable() export class AppService implements OnApplicationBootstrap, OnApplicationShutdown { private readonly logger = new Logger(AppService.name) constructor( private readonly dockerRegistryService: DockerRegistryService, private readonly configService: TypedConfigService, private readonly userService: UserService, private readonly organizationService: OrganizationService, private readonly apiKeyService: ApiKeyService, private readonly eventEmitterReadinessWatcher: EventEmitterReadinessWatcher, private readonly snapshotService: SnapshotService, private readonly schedulerRegistry: SchedulerRegistry, private readonly regionService: RegionService, private readonly runnerService: RunnerService, private readonly runnerAdapterFactory: RunnerAdapterFactory, ) {} async 
onApplicationShutdown(signal?: string) { this.logger.log(`Received shutdown signal: ${signal}. Shutting down gracefully...`) await this.stopAllCronJobs() } async onApplicationBootstrap() { if (this.configService.get('disableCronJobs') || this.configService.get('maintananceMode')) { await this.stopAllCronJobs() } await this.eventEmitterReadinessWatcher.waitUntilReady() await this.initializeDefaultRegion() await this.initializeAdminUser() await this.initializeTransientRegistry() await this.initializeBackupRegistry() await this.initializeInternalRegistry() await this.initializeBackupRegistry() // Default runner init is not awaited because v2 runners depend on the API to be ready this.initializeDefaultRunner() .then(() => this.initializeDefaultSnapshot()) .catch((error) => { this.logger.error('Error initializing default runner', error) }) } private async stopAllCronJobs(): Promise { for (const cronName of this.schedulerRegistry.getCronJobs().keys()) { this.logger.debug(`Stopping cron job: ${cronName}`) this.schedulerRegistry.deleteCronJob(cronName) } } private async initializeDefaultRegion(): Promise { const existingRegion = await this.regionService.findOne(this.configService.getOrThrow('defaultRegion.id')) if (existingRegion) { return } this.logger.log('Initializing default region...') await this.regionService.create( { id: this.configService.getOrThrow('defaultRegion.id'), name: this.configService.getOrThrow('defaultRegion.name'), enforceQuotas: this.configService.getOrThrow('defaultRegion.enforceQuotas'), regionType: RegionType.SHARED, }, null, ) this.logger.log(`Default region created successfully: ${this.configService.getOrThrow('defaultRegion.name')}`) } private async initializeDefaultRunner(): Promise { if (!this.configService.get('defaultRunner.name')) { return } const defaultRegionId = this.configService.getOrThrow('defaultRegion.id') const existingRunners = await this.runnerService.findAllByRegion(defaultRegionId) if ( existingRunners.length > 0 && 
existingRunners.some((r) => r.name === this.configService.get('defaultRunner.name')) ) { return } this.logger.log(`Creating default runner: ${this.configService.getOrThrow('defaultRunner.name')}`) const runnerVersion = this.configService.getOrThrow('defaultRunner.apiVersion') if (runnerVersion === '0') { const { runner } = await this.runnerService.create({ apiUrl: this.configService.getOrThrow('defaultRunner.apiUrl'), proxyUrl: this.configService.getOrThrow('defaultRunner.proxyUrl'), apiKey: this.configService.getOrThrow('defaultRunner.apiKey'), cpu: this.configService.getOrThrow('defaultRunner.cpu'), memoryGiB: this.configService.getOrThrow('defaultRunner.memory'), diskGiB: this.configService.getOrThrow('defaultRunner.disk'), regionId: this.configService.getOrThrow('defaultRegion.id'), domain: this.configService.getOrThrow('defaultRunner.domain'), apiVersion: runnerVersion, name: this.configService.getOrThrow('defaultRunner.name'), }) const runnerAdapter = await this.runnerAdapterFactory.create(runner) this.logger.log(`Waiting for runner ${runner.name} to be healthy...`) for (let i = 0; i < 30; i++) { try { await runnerAdapter.healthCheck() this.logger.log(`Runner ${runner.name} is healthy`) return } catch { // ignore } await new Promise((resolve) => setTimeout(resolve, 1000)) } } else if (runnerVersion === '2') { const { runner } = await this.runnerService.create({ apiKey: this.configService.getOrThrow('defaultRunner.apiKey'), regionId: this.configService.getOrThrow('defaultRegion.id'), apiVersion: runnerVersion, name: this.configService.getOrThrow('defaultRunner.name'), }) this.logger.log(`Waiting for runner ${runner.name} to be healthy...`) for (let i = 0; i < 30; i++) { const { state } = await this.runnerService.findOneFullOrFail(runner.id) if (state === RunnerState.READY) { this.logger.log(`Runner ${runner.name} is healthy`) return } await new Promise((resolve) => setTimeout(resolve, 1000)) } } this.logger.log( `Default runner 
${this.configService.getOrThrow('defaultRunner.name')} created successfully but didn't pass health check`, ) } private async initializeAdminUser(): Promise { if (await this.userService.findOne(DAYTONA_ADMIN_USER_ID)) { return } const user = await this.userService.create({ id: DAYTONA_ADMIN_USER_ID, name: 'Daytona Admin', personalOrganizationQuota: { totalCpuQuota: this.configService.getOrThrow('admin.totalCpuQuota'), totalMemoryQuota: this.configService.getOrThrow('admin.totalMemoryQuota'), totalDiskQuota: this.configService.getOrThrow('admin.totalDiskQuota'), maxCpuPerSandbox: this.configService.getOrThrow('admin.maxCpuPerSandbox'), maxMemoryPerSandbox: this.configService.getOrThrow('admin.maxMemoryPerSandbox'), maxDiskPerSandbox: this.configService.getOrThrow('admin.maxDiskPerSandbox'), snapshotQuota: this.configService.getOrThrow('admin.snapshotQuota'), maxSnapshotSize: this.configService.getOrThrow('admin.maxSnapshotSize'), volumeQuota: this.configService.getOrThrow('admin.volumeQuota'), }, personalOrganizationDefaultRegionId: this.configService.getOrThrow('defaultRegion.id'), role: SystemRole.ADMIN, }) const personalOrg = await this.organizationService.findPersonal(user.id) const { value } = await this.apiKeyService.createApiKey( personalOrg.id, user.id, DAYTONA_ADMIN_USER_ID, [], undefined, this.configService.getOrThrow('admin.apiKey'), ) this.logger.log( ` ========================================= ========================================= Admin user created with API key: ${value} ========================================= =========================================`, ) } private async initializeTransientRegistry(): Promise { const existingRegistry = await this.dockerRegistryService.getAvailableTransientRegistry( this.configService.getOrThrow('defaultRegion.id'), ) if (existingRegistry) { return } const registryUrl = this.configService.getOrThrow('transientRegistry.url') const registryAdmin = this.configService.getOrThrow('transientRegistry.admin') const 
registryPassword = this.configService.getOrThrow('transientRegistry.password') const registryProjectId = this.configService.getOrThrow('transientRegistry.projectId') if (!registryUrl || !registryAdmin || !registryPassword || !registryProjectId) { this.logger.warn('Registry configuration not found, skipping transient registry setup') return } this.logger.log('Initializing default transient registry...') await this.dockerRegistryService.create({ name: 'Transient Registry', url: registryUrl, username: registryAdmin, password: registryPassword, project: registryProjectId, registryType: RegistryType.TRANSIENT, isDefault: true, }) this.logger.log('Default transient registry initialized successfully') } private async initializeInternalRegistry(): Promise { const existingRegistry = await this.dockerRegistryService.getAvailableInternalRegistry( this.configService.getOrThrow('defaultRegion.id'), ) if (existingRegistry) { return } const registryUrl = this.configService.getOrThrow('internalRegistry.url') const registryAdmin = this.configService.getOrThrow('internalRegistry.admin') const registryPassword = this.configService.getOrThrow('internalRegistry.password') const registryProjectId = this.configService.getOrThrow('internalRegistry.projectId') if (!registryUrl || !registryAdmin || !registryPassword || !registryProjectId) { this.logger.warn('Registry configuration not found, skipping internal registry setup') return } this.logger.log('Initializing default internal registry...') await this.dockerRegistryService.create({ name: 'Internal Registry', url: registryUrl, username: registryAdmin, password: registryPassword, project: registryProjectId, registryType: RegistryType.INTERNAL, isDefault: true, }) this.logger.log('Default internal registry initialized successfully') } private async initializeBackupRegistry(): Promise { const existingRegistry = await this.dockerRegistryService.getAvailableBackupRegistry( this.configService.getOrThrow('defaultRegion.id'), ) if 
(existingRegistry) { return } const registryUrl = this.configService.getOrThrow('internalRegistry.url') const registryAdmin = this.configService.getOrThrow('internalRegistry.admin') const registryPassword = this.configService.getOrThrow('internalRegistry.password') const registryProjectId = this.configService.getOrThrow('internalRegistry.projectId') if (!registryUrl || !registryAdmin || !registryPassword || !registryProjectId) { this.logger.warn('Registry configuration not found, skipping backup registry setup') return } this.logger.log('Initializing default backup registry...') await this.dockerRegistryService.create( { name: 'Backup Registry', url: registryUrl, username: registryAdmin, password: registryPassword, project: registryProjectId, registryType: RegistryType.BACKUP, isDefault: true, }, undefined, true, ) this.logger.log('Default backup registry initialized successfully') } private async initializeDefaultSnapshot(): Promise { const adminPersonalOrg = await this.organizationService.findPersonal(DAYTONA_ADMIN_USER_ID) try { const existingSnapshot = await this.snapshotService.getSnapshotByName( this.configService.getOrThrow('defaultSnapshot'), adminPersonalOrg.id, ) if (existingSnapshot) { return } } catch { this.logger.log('Default snapshot not found, creating...') } const defaultSnapshot = this.configService.getOrThrow('defaultSnapshot') await this.snapshotService.createFromPull( adminPersonalOrg, { name: defaultSnapshot, imageName: defaultSnapshot, }, true, ) this.logger.log('Default snapshot created successfully') } } ================================================ FILE: apps/api/src/assets/.gitkeep ================================================ ================================================ FILE: apps/api/src/assets/templates/organization-invitation.template.ejs ================================================ Daytona Organization Invitation
Daytona logo
 

<% if (invitedBy) { %><%= invitedBy %> has invited you to join the <%= organizationName %> organization<% } else { %>You have been invited to join the <%= organizationName %> organization<% } %>

 

This invitation expires on <%= expiresAt %>

 
Join <%= organizationName %>
 

Button not working? Paste the following link into your browser:
<%= invitationLink %>

 

* If you were not expecting this invitation, please disregard this email.

 

Copyright © <%= new Date().getFullYear() %> Daytona Platforms, Inc. All Rights Reserved

                                                           
================================================
FILE: apps/api/src/audit/adapters/audit-opensearch.adapter.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */
import { BadRequestException, Logger, OnModuleInit } from '@nestjs/common'
import { errors } from '@opensearch-project/opensearch'
import { AuditLog } from '../entities/audit-log.entity'
import { PaginatedList } from '../../common/interfaces/paginated-list.interface'
import { AuditLogStorageAdapter } from '../interfaces/audit-storage.interface'
import { AuditLogFilter } from '../interfaces/audit-filter.interface'
import { TypedConfigService } from '../../config/typed-config.service'
import { OpensearchClient } from 'nestjs-opensearch'
import { PolicyEnvelope } from '@opensearch-project/opensearch/api/_types/ism._common.js'
import { QueryContainer } from '@opensearch-project/opensearch/api/_types/_common.query_dsl.js'
import { Bulk_RequestBody, Search_RequestBody, Search_Response } from '@opensearch-project/opensearch/api/index.js'
import { TotalHits } from '@opensearch-project/opensearch/api/_types/_core.search.js'
import { isMatch } from 'es-toolkit/compat'

// Safe limit for offset-based pagination to avoid hitting OpenSearch's 10000 limit
const MAX_OFFSET_PAGINATION_LIMIT = 10000

/**
 * Audit-log storage backed by an OpenSearch data stream.
 *
 * On module init it installs an index template, an ISM retention policy and
 * the data stream itself. Reads support both offset pagination (within the
 * 10k window) and cursor pagination via a base64-encoded search_after token.
 *
 * FIX(extraction): generic type arguments (Promise<void>,
 * Promise<PaginatedList<AuditLog>>) were stripped in the dump and have been
 * restored to make the file valid TypeScript again.
 */
export class AuditOpenSearchStorageAdapter implements AuditLogStorageAdapter, OnModuleInit {
  private readonly logger = new Logger(AuditOpenSearchStorageAdapter.name)
  private indexName: string

  constructor(
    private readonly configService: TypedConfigService,
    private readonly client: OpensearchClient,
  ) {
    this.indexName = configService.getOrThrow('audit.publish.opensearchIndexName')
  }

  async onModuleInit(): Promise<void> {
    await this.putIndexTemplate()
    await this.setupISM()
    await this.createDataStream()
    this.logger.log('OpenSearch audit storage adapter initialized')
  }

  /**
   * Bulk-writes audit logs. Uses `create` with the log id as document id so
   * retries are idempotent: 409s and version conflicts are tolerated.
   */
  async write(auditLogs: AuditLog[]): Promise<void> {
    try {
      const documents = auditLogs.map((auditLog) => ({
        '@timestamp': new Date(), // Required field for data streams
        ...auditLog,
      }))

      // Include document ID to prevent duplicates
      const bulkBody: Bulk_RequestBody = documents.flatMap((document) => [
        { create: { _index: this.indexName, _id: document.id } },
        document,
      ])

      const response = await this.client.bulk({
        body: bulkBody,
        refresh: false,
      })

      if (response.body.errors) {
        const errors = response.body.items
          .filter((item: any) => item.create?.error)
          .map((item: any) => item.create.error)

        // Check if any errors are not 409 (idempotent errors are OK) or version conflicts (also idempotent)
        const nonIdempotentErrors = errors.filter(
          (error: any) => error.status !== 409 && error.type !== 'version_conflict_engine_exception',
        )
        if (nonIdempotentErrors.length > 0) {
          throw new Error(`OpenSearch bulk operation failed: ${JSON.stringify(nonIdempotentErrors)}`)
        }
      }

      this.logger.debug(`Saved ${auditLogs.length} audit logs to OpenSearch`)
    } catch (error) {
      this.logger.error(`Failed to save audit log to OpenSearch: ${error.message}`)
      throw error
    }
  }

  async getAllLogs(
    page?: number,
    limit?: number,
    filters?: AuditLogFilter,
    nextToken?: string,
  ): Promise<PaginatedList<AuditLog>> {
    const query = this.buildDateRangeQuery(filters)
    const searchBody = this.buildSearchBody(query, page, limit, nextToken)
    const response = await this.executeSearch(searchBody)
    return this.processSearchResponse(response, page, limit, nextToken, query)
  }

  async getOrganizationLogs(
    organizationId: string,
    page?: number,
    limit?: number,
    filters?: AuditLogFilter,
    nextToken?: string,
  ): Promise<PaginatedList<AuditLog>> {
    if (!organizationId) {
      throw new Error('Organization ID is required')
    }
    const query = this.buildOrganizationQuery(organizationId, filters)
    const searchBody = this.buildSearchBody(query, page, limit, nextToken)
    const response = await this.executeSearch(searchBody)
    return this.processSearchResponse(response, page, limit, nextToken, query)
  }

  /** Creates the data stream; tolerates "already exists" for idempotent startup. */
  private async createDataStream() {
    try {
      await this.client.indices.createDataStream({ name: this.indexName })
      this.logger.debug(`Created data stream: ${this.indexName}.`)
    } catch (error) {
      if (error instanceof errors.ResponseError && error.body.error.type === 'resource_already_exists_exception') {
        this.logger.debug(`Data stream already exists: ${this.indexName}. Skipping creation.`)
        return
      }
      throw error
    }
  }

  /** Installs the index template: *Id fields as keywords, other strings unindexed. */
  private async putIndexTemplate() {
    const templateName = `${this.indexName}-template`
    await this.client.indices.putIndexTemplate({
      name: templateName,
      body: {
        index_patterns: [`${this.indexName}*`],
        data_stream: {},
        template: {
          settings: {
            index: {
              number_of_shards: 1,
              number_of_replicas: 1,
            },
          },
          mappings: {
            dynamic: 'true',
            dynamic_templates: [
              {
                ids_as_keyword: {
                  match: '*Id',
                  mapping: { type: 'keyword', index: true },
                },
              },
              {
                default_strings: {
                  match: '*',
                  match_mapping_type: 'string',
                  mapping: { type: 'keyword', index: false },
                },
              },
              {
                non_queryable_fields: {
                  match: '*',
                  match_mapping_type: 'object',
                  mapping: {
                    type: 'object',
                    enabled: false,
                  },
                },
              },
            ],
            properties: {
              id: { type: 'keyword' },
              actorEmail: { type: 'keyword' },
              action: { type: 'keyword' },
              targetType: { type: 'keyword' },
              statusCode: { type: 'integer' },
              createdAt: { type: 'date' },
            },
          },
        },
      },
    })
  }

  /** Maps an OpenSearch `_source` document back into an AuditLog entity. */
  private mapSourceToAuditLog(source: any): AuditLog {
    return new AuditLog({
      id: source.id,
      actorId: source.actorId,
      actorEmail: source.actorEmail,
      organizationId: source.organizationId,
      action: source.action,
      targetType: source.targetType,
      targetId: source.targetId,
      statusCode: source.statusCode,
      errorMessage: source.errorMessage,
      ipAddress: source.ipAddress,
      userAgent: source.userAgent,
      source: source.source,
      metadata: source.metadata,
      createdAt: new Date(source.createdAt),
    })
  }

  /** Best-effort ISM retention setup; failures are logged, never fatal. */
  private async setupISM(): Promise<void> {
    try {
      const retentionDays = this.configService.get('audit.retentionDays') || 0
      if (!retentionDays || retentionDays < 1) {
        this.logger.debug('Audit log retention not configured, skipping ISM setup')
        return
      }
      await this.createISMPolicy(retentionDays)
      await this.applyISMPolicyToIndexTemplate()
      this.logger.debug(`OpenSearch ISM policy configured for ${retentionDays} days retention`)
    } catch (error) {
      this.logger.warn(`Failed to setup ISM policy: ${error.message}`)
    }
  }

  /** Creates or updates the hot→delete ISM lifecycle policy (optimistic concurrency). */
  private async createISMPolicy(retentionDays: number): Promise<void> {
    const policyName = `${this.indexName}-lifecycle-policy`
    const policy: PolicyEnvelope = {
      policy: {
        description: `Lifecycle policy for audit logs with ${retentionDays} days retention`,
        default_state: 'hot',
        states: [
          {
            name: 'hot',
            actions: [
              {
                rollover: {
                  // incorrect client type definitions
                  // ref: https://github.com/opensearch-project/opensearch-js/issues/1001
                  min_index_age: '30d' as any,
                  min_primary_shard_size: '20gb' as any,
                  min_doc_count: 20_000_000,
                },
              },
            ],
            transitions: [
              {
                state_name: 'delete',
                conditions: {
                  min_index_age: `${retentionDays}d`, // Delete after retention period
                },
              },
            ],
          },
          {
            name: 'delete',
            actions: [
              {
                delete: {},
              },
            ],
          },
        ],
        ism_template: [
          {
            index_patterns: [`${this.indexName}*`],
            priority: 100,
          },
        ],
      },
    }

    try {
      // Check whether the policy already exists
      const existingPolicy = await this.client.ism.getPolicy({
        policy_id: policyName,
      })
      // Check whether the policy needs to be updated
      if (isMatch(existingPolicy.body, policy)) {
        this.logger.debug(`ISM policy ${policyName} is up to date`)
      } else {
        this.logger.debug(`ISM policy ${policyName} is out of date. Updating it.`)
        await this.client.ism.putPolicy({
          policy_id: policyName,
          if_primary_term: existingPolicy.body._primary_term,
          if_seq_no: existingPolicy.body._seq_no,
          body: policy,
        })
        this.logger.debug(`ISM policy ${policyName} updated`)
      }
    } catch (error) {
      if (error instanceof errors.ResponseError && error.statusCode === 404) {
        this.logger.debug(`ISM policy ${policyName} not found, creating it.`)
        await this.client.ism.putPolicy({
          policy_id: policyName,
          body: policy,
        })
        this.logger.debug(`ISM policy ${policyName} created`)
        return
      }
      this.logger.error(`Failed to create ISM policy`, error)
      throw error
    }
  }

  /** Attaches the ISM policy to the existing index template (best effort). */
  private async applyISMPolicyToIndexTemplate(): Promise<void> {
    const templateName = `${this.indexName}-template`
    const policyName = `${this.indexName}-lifecycle-policy`
    try {
      // Get existing template
      const existingTemplate = await this.client.indices.getIndexTemplate({
        name: templateName,
      })
      if (!existingTemplate.body?.index_templates?.[0]) {
        this.logger.debug(`Index template ${templateName} not found, cannot apply ILM policy`)
        return
      }

      // Update template with ILM policy
      const template = existingTemplate.body.index_templates[0].index_template

      // Add ILM settings to the template
      if (!template.template) template.template = {}
      if (!template.template.settings) template.template.settings = {}
      if (!template.template.settings.index) template.template.settings.index = {}
      template.template.settings.index = {
        ...template.template.settings.index,
        'plugins.index_state_management.policy_id': policyName,
        'plugins.index_state_management.rollover_alias': this.indexName,
        number_of_shards: 1,
        number_of_replicas: 1,
        refresh_interval: '5s',
      }

      // Update the template
      await this.client.indices.putIndexTemplate({
        name: templateName,
        body: template,
      })
      this.logger.debug(`Applied ILM policy ${policyName} to index template ${templateName}`)
    } catch (error) {
      this.logger.error(`Failed to apply ILM policy to index template: ${error.message}`)
    }
  }

  private buildDateRangeQuery(filters?: AuditLogFilter): QueryContainer {
    return {
      bool: {
        filter: [
          {
            range: {
              createdAt: {
                gte: filters?.from?.toISOString(),
                lte: filters?.to?.toISOString(),
              },
            },
          },
        ],
      },
    }
  }

  private buildOrganizationQuery(organizationId: string, filters?: AuditLogFilter): QueryContainer {
    return {
      bool: {
        filter: [
          {
            term: { organizationId },
          },
          {
            range: {
              createdAt: {
                gte: filters?.from?.toISOString(),
                lte: filters?.to?.toISOString(),
              },
            },
          },
        ],
      },
    }
  }

  /**
   * Builds the search body: cursor (search_after) pagination when nextToken
   * is present, otherwise offset pagination capped at the 10k window.
   * Requests one extra hit to detect whether more results exist.
   */
  private buildSearchBody(
    query: QueryContainer,
    page?: number,
    limit?: number,
    nextToken?: string,
  ): Search_RequestBody {
    const size = limit
    const searchBody: Search_RequestBody = {
      query,
      sort: [{ createdAt: { order: 'desc' } }, { id: { order: 'desc' } }],
      size: size + 1, // Request one extra to check if there are more results
    }

    if (nextToken) {
      // Cursor-based pagination using search_after
      try {
        const searchAfter = JSON.parse(Buffer.from(nextToken, 'base64').toString())
        searchBody.search_after = searchAfter
        this.logger.debug(`Using cursor-based pagination with search_after: ${JSON.stringify(searchAfter)}`)
      } catch {
        throw new BadRequestException(`Invalid nextToken provided: ${nextToken}`)
      }
    } else {
      // Offset-based pagination - only use when within safe limits
      const from = (page - 1) * limit
      if (from + size <= MAX_OFFSET_PAGINATION_LIMIT) {
        searchBody.from = from
        this.logger.debug(`Using offset-based pagination: from=${from}, size=${size + 1}`)
      } else {
        throw new BadRequestException(
          `Offset-based pagination not supported for page ${page} with limit ${limit}. Please use cursor-based pagination with nextToken parameter instead.`,
        )
      }
    }

    return searchBody
  }

  private async executeSearch(searchBody: Search_RequestBody) {
    return await this.client.search({
      index: this.indexName,
      body: searchBody,
      track_total_hits: MAX_OFFSET_PAGINATION_LIMIT,
    })
  }

  /**
   * Converts a search response into a PaginatedList, issuing a nextToken when
   * the caller is (or must switch to) cursor pagination, and falling back to
   * a count query when the total is only a lower bound ('gte').
   */
  private async processSearchResponse(
    response: Search_Response,
    page?: number,
    limit?: number,
    nextToken?: string,
    query?: QueryContainer,
  ): Promise<PaginatedList<AuditLog>> {
    const size = limit
    const hits = response.body.hits?.hits || []
    const totalHits = response.body.hits?.total as TotalHits
    const hasMore = hits.length > size
    const items = hasMore ? hits.slice(0, size) : hits

    // Generate nextToken when there are more results and we're approaching limits
    let nextTokenResult: string | undefined
    const currentOffset = nextToken ? 0 : (page - 1) * limit // If using cursor, we don't know the exact offset
    const nextPageOffset = currentOffset + limit
    const wouldExceedLimit = nextPageOffset >= MAX_OFFSET_PAGINATION_LIMIT

    // Only generate nextToken if we're already using cursor pagination OR if the next page would exceed the limit
    if (hasMore && items.length > 0 && (nextToken || wouldExceedLimit)) {
      const lastItem = items[items.length - 1]
      const searchAfter = [lastItem._source.createdAt, lastItem._source.id]
      nextTokenResult = Buffer.from(JSON.stringify(searchAfter)).toString('base64')
    }

    let total = totalHits?.value
    let totalPages = Math.ceil(total / limit)
    if (totalHits?.relation === 'gte') {
      // TODO: This should be cached to avoid hitting OpenSearch for every request
      const totalResponse = await this.client.count({
        index: this.indexName,
        body: { query },
      })
      total = totalResponse.body.count
      totalPages = Math.ceil(total / limit)
    }

    return {
      items: items.map((hit) => this.mapSourceToAuditLog(hit._source)),
      total,
      page: page || 1,
      totalPages,
      nextToken: nextTokenResult,
    }
  }
}
================================================
FILE: apps/api/src/audit/adapters/audit-typeorm.adapter.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */
import { Logger } from '@nestjs/common'
import { AuditLogStorageAdapter } from '../interfaces/audit-storage.interface'
import { InjectRepository } from '@nestjs/typeorm'
import { AuditLog } from '../entities/audit-log.entity'
import { FindManyOptions, Repository } from 'typeorm'
import { PaginatedList } from '../../common/interfaces/paginated-list.interface'
import { AuditLogFilter } from '../interfaces/audit-filter.interface'
import { createRangeFilter } from '../../common/utils/range-filter'

/**
 * Read-only audit-log storage adapter backed by the relational database.
 *
 * Writes are rejected (logs are ingested via the publisher pipeline instead);
 * reads use offset pagination with an optional createdAt range filter.
 *
 * FIX(extraction): restored stripped generic type arguments
 * (Repository<AuditLog>, FindManyOptions<AuditLog>, Promise<PaginatedList<AuditLog>>).
 * FIX: `page` now defaults to 1 — previously an omitted page made
 * `skip: (page - 1) * limit` evaluate to NaN.
 */
export class AuditTypeormStorageAdapter implements AuditLogStorageAdapter {
  private readonly logger = new Logger(AuditTypeormStorageAdapter.name)

  constructor(
    @InjectRepository(AuditLog)
    private readonly auditLogRepository: Repository<AuditLog>,
  ) {}

  /** Writing is unsupported here by design. */
  async write(auditLogs: AuditLog[]): Promise<void> {
    throw new Error('Typeorm adapter does not support writing audit logs.')
  }

  async getAllLogs(page = 1, limit = 1000, filters?: AuditLogFilter): Promise<PaginatedList<AuditLog>> {
    const options: FindManyOptions<AuditLog> = {
      order: {
        createdAt: 'DESC',
      },
      skip: (page - 1) * limit,
      take: limit,
      where: {
        createdAt: createRangeFilter(filters?.from, filters?.to),
      },
    }

    const [items, total] = await this.auditLogRepository.findAndCount(options)
    return {
      items,
      total,
      page: page,
      totalPages: Math.ceil(total / limit),
    }
  }

  async getOrganizationLogs(
    organizationId: string,
    page = 1,
    limit = 1000,
    filters?: AuditLogFilter,
  ): Promise<PaginatedList<AuditLog>> {
    const options: FindManyOptions<AuditLog> = {
      order: {
        createdAt: 'DESC',
      },
      skip: (page - 1) * limit,
      take: limit,
      where: [
        {
          organizationId,
          createdAt: createRangeFilter(filters?.from, filters?.to),
        },
      ],
    }

    const [items, total] = await this.auditLogRepository.findAndCount(options)
    return {
      items,
      total,
      page: page,
      totalPages: Math.ceil(total / limit),
    }
  }
}
================================================
FILE: apps/api/src/audit/audit.module.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */
import { Module } from '@nestjs/common'
import { TypeOrmModule } from '@nestjs/typeorm'
import { OrganizationModule } from '../organization/organization.module'
import { AuditLog } from './entities/audit-log.entity'
import { AuditService } from './services/audit.service'
import { AuditInterceptor } from './interceptors/audit.interceptor'
import { AuditLogSubscriber } from './subscribers/audit-log.subscriber'
import { RedisLockProvider } from '../sandbox/common/redis-lock.provider'
import { AuditController } from './controllers/audit.controller'
import { AuditKafkaConsumerController } from './publishers/kafka/audit-kafka-consumer.controller'
import { ClientsModule, Transport } from '@nestjs/microservices'
import { TypedConfigService } from '../config/typed-config.service'
import { Partitioners } from 'kafkajs'
import { OpensearchModule } from 'nestjs-opensearch'
import { AuditStorageAdapterProvider } from './providers/audit-storage.provider'
import { AuditPublisherProvider } from './providers/audit-publisher.provider'
import { AUDIT_KAFKA_SERVICE } from './constants/audit-tokens'

/**
 * Audit subsystem: captures request audit logs (interceptor + subscriber),
 * publishes them via Kafka, and exposes storage-backed read endpoints.
 */
@Module({
  imports: [
    OrganizationModule,
    TypeOrmModule.forFeature([AuditLog]),
    // Producer-only Kafka client used to publish audit-log events.
    ClientsModule.registerAsync([
      {
        name: AUDIT_KAFKA_SERVICE,
        inject: [TypedConfigService],
        useFactory: (configService: TypedConfigService) => {
          return {
            transport: Transport.KAFKA,
            options: {
              producerOnlyMode: true,
              client: configService.getKafkaClientConfig(),
              producer: {
                allowAutoTopicCreation: true,
                createPartitioner: Partitioners.DefaultPartitioner,
                // Idempotent producer avoids duplicate audit entries on retry.
                idempotent: true,
              },
            },
          }
        },
      },
    ]),
    // OpenSearch connection used by the OpenSearch storage adapter.
    OpensearchModule.forRootAsync({
      inject: [TypedConfigService],
      useFactory: (configService: TypedConfigService) => {
        return configService.getOpenSearchConfig()
      },
    }),
  ],
  controllers: [AuditController, AuditKafkaConsumerController],
  providers: [
    AuditService,
    AuditInterceptor,
    AuditLogSubscriber,
    RedisLockProvider,
    // Providers that pick the concrete storage/publisher implementation from config.
    AuditStorageAdapterProvider,
    AuditPublisherProvider,
  ],
  exports: [AuditService, AuditInterceptor],
})
export class AuditModule {}
================================================
FILE: apps/api/src/audit/constants/audit-log-events.constant.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */
// Event names emitted through the application event emitter for audit logs.
export const AuditLogEvents = {
  CREATED: 'audit-log.created',
  UPDATED: 'audit-log.updated',
} as const
================================================
FILE: apps/api/src/audit/constants/audit-tokens.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */
// DI tokens and the Kafka topic name shared across the audit subsystem.
export const AUDIT_STORAGE_ADAPTER = 'AUDIT_STORAGE_ADAPTER'
export const AUDIT_LOG_PUBLISHER = 'AUDIT_LOG_PUBLISHER'
export const AUDIT_KAFKA_SERVICE = 'AUDIT_KAFKA_SERVICE'
export const AUDIT_KAFKA_TOPIC = 'audit-logs'
================================================
FILE: apps/api/src/audit/controllers/audit.controller.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get, Param, Query, UseGuards } from '@nestjs/common' import { ApiTags, ApiOperation, ApiResponse, ApiBearerAuth, ApiOAuth2, ApiParam } from '@nestjs/swagger' import { AuditLogDto } from '../dto/audit-log.dto' import { PaginatedAuditLogsDto } from '../dto/paginated-audit-logs.dto' import { AuditService } from '../services/audit.service' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { SystemActionGuard } from '../../auth/system-action.guard' import { RequiredSystemRole } from '../../common/decorators/required-role.decorator' import { OrganizationResourceActionGuard } from '../../organization/guards/organization-resource-action.guard' import { RequiredOrganizationResourcePermissions } from '../../organization/decorators/required-organization-resource-permissions.decorator' import { OrganizationResourcePermission } from '../../organization/enums/organization-resource-permission.enum' import { SystemRole } from '../../user/enums/system-role.enum' import { ListAuditLogsQueryDto } from '../dto/list-audit-logs-query.dto' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' @ApiTags('audit') @Controller('audit') @UseGuards(CombinedAuthGuard, SystemActionGuard, OrganizationResourceActionGuard, AuthenticatedRateLimitGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class AuditController { constructor(private readonly auditService: AuditService) {} @Get() @ApiOperation({ summary: 'Get all audit logs', operationId: 'getAllAuditLogs', }) @ApiResponse({ status: 200, description: 'Paginated list of all audit logs', type: PaginatedAuditLogsDto, }) @RequiredSystemRole(SystemRole.ADMIN) async getAllLogs(@Query() query: ListAuditLogsQueryDto): Promise { const result = await this.auditService.getAllLogs( query.page, query.limit, { from: query.from, to: query.to, }, query.nextToken, ) return { items: 
result.items.map(AuditLogDto.fromAuditLog), total: result.total, page: result.page, totalPages: result.totalPages, nextToken: result.nextToken, } } @Get('/organizations/:organizationId') @ApiOperation({ summary: 'Get audit logs for organization', operationId: 'getOrganizationAuditLogs', }) @ApiResponse({ status: 200, description: 'Paginated list of organization audit logs', type: PaginatedAuditLogsDto, }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.READ_AUDIT_LOGS]) async getOrganizationLogs( @Param('organizationId') organizationId: string, @Query() query: ListAuditLogsQueryDto, ): Promise { const result = await this.auditService.getOrganizationLogs( organizationId, query.page, query.limit, { from: query.from, to: query.to, }, query.nextToken, ) return { items: result.items.map(AuditLogDto.fromAuditLog), total: result.total, page: result.page, totalPages: result.totalPages, nextToken: result.nextToken, } } } ================================================ FILE: apps/api/src/audit/decorators/audit.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { SetMetadata } from '@nestjs/common' import { Request } from 'express' import { AuditAction } from '../enums/audit-action.enum' import { AuditTarget } from '../enums/audit-target.enum' export type TypedRequest = Omit & { body: T } export const MASKED_AUDIT_VALUE = '********' export interface AuditContext { action: AuditAction targetType?: AuditTarget targetIdFromRequest?: (req: Request) => string | null | undefined targetIdFromResult?: (result: any) => string | null | undefined requestMetadata?: Record any> } export const AUDIT_CONTEXT_KEY = 'audit_context' export const Audit = (context: AuditContext) => SetMetadata(AUDIT_CONTEXT_KEY, context) ================================================ FILE: apps/api/src/audit/dto/audit-log.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' import { AuditLog, AuditLogMetadata } from '../entities/audit-log.entity' @ApiSchema({ name: 'AuditLog' }) export class AuditLogDto { @ApiProperty() id: string @ApiProperty() actorId: string @ApiProperty() actorEmail: string @ApiPropertyOptional() organizationId?: string @ApiProperty() action: string @ApiPropertyOptional() targetType?: string @ApiPropertyOptional() targetId?: string @ApiPropertyOptional() statusCode?: number @ApiPropertyOptional() errorMessage?: string @ApiPropertyOptional() ipAddress?: string @ApiPropertyOptional() userAgent?: string @ApiPropertyOptional() source?: string @ApiPropertyOptional({ type: 'object', additionalProperties: true, }) metadata?: AuditLogMetadata @ApiProperty() createdAt: Date static fromAuditLog(auditLog: AuditLog): AuditLogDto { return { id: auditLog.id, actorId: auditLog.actorId, actorEmail: auditLog.actorEmail, organizationId: auditLog.organizationId, action: auditLog.action, targetType: auditLog.targetType, targetId: auditLog.targetId, 
statusCode: auditLog.statusCode, errorMessage: auditLog.errorMessage, ipAddress: auditLog.ipAddress, userAgent: auditLog.userAgent, source: auditLog.source, metadata: auditLog.metadata, createdAt: auditLog.createdAt, } } } ================================================ FILE: apps/api/src/audit/dto/create-audit-log-internal.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { AuditLogMetadata } from '../entities/audit-log.entity' import { AuditAction } from '../enums/audit-action.enum' import { AuditTarget } from '../enums/audit-target.enum' export class CreateAuditLogInternalDto { actorId: string actorEmail: string organizationId?: string action: AuditAction targetType?: AuditTarget targetId?: string statusCode?: number errorMessage?: string ipAddress?: string userAgent?: string source?: string metadata?: AuditLogMetadata } ================================================ FILE: apps/api/src/audit/dto/create-audit-log.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { IsEnum, IsOptional } from 'class-validator'
import { AuditAction } from '../enums/audit-action.enum'
import { AuditTarget } from '../enums/audit-target.enum'

// Public request body for creating an audit log entry via the API.
@ApiSchema({ name: 'CreateAuditLog' })
export class CreateAuditLogDto {
  @ApiProperty()
  actorId: string

  @ApiProperty()
  actorEmail: string

  @ApiPropertyOptional()
  @IsOptional()
  organizationId?: string

  @ApiProperty({
    enum: AuditAction,
  })
  @IsEnum(AuditAction)
  action: AuditAction

  @ApiPropertyOptional({
    enum: AuditTarget,
  })
  @IsOptional()
  @IsEnum(AuditTarget)
  targetType?: AuditTarget

  @ApiPropertyOptional()
  @IsOptional()
  targetId?: string
}

================================================
FILE: apps/api/src/audit/dto/list-audit-logs-query.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { PageNumber } from '../../common/decorators/page-number.decorator'
import { PageLimit } from '../../common/decorators/page-limit.decorator'
import { IsDate, IsOptional, IsString } from 'class-validator'
import { Type } from 'class-transformer'

// Query parameters accepted by the audit log listing endpoints.
// Supports offset pagination (page/limit) and cursor pagination (nextToken).
@ApiSchema({ name: 'ListAuditLogsQuery' })
export class ListAuditLogsQueryDto {
  @PageNumber(1)
  page = 1

  @PageLimit(100)
  limit = 100

  @ApiPropertyOptional({ type: String, format: 'date-time', description: 'From date (ISO 8601 format)' })
  @IsOptional()
  @Type(() => Date)
  @IsDate()
  from?: Date

  @ApiPropertyOptional({ type: String, format: 'date-time', description: 'To date (ISO 8601 format)' })
  @IsOptional()
  @Type(() => Date)
  @IsDate()
  to?: Date

  @ApiPropertyOptional({
    type: String,
    description: 'Token for cursor-based pagination. When provided, takes precedence over page parameter.',
  })
  @IsOptional()
  @IsString()
  nextToken?: string
}

================================================
FILE: apps/api/src/audit/dto/paginated-audit-logs.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { AuditLogDto } from './audit-log.dto'

// Response envelope for paginated audit log listings.
@ApiSchema({ name: 'PaginatedAuditLogs' })
export class PaginatedAuditLogsDto {
  @ApiProperty({ type: [AuditLogDto] })
  items: AuditLogDto[]

  @ApiProperty()
  total: number

  @ApiProperty()
  page: number

  @ApiProperty()
  totalPages: number

  @ApiProperty({ required: false, description: 'Token for next page in cursor-based pagination' })
  nextToken?: string
}

================================================
FILE: apps/api/src/audit/dto/update-audit-log-internal.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

// Internal payload for amending an audit log with the request outcome;
// see AuditInterceptor.recordHandlerSuccess / recordHandlerError.
export class UpdateAuditLogInternalDto {
  statusCode?: number
  errorMessage?: string
  targetId?: string
  organizationId?: string
}

================================================
FILE: apps/api/src/audit/entities/audit-log.entity.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Entity, PrimaryGeneratedColumn, Column, CreateDateColumn, Index } from 'typeorm' import { v4 } from 'uuid' export type AuditLogMetadata = Record @Entity() @Index(['createdAt']) @Index(['organizationId', 'createdAt']) export class AuditLog { @PrimaryGeneratedColumn('uuid') id: string @Column() actorId: string @Column({ default: '', }) actorEmail: string @Column({ nullable: true }) organizationId?: string @Column() action: string @Column({ nullable: true }) targetType?: string @Column({ nullable: true }) targetId?: string @Column({ nullable: true }) statusCode?: number @Column({ nullable: true }) errorMessage?: string @Column({ nullable: true }) ipAddress?: string @Column({ type: 'text', nullable: true }) userAgent?: string @Column({ nullable: true }) source?: string @Column({ type: 'jsonb', nullable: true }) metadata?: AuditLogMetadata @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date constructor(params: { id?: string actorId: string actorEmail: string organizationId?: string action: string targetType?: string targetId?: string statusCode?: number errorMessage?: string ipAddress?: string userAgent?: string source?: string metadata?: AuditLogMetadata createdAt?: Date }) { this.id = params.id || v4() this.actorId = params.actorId this.actorEmail = params.actorEmail this.organizationId = params.organizationId this.action = params.action this.targetType = params.targetType this.targetId = params.targetId this.statusCode = params.statusCode this.errorMessage = params.errorMessage this.ipAddress = params.ipAddress this.userAgent = params.userAgent this.source = params.source this.metadata = params.metadata this.createdAt = params.createdAt || new Date() } } ================================================ FILE: apps/api/src/audit/enums/audit-action.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

// Action verbs recorded in audit logs. Values are stored as plain strings on the
// AuditLog entity, so renaming a value changes what new rows contain — existing
// rows keep the old string.
export enum AuditAction {
  CREATE = 'create',
  READ = 'read',
  UPDATE = 'update',
  DELETE = 'delete',
  LOGIN = 'login',
  SET_DEFAULT = 'set_default',
  UPDATE_ACCESS = 'update_access',
  UPDATE_QUOTA = 'update_quota',
  UPDATE_REGION_QUOTA = 'update_region_quota',
  SUSPEND = 'suspend',
  UNSUSPEND = 'unsuspend',
  ACCEPT = 'accept',
  DECLINE = 'decline',
  LINK_ACCOUNT = 'link_account',
  UNLINK_ACCOUNT = 'unlink_account',
  LEAVE_ORGANIZATION = 'leave_organization',
  REGENERATE_KEY_PAIR = 'regenerate_key_pair',
  UPDATE_SCHEDULING = 'update_scheduling',
  UPDATE_DRAINING = 'update_draining',
  START = 'start',
  STOP = 'stop',
  RESIZE = 'resize',
  REPLACE_LABELS = 'replace_labels',
  CREATE_BACKUP = 'create_backup',
  UPDATE_PUBLIC_STATUS = 'update_public_status',
  SET_AUTO_STOP_INTERVAL = 'set_auto_stop_interval',
  SET_AUTO_ARCHIVE_INTERVAL = 'set_auto_archive_interval',
  SET_AUTO_DELETE_INTERVAL = 'set_auto_delete_interval',
  ARCHIVE = 'archive',
  GET_PORT_PREVIEW_URL = 'get_port_preview_url',
  SET_GENERAL_STATUS = 'set_general_status',
  ACTIVATE = 'activate',
  DEACTIVATE = 'deactivate',
  UPDATE_NETWORK_SETTINGS = 'update_network_settings',
  SEND_WEBHOOK_MESSAGE = 'send_webhook_message',
  INITIALIZE_WEBHOOKS = 'initialize_webhooks',
  UPDATE_SANDBOX_DEFAULT_LIMITED_NETWORK_EGRESS = 'update_sandbox_default_limited_network_egress',
  CREATE_SSH_ACCESS = 'create_ssh_access',
  REVOKE_SSH_ACCESS = 'revoke_ssh_access',
  RECOVER = 'recover',
  REGENERATE_PROXY_API_KEY = 'regenerate_proxy_api_key',
  REGENERATE_SSH_GATEWAY_API_KEY = 'regenerate_ssh_gateway_api_key',
  REGENERATE_SNAPSHOT_MANAGER_CREDENTIALS = 'regenerate_snapshot_manager_credentials',

  // toolbox actions (must be prefixed with 'toolbox_')
  // The 'toolbox_' prefix is significant: AuditInterceptor.isToolboxAction uses it to
  // skip auditing these unless audit.toolboxRequestsEnabled is set.
  TOOLBOX_DELETE_FILE = 'toolbox_delete_file',
  TOOLBOX_DOWNLOAD_FILE = 'toolbox_download_file',
  TOOLBOX_CREATE_FOLDER = 'toolbox_create_folder',
  TOOLBOX_MOVE_FILE = 'toolbox_move_file',
  TOOLBOX_SET_FILE_PERMISSIONS = 'toolbox_set_file_permissions',
  TOOLBOX_REPLACE_IN_FILES = 'toolbox_replace_in_files',
  TOOLBOX_UPLOAD_FILE = 'toolbox_upload_file',
  TOOLBOX_BULK_UPLOAD_FILES = 'toolbox_bulk_upload_files',
  TOOLBOX_GIT_ADD_FILES = 'toolbox_git_add_files',
  TOOLBOX_GIT_CREATE_BRANCH = 'toolbox_git_create_branch',
  TOOLBOX_GIT_DELETE_BRANCH = 'toolbox_git_delete_branch',
  TOOLBOX_GIT_CLONE_REPOSITORY = 'toolbox_git_clone_repository',
  TOOLBOX_GIT_COMMIT_CHANGES = 'toolbox_git_commit_changes',
  TOOLBOX_GIT_PULL_CHANGES = 'toolbox_git_pull_changes',
  TOOLBOX_GIT_PUSH_CHANGES = 'toolbox_git_push_changes',
  TOOLBOX_GIT_CHECKOUT_BRANCH = 'toolbox_git_checkout_branch',
  TOOLBOX_EXECUTE_COMMAND = 'toolbox_execute_command',
  TOOLBOX_CREATE_SESSION = 'toolbox_create_session',
  TOOLBOX_SESSION_EXECUTE_COMMAND = 'toolbox_session_execute_command',
  TOOLBOX_DELETE_SESSION = 'toolbox_delete_session',
  TOOLBOX_COMPUTER_USE_START = 'toolbox_computer_use_start',
  TOOLBOX_COMPUTER_USE_STOP = 'toolbox_computer_use_stop',
  TOOLBOX_COMPUTER_USE_RESTART_PROCESS = 'toolbox_computer_use_restart_process',
}

================================================
FILE: apps/api/src/audit/enums/audit-target.enum.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

// Resource kinds an audited action can target; stored as plain strings on AuditLog.
export enum AuditTarget {
  API_KEY = 'api_key',
  ORGANIZATION = 'organization',
  ORGANIZATION_INVITATION = 'organization_invitation',
  ORGANIZATION_ROLE = 'organization_role',
  ORGANIZATION_USER = 'organization_user',
  DOCKER_REGISTRY = 'docker_registry',
  RUNNER = 'runner',
  SANDBOX = 'sandbox',
  SNAPSHOT = 'snapshot',
  USER = 'user',
  VOLUME = 'volume',
  REGION = 'region',
}

================================================
FILE: apps/api/src/audit/events/audit-log-created.event.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { AuditLog } from '../entities/audit-log.entity' export class AuditLogCreatedEvent { constructor(public readonly auditLog: AuditLog) {} } ================================================ FILE: apps/api/src/audit/events/audit-log-updated.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { AuditLog } from '../entities/audit-log.entity' export class AuditLogUpdatedEvent { constructor(public readonly auditLog: AuditLog) {} } ================================================ FILE: apps/api/src/audit/interceptors/audit.interceptor.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, NestInterceptor, ExecutionContext, CallHandler, Logger, UnauthorizedException, InternalServerErrorException, HttpException, HttpStatus, } from '@nestjs/common' import { Reflector } from '@nestjs/core' import { Request, Response } from 'express' import { Observable, Subscriber, firstValueFrom } from 'rxjs' import { AUDIT_CONTEXT_KEY, AuditContext } from '../decorators/audit.decorator' import { AuditLog, AuditLogMetadata } from '../entities/audit-log.entity' import { AuditAction } from '../enums/audit-action.enum' import { AuditService } from '../services/audit.service' import { AuthContext } from '../../common/interfaces/auth-context.interface' import { CustomHeaders } from '../../common/constants/header.constants' import { TypedConfigService } from '../../config/typed-config.service' type RequestWithUser = Request & { user?: AuthContext } @Injectable() export class AuditInterceptor implements NestInterceptor { private readonly logger = new Logger(AuditInterceptor.name) constructor( private readonly reflector: Reflector, private readonly auditService: AuditService, private readonly configService: TypedConfigService, ) {} intercept(context: 
ExecutionContext, next: CallHandler): Observable { const request = context.switchToHttp().getRequest() const response = context.switchToHttp().getResponse() const auditContext = this.reflector.get(AUDIT_CONTEXT_KEY, context.getHandler()) // Non-audited request if (!auditContext) { return next.handle() } // Toolbox requests are not audited by default if (this.isToolboxAction(auditContext.action) && !this.configService.get('audit.toolboxRequestsEnabled')) { return next.handle() } if (!request.user) { this.logger.error('No user context found for audited request:', request.url) throw new UnauthorizedException() } return new Observable((observer) => { this.handleAuditedRequest(auditContext, request, response, next, observer) }) } // An audit log must be created before the request is passed to the request handler // After the request handler returns, the audit log is optimistically updated with the outcome private async handleAuditedRequest( auditContext: AuditContext, request: RequestWithUser, response: Response, next: CallHandler, observer: Subscriber, ): Promise { try { const auditLog = await this.auditService.createLog({ actorId: request.user.userId, actorEmail: request.user.email, organizationId: request.user.organizationId, action: auditContext.action, targetType: auditContext.targetType, targetId: this.resolveTargetId(auditContext, request), ipAddress: request.ip, userAgent: request.get('user-agent'), source: request.get(CustomHeaders.SOURCE.name), metadata: this.resolveRequestMetadata(auditContext, request), }) try { const result = await firstValueFrom(next.handle()) const organizationId = this.resolveOrganizationId(request, result) const targetId = this.resolveTargetId(auditContext, request, result) const statusCode = response.statusCode || HttpStatus.NO_CONTENT await this.recordHandlerSuccess(auditLog, organizationId, targetId, statusCode) observer.next(result) observer.complete() } catch (handlerError) { const errorMessage = handlerError instanceof 
HttpException ? handlerError.message : 'An unexpected error occurred.' const statusCode = this.resolveErrorStatusCode(handlerError) await this.recordHandlerError(auditLog, errorMessage, statusCode) observer.error(handlerError) } } catch (createLogError) { this.logger.error('Failed to create audit log:', createLogError) observer.error(new InternalServerErrorException()) } } private resolveOrganizationId(request: RequestWithUser, result?: any): string | null { return result?.organizationId || request.user.organizationId } /** * Resolves the identifier of the target resource from the initial request or the response object. * * Prioritizes resolving the ID from the response object as the request may not include a unique resource identifier (e.g. delete sandbox by name). */ private resolveTargetId(auditContext: AuditContext, request: RequestWithUser, result?: any): string | null { if (auditContext.targetIdFromResult && result) { const targetId = auditContext.targetIdFromResult(result) if (targetId) { return targetId } } if (auditContext.targetIdFromRequest) { const targetId = auditContext.targetIdFromRequest(request) if (targetId) { return targetId } } return null } private resolveRequestMetadata(auditContext: AuditContext, request: RequestWithUser): AuditLogMetadata | null { if (!auditContext.requestMetadata) { return null } const resolvedMetadata: AuditLogMetadata = {} for (const [key, resolver] of Object.entries(auditContext.requestMetadata)) { try { resolvedMetadata[key] = resolver(request) } catch (error) { this.logger.warn(`Failed to resolve audit log metadata key "${key}":`, error) resolvedMetadata[key] = null } } return Object.keys(resolvedMetadata).length > 0 ? 
resolvedMetadata : null } private isToolboxAction(action: AuditAction): boolean { return action.startsWith('toolbox_') } private async recordHandlerSuccess( auditLog: AuditLog, organizationId: string | null, targetId: string | null, statusCode: number, ): Promise { try { await this.auditService.updateLog(auditLog.id, { organizationId, targetId, statusCode, }) } catch (error) { this.logger.error('Failed to record handler result:', error) } } private async recordHandlerError(auditLog: AuditLog, errorMessage: string, statusCode: number): Promise { try { await this.auditService.updateLog(auditLog.id, { errorMessage, statusCode, }) } catch (error) { this.logger.error('Failed to record handler error:', error) } } private resolveErrorStatusCode(error: any): number { if (error instanceof HttpException) { return error.getStatus() } return HttpStatus.INTERNAL_SERVER_ERROR } } ================================================ FILE: apps/api/src/audit/interfaces/audit-filter.interface.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export interface AuditLogFilter { from?: Date to?: Date } ================================================ FILE: apps/api/src/audit/interfaces/audit-publisher.interface.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { AuditLog } from '../entities/audit-log.entity' /** * Interface for audit log publisher operations * Handles publishing audit logs */ export interface AuditLogPublisher { /** * Publish audit logs */ write(auditLogs: AuditLog[]): Promise } ================================================ FILE: apps/api/src/audit/interfaces/audit-storage.interface.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { AuditLog } from '../entities/audit-log.entity' import { PaginatedList } from '../../common/interfaces/paginated-list.interface' import { AuditLogFilter } from './audit-filter.interface' /** * Interface for audit log storage operations * Handles persistent storage and audit logs queries */ export interface AuditLogStorageAdapter { /** * Write audit logs to storage */ write(auditLogs: AuditLog[]): Promise /** * Get all audit logs * @param page - Page number (1-based) for offset-based pagination * @param limit - Number of items per page * @param filters - Optional filters * @param nextToken - Cursor token for cursor-based pagination (takes precedence over page) */ getAllLogs( page?: number, limit?: number, filters?: AuditLogFilter, nextToken?: string, ): Promise> /** * Get audit logs for organization * @param organizationId - Organization ID * @param page - Page number (1-based) for offset-based pagination * @param limit - Number of items per page * @param filters - Optional filters * @param nextToken - Cursor token for cursor-based pagination (takes precedence over page) */ getOrganizationLogs( organizationId: string, page?: number, limit?: number, filters?: AuditLogFilter, nextToken?: string, ): Promise> } ================================================ FILE: apps/api/src/audit/providers/audit-publisher.provider.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Provider } from '@nestjs/common' import { AuditKafkaPublisher } from '../publishers/kafka/audit-kafka-publisher' import { AuditDirectPublisher } from '../publishers/audit-direct-publisher' import { AuditLogStorageAdapter } from '../interfaces/audit-storage.interface' import { AuditLogPublisher } from '../interfaces/audit-publisher.interface' import { AUDIT_KAFKA_SERVICE, AUDIT_STORAGE_ADAPTER, AUDIT_LOG_PUBLISHER } from '../constants/audit-tokens' import { TypedConfigService } from '../../config/typed-config.service' import { ClientKafka } from '@nestjs/microservices' export const AuditPublisherProvider: Provider = { provide: AUDIT_LOG_PUBLISHER, useFactory: ( configService: TypedConfigService, kafkaService: ClientKafka, auditStorageAdapter: AuditLogStorageAdapter, ): AuditLogPublisher => { const auditConfig = configService.get('audit') if (!auditConfig.publish.enabled) { return } switch (auditConfig.publish.mode) { case 'direct': return new AuditDirectPublisher(auditStorageAdapter) case 'kafka': if (!configService.get('kafka.enabled')) { throw new Error('Kafka must be enabled to publish audit logs to Kafka') } return new AuditKafkaPublisher(kafkaService) default: throw new Error(`Invalid publish mode: ${auditConfig.publish.mode}`) } }, inject: [TypedConfigService, AUDIT_KAFKA_SERVICE, AUDIT_STORAGE_ADAPTER], } ================================================ FILE: apps/api/src/audit/providers/audit-storage.provider.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Provider } from '@nestjs/common' import { getRepositoryToken } from '@nestjs/typeorm' import { AuditOpenSearchStorageAdapter } from '../adapters/audit-opensearch.adapter' import { AuditLogStorageAdapter } from '../interfaces/audit-storage.interface' import { AUDIT_STORAGE_ADAPTER } from '../constants/audit-tokens' import { TypedConfigService } from '../../config/typed-config.service' import { OpensearchClient } from 'nestjs-opensearch' import { AuditTypeormStorageAdapter } from '../adapters/audit-typeorm.adapter' import { Repository } from 'typeorm' import { AuditLog } from '../entities/audit-log.entity' export const AuditStorageAdapterProvider: Provider = { provide: AUDIT_STORAGE_ADAPTER, useFactory: ( configService: TypedConfigService, opensearchClient: OpensearchClient, auditLogRepository: Repository, ): AuditLogStorageAdapter => { const auditConfig = configService.get('audit') if (auditConfig.publish.enabled) { switch (auditConfig.publish.storageAdapter) { case 'opensearch': { return new AuditOpenSearchStorageAdapter(configService, opensearchClient) } default: throw new Error(`Invalid storage adapter: ${auditConfig.publish.storageAdapter}`) } } else { return new AuditTypeormStorageAdapter(auditLogRepository) } }, inject: [TypedConfigService, OpensearchClient, getRepositoryToken(AuditLog)], } ================================================ FILE: apps/api/src/audit/publishers/audit-direct-publisher.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, Inject, OnModuleInit } from '@nestjs/common' import { AuditLog } from '../entities/audit-log.entity' import { AuditLogPublisher } from '../interfaces/audit-publisher.interface' import { AuditLogStorageAdapter } from '../interfaces/audit-storage.interface' import { AUDIT_STORAGE_ADAPTER } from '../constants/audit-tokens' @Injectable() export class AuditDirectPublisher implements AuditLogPublisher, OnModuleInit { private readonly logger = new Logger(AuditDirectPublisher.name) constructor(@Inject(AUDIT_STORAGE_ADAPTER) private readonly storageAdapter: AuditLogStorageAdapter) {} async onModuleInit(): Promise { this.logger.log('Direct storage publisher initialized') } async write(auditLogs: AuditLog[]): Promise { await this.storageAdapter.write(auditLogs) this.logger.debug( `Written ${auditLogs.length} audit logs directly to ${this.storageAdapter.constructor.name} publisher`, ) } } ================================================ FILE: apps/api/src/audit/publishers/kafka/audit-kafka-consumer.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Inject, Logger, UseFilters } from '@nestjs/common' import { Ctx, EventPattern, KafkaContext, Payload } from '@nestjs/microservices' import { AuditLog } from '../../entities/audit-log.entity' import { AuditLogStorageAdapter } from '../../interfaces/audit-storage.interface' import { AutoCommitOffset } from '../../../common/decorators/autocommit-offset.decorator' import { AUDIT_KAFKA_TOPIC, AUDIT_STORAGE_ADAPTER } from '../../constants/audit-tokens' import { KafkaMaxRetryExceptionFilter } from '../../../filters/kafka-exception.filter' @Controller('kafka-audit') @UseFilters(new KafkaMaxRetryExceptionFilter({ retries: 3, sendToDlq: true })) export class AuditKafkaConsumerController { private readonly logger = new Logger(AuditKafkaConsumerController.name) constructor(@Inject(AUDIT_STORAGE_ADAPTER) private readonly auditStorageAdapter: AuditLogStorageAdapter) {} @EventPattern(AUDIT_KAFKA_TOPIC) @AutoCommitOffset() public async handleAuditLogMessage(@Payload() message: AuditLog, @Ctx() context: KafkaContext): Promise { this.logger.debug('Handling audit log message', { message }) await this.auditStorageAdapter.write([message]) } } ================================================ FILE: apps/api/src/audit/publishers/kafka/audit-kafka-publisher.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Logger, OnModuleInit } from '@nestjs/common' import { ClientKafkaProxy } from '@nestjs/microservices' import { CompressionTypes, Message } from 'kafkajs' import { AuditLog } from '../../entities/audit-log.entity' import { AuditLogPublisher } from '../../interfaces/audit-publisher.interface' import { AUDIT_KAFKA_TOPIC } from '../../constants/audit-tokens' export class AuditKafkaPublisher implements AuditLogPublisher, OnModuleInit { private readonly logger = new Logger(AuditKafkaPublisher.name) constructor(private readonly kafkaService: ClientKafkaProxy) {} async onModuleInit() { await this.kafkaService.connect() this.logger.debug('Kafka audit log publisher initialized') } async write(auditLogs: AuditLog[]): Promise { const messages: Message[] = auditLogs.map((auditLog) => ({ key: auditLog.organizationId, value: JSON.stringify(auditLog), })) try { await this.kafkaService.producer.send({ topic: AUDIT_KAFKA_TOPIC, messages: messages, acks: -1, compression: CompressionTypes.GZIP, }) } catch (error) { this.logger.error('Failed to write audit log to Kafka:', error) throw error } this.logger.debug(`${auditLogs.length} audit logs written to Kafka`) } } ================================================ FILE: apps/api/src/audit/services/audit.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Inject, Injectable, Logger, NotFoundException, OnApplicationBootstrap, Optional } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { Cron, CronExpression, SchedulerRegistry } from '@nestjs/schedule' import { LessThan, Repository, IsNull, Not } from 'typeorm' import { CreateAuditLogInternalDto } from '../dto/create-audit-log-internal.dto' import { UpdateAuditLogInternalDto } from '../dto/update-audit-log-internal.dto' import { AuditLog } from '../entities/audit-log.entity' import { PaginatedList } from '../../common/interfaces/paginated-list.interface' import { TypedConfigService } from '../../config/typed-config.service' import { RedisLockProvider } from '../../sandbox/common/redis-lock.provider' import { AUDIT_LOG_PUBLISHER, AUDIT_STORAGE_ADAPTER } from '../constants/audit-tokens' import { AuditLogStorageAdapter } from '../interfaces/audit-storage.interface' import { AuditLogPublisher } from '../interfaces/audit-publisher.interface' import { AuditLogFilter } from '../interfaces/audit-filter.interface' import { DistributedLock } from '../../common/decorators/distributed-lock.decorator' import { WithInstrumentation } from '../../common/decorators/otel.decorator' import { LogExecution } from '../../common/decorators/log-execution.decorator' @Injectable() export class AuditService implements OnApplicationBootstrap { private readonly logger = new Logger(AuditService.name) constructor( @InjectRepository(AuditLog) private readonly auditLogRepository: Repository, private readonly configService: TypedConfigService, private readonly redisLockProvider: RedisLockProvider, private readonly schedulerRegistry: SchedulerRegistry, @Inject(AUDIT_STORAGE_ADAPTER) private readonly auditStorageAdapter: AuditLogStorageAdapter, @Optional() @Inject(AUDIT_LOG_PUBLISHER) private readonly auditLogPublisher?: AuditLogPublisher, ) {} onApplicationBootstrap() { const auditConfig = this.configService.get('audit') // 
Enable publish cron job if publish is enabled if (auditConfig.publish.enabled) { this.schedulerRegistry.getCronJob('publish-audit-logs').start() return } // Enable cleanup cron job if retention days is configured and publish is disabled if (auditConfig.retentionDays && auditConfig.retentionDays > 0) { this.schedulerRegistry.getCronJob('cleanup-old-audit-logs').start() } const batchSize = this.configService.getOrThrow('audit.publish.batchSize') if (batchSize > 50000) { throw new Error('Audit publish batch size cannot be greater than 50000') } } async createLog(createDto: CreateAuditLogInternalDto): Promise { const auditLog = new AuditLog(createDto) await this.auditLogRepository.insert(auditLog) return auditLog } async updateLog(id: string, updateDto: UpdateAuditLogInternalDto): Promise { const auditLog = await this.auditLogRepository.findOne({ where: { id } }) if (!auditLog) { throw new NotFoundException(`Audit log with ID ${id} not found`) } if (updateDto.statusCode) { auditLog.statusCode = updateDto.statusCode } if (updateDto.errorMessage) { auditLog.errorMessage = updateDto.errorMessage } if (updateDto.targetId) { auditLog.targetId = updateDto.targetId } if (updateDto.organizationId) { auditLog.organizationId = updateDto.organizationId } if (this.configService.get('audit.consoleLogEnabled')) { this.logger.log(`AUDIT_ENTRY: ${JSON.stringify(auditLog)}`) } return this.auditLogRepository.save(auditLog) } async getAllLogs( page = 1, limit = 10, filters?: AuditLogFilter, nextToken?: string, ): Promise> { return this.auditStorageAdapter.getAllLogs(page, limit, filters, nextToken) } async getOrganizationLogs( organizationId: string, page = 1, limit = 10, filters?: AuditLogFilter, nextToken?: string, ): Promise> { return this.auditStorageAdapter.getOrganizationLogs(organizationId, page, limit, filters, nextToken) } @Cron(CronExpression.EVERY_DAY_AT_2AM, { name: 'cleanup-old-audit-logs', waitForCompletion: true, disabled: true, }) @DistributedLock() 
@LogExecution('cleanup-old-audit-logs') async cleanupOldAuditLogs(): Promise { try { const retentionDays = this.configService.get('audit.retentionDays') if (!retentionDays) { return } const cutoffDate = new Date(Date.now() - retentionDays * 24 * 60 * 60 * 1000) this.logger.log(`Starting cleanup of audit logs older than ${retentionDays} days`) const deletedLogs = await this.auditLogRepository.delete({ createdAt: LessThan(cutoffDate), }) const totalDeleted = deletedLogs.affected this.logger.log(`Completed cleanup of audit logs older than ${retentionDays} days (${totalDeleted} logs deleted)`) } catch (error) { this.logger.error(`An error occurred during cleanup of old audit logs: ${error.message}`, error.stack) } } // Resolve dangling audit logs where status code is not set and created at is more than half an hour ago @Cron(CronExpression.EVERY_MINUTE, { name: 'resolve-dangling-audit-logs', waitForCompletion: true, }) @DistributedLock() @WithInstrumentation() @LogExecution('resolve-dangling-audit-logs') async resolveDanglingLogs() { const danglingLogs = await this.auditLogRepository.find({ where: { statusCode: IsNull(), createdAt: LessThan(new Date(Date.now() - 30 * 60 * 1000)), }, }) for (const log of danglingLogs) { // set status code to unknown log.statusCode = 0 await this.auditLogRepository.save(log) if (this.configService.get('audit.consoleLogEnabled')) { this.logger.log(`AUDIT_ENTRY: ${JSON.stringify(log)}`) } } this.logger.debug(`Resolved ${danglingLogs.length} dangling audit logs`) } @Cron(CronExpression.EVERY_SECOND, { name: 'publish-audit-logs', waitForCompletion: true, disabled: true, }) @LogExecution('publish-audit-logs') @DistributedLock() @WithInstrumentation() async publishAuditLogs() { // Safeguard if (!this.auditLogPublisher) { this.logger.warn('Audit log publisher not configured, skipping publish') return } const auditLogs = await this.auditLogRepository.find({ where: { statusCode: Not(IsNull()), }, take: 
this.configService.getOrThrow('audit.publish.batchSize'), order: { createdAt: 'ASC', }, }) if (auditLogs.length === 0) { return } await this.auditLogPublisher.write(auditLogs) await this.auditLogRepository.delete(auditLogs.map((log) => log.id)) } } ================================================ FILE: apps/api/src/audit/subscribers/audit-log.subscriber.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ForbiddenException, Inject, Logger } from '@nestjs/common' import { EventEmitter2 } from '@nestjs/event-emitter' import { DataSource, EntitySubscriberInterface, EventSubscriber, UpdateEvent } from 'typeorm' import { AuditLog } from '../entities/audit-log.entity' @EventSubscriber() export class AuditLogSubscriber implements EntitySubscriberInterface { private readonly logger = new Logger(AuditLogSubscriber.name) @Inject(EventEmitter2) private eventEmitter: EventEmitter2 constructor(dataSource: DataSource) { dataSource.subscribers.push(this) } listenTo() { return AuditLog } beforeUpdate(event: UpdateEvent) { const existingEntity = event.databaseEntity as AuditLog if (!existingEntity) { // This should not happen, throw exception as a fail-safe this.logger.warn('Could not find existing audit log entity, beforeUpdate event:', event) throw new ForbiddenException() } if (existingEntity.statusCode) { throw new ForbiddenException('Finalized audit logs are immutable.') } } } ================================================ FILE: apps/api/src/auth/api-key.strategy.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, UnauthorizedException, Logger, OnModuleInit } from '@nestjs/common' import { PassportStrategy } from '@nestjs/passport' import { Strategy } from 'passport-http-bearer' import { ApiKeyService } from '../api-key/api-key.service' import { ApiKey } from '../api-key/api-key.entity' import { UserService } from '../user/user.service' import { AuthContextType } from '../common/interfaces/auth-context.interface' import { TypedConfigService } from '../config/typed-config.service' import { InjectRedis } from '@nestjs-modules/ioredis' import Redis from 'ioredis' import { SystemRole } from '../user/enums/system-role.enum' import { RunnerService } from '../sandbox/services/runner.service' import { generateApiKeyHash } from '../common/utils/api-key' import { RegionService } from '../region/services/region.service' import { JWT_REGEX } from './constants/jwt-regex.constant' type UserCache = { userId: string role: SystemRole email: string } @Injectable() export class ApiKeyStrategy extends PassportStrategy(Strategy, 'api-key') implements OnModuleInit { private readonly logger = new Logger(ApiKeyStrategy.name) constructor( @InjectRedis() private readonly redis: Redis, private readonly apiKeyService: ApiKeyService, private readonly userService: UserService, private readonly configService: TypedConfigService, private readonly runnerService: RunnerService, private readonly regionService: RegionService, ) { super() this.logger.log('ApiKeyStrategy constructor called') } onModuleInit() { this.logger.log('ApiKeyStrategy initialized') } async validate(token: string): Promise { this.logger.debug('Validate method called') this.logger.debug(`Validating API key: ${token.substring(0, 8)}...`) const sshGatewayApiKey = this.configService.getOrThrow('sshGateway.apiKey') if (sshGatewayApiKey === token) { return { role: 'ssh-gateway', } } const proxyApiKey = this.configService.getOrThrow('proxy.apiKey') if (proxyApiKey === token) { return { 
role: 'proxy', } } const otelCollectorApiKey = this.configService.get('otelCollector.apiKey') if (otelCollectorApiKey && otelCollectorApiKey === token) { return { role: 'otel-collector', } } const healthCheckApiKey = this.configService.get('healthCheck.apiKey') if (healthCheckApiKey && healthCheckApiKey === token) { return { role: 'health-check', } } // Tokens matching JWT structure are not API keys — skip DB lookups and delegate to the JWT strategy. if (JWT_REGEX.test(token)) { return null } try { let apiKey = await this.getApiKeyCache(token) if (!apiKey) { // Cache miss - validate from database apiKey = await this.apiKeyService.getApiKeyByValue(token) this.logger.debug(`API key found for userId: ${apiKey.userId}`) // Check expiry BEFORE caching to prevent storing expired keys if (apiKey.expiresAt && apiKey.expiresAt < new Date()) { throw new UnauthorizedException('This API key has expired') } const validationCacheTtl = this.configService.get('apiKey.validationCacheTtlSeconds') const cacheKey = this.generateValidationCacheKey(token) await this.redis.setex(cacheKey, validationCacheTtl, JSON.stringify(apiKey)) } if (apiKey.expiresAt && apiKey.expiresAt < new Date()) { throw new UnauthorizedException('This API key has expired') } this.logger.debug(`Updating last used timestamp for API key: ${token.substring(0, 8)}...`) await this.apiKeyService.updateLastUsedAt(apiKey.organizationId, apiKey.userId, apiKey.name, new Date()) let userCache = await this.getUserCache(apiKey.userId) if (!userCache) { const user = await this.userService.findOne(apiKey.userId) if (!user) { this.logger.error(`Api key has invalid user: ${apiKey.keySuffix} - ${apiKey.userId}`) throw new UnauthorizedException('User not found') } userCache = { userId: user.id, role: user.role, email: user.email, } const userCacheTtl = this.configService.get('apiKey.userCacheTtlSeconds') await this.redis.setex(this.generateUserCacheKey(apiKey.userId), userCacheTtl, JSON.stringify(userCache)) } const result = { 
userId: userCache.userId, role: userCache.role, email: userCache.email, apiKey, organizationId: apiKey.organizationId, } this.logger.debug('Authentication successful', result) return result } catch (error) { this.logger.debug('Error checking user API key:', error) // Continue to check runner API keys if user check fails } try { const runner = await this.runnerService.findByApiKey(token) if (runner) { this.logger.debug(`Runner API key found for runner: ${runner.id}`) return { role: 'runner', runnerId: runner.id, runner, } } } catch (error) { this.logger.debug('Error checking runner API key:', error) } try { const region = await this.regionService.findOneByProxyApiKey(token) if (region) { this.logger.debug(`Region proxy API key found for region: ${region.id}`) return { role: 'region-proxy', regionId: region.id, } } } catch (error) { this.logger.debug('Error checking region proxy API key:', error) } try { const region = await this.regionService.findOneBySshGatewayApiKey(token) if (region) { this.logger.debug(`Region SSH gateway API key found for region: ${region.id}`) return { role: 'region-ssh-gateway', regionId: region.id, } } } catch (error) { this.logger.debug('Error checking region SSH gateway API key:', error) } return null } private async getUserCache(userId: string): Promise { try { const userCacheRaw = await this.redis.get(`api-key:user:${userId}`) if (userCacheRaw) { return JSON.parse(userCacheRaw) } return null } catch (error) { this.logger.error('Error getting user cache:', error) return null } } private async getApiKeyCache(token: string): Promise { try { const cacheKey = this.generateValidationCacheKey(token) const cached = await this.redis.get(cacheKey) if (cached) { this.logger.debug('Using cached API key validation') const apiKey = JSON.parse(cached) // Parse Date fields from cached data if (apiKey.createdAt) { apiKey.createdAt = new Date(apiKey.createdAt) } if (apiKey.lastUsedAt) { apiKey.lastUsedAt = new Date(apiKey.lastUsedAt) } if 
(apiKey.expiresAt) { apiKey.expiresAt = new Date(apiKey.expiresAt) } return apiKey } return null } catch (error) { this.logger.error('Error getting API key cache:', error) return null } } private generateValidationCacheKey(token: string): string { return `api-key:validation:${generateApiKeyHash(token)}` } private generateUserCacheKey(userId: string): string { return `api-key:user:${userId}` } } ================================================ FILE: apps/api/src/auth/auth.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { PassportModule } from '@nestjs/passport' import { JwtStrategy } from './jwt.strategy' import { ApiKeyStrategy } from './api-key.strategy' import { UserModule } from '../user/user.module' import { ApiKeyModule } from '../api-key/api-key.module' import { SandboxModule } from '../sandbox/sandbox.module' import { TypedConfigService } from '../config/typed-config.service' import { HttpModule, HttpService } from '@nestjs/axios' import { OidcMetadata } from 'oidc-client-ts' import { firstValueFrom } from 'rxjs' import { UserService } from '../user/user.service' import { TypedConfigModule } from '../config/typed-config.module' import { catchError, map } from 'rxjs/operators' import { FailedAuthTrackerService } from './failed-auth-tracker.service' import { RegionModule } from '../region/region.module' @Module({ imports: [ PassportModule.register({ defaultStrategy: ['jwt', 'api-key'], property: 'user', session: false, }), TypedConfigModule, UserModule, ApiKeyModule, SandboxModule, RegionModule, HttpModule, ], providers: [ ApiKeyStrategy, { provide: JwtStrategy, useFactory: async (userService: UserService, httpService: HttpService, configService: TypedConfigService) => { if (configService.get('skipConnections')) { return } // Get the OpenID configuration from the issuer const discoveryUrl = 
`${configService.get('oidc.issuer')}/.well-known/openid-configuration` const metadata = await firstValueFrom( httpService.get(discoveryUrl).pipe( map((response) => response.data as OidcMetadata), catchError((error) => { throw new Error(`Failed to fetch OpenID configuration: ${error.message}`) }), ), ) let jwksUri = metadata.jwks_uri const internalIssuer = configService.getOrThrow('oidc.issuer') const publicIssuer = configService.get('oidc.publicIssuer') if (publicIssuer) { // Replace localhost URLs with Docker network URLs for internal API use jwksUri = metadata.jwks_uri.replace(publicIssuer, internalIssuer) } return new JwtStrategy( { audience: configService.get('oidc.audience'), issuer: metadata.issuer, jwksUri: jwksUri, }, userService, configService, ) }, inject: [UserService, HttpService, TypedConfigService], }, FailedAuthTrackerService, ], exports: [PassportModule, JwtStrategy, ApiKeyStrategy, FailedAuthTrackerService], }) export class AuthModule {} ================================================ FILE: apps/api/src/auth/combined-auth.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, UnauthorizedException } from '@nestjs/common' import { AuthGuard } from '@nestjs/passport' /** * Main authentication guard for the application. * * Strategies are tried in array order. * On first success, the rest are skipped. * * `handleRequest` is invoked once — either when a strategy succeeds or when all strategies fail. * It returns the authenticated user object or throws a generic `UnauthorizedException`. 
*/ @Injectable() export class CombinedAuthGuard extends AuthGuard(['api-key', 'jwt']) { private readonly logger = new Logger(CombinedAuthGuard.name) handleRequest(err: any, user: any) { if (err || !user) { this.logger.debug('Authentication failed', { err, user }) throw new UnauthorizedException('Invalid credentials') } return user } } ================================================ FILE: apps/api/src/auth/constants/jwt-regex.constant.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ /** * Matches the structure of a JWT token: three base64url-encoded segments * separated by dots (header.payload.signature). * * Both header and payload must start with `eyJ` — the base64url encoding of `{"`, * which is guaranteed since both are JSON objects. * * The signature segment has no prefix constraint since it is raw bytes. */ export const JWT_REGEX = /^eyJ[A-Za-z0-9_-]+\.eyJ[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+$/ ================================================ FILE: apps/api/src/auth/failed-auth-tracker.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Inject, Logger } from '@nestjs/common' import { getRedisConnectionToken } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { Request, Response } from 'express' import { ThrottlerException } from '@nestjs/throttler' import { TypedConfigService } from '../config/typed-config.service' import { setRateLimitHeaders } from '../common/utils/rate-limit-headers.util' /** * Service to track failed authentication attempts across all auth guards. * Shared logic for both JWT and API key authentication failures. 
*/ @Injectable() export class FailedAuthTrackerService { private readonly logger = new Logger(FailedAuthTrackerService.name) constructor( @Inject(getRedisConnectionToken('throttler')) private readonly redis: Redis, private readonly configService: TypedConfigService, ) {} async incrementFailedAuth(request: Request, response: Response): Promise { try { const ip = request.ips.length ? request.ips[0] : request.ip const throttlerName = 'failed-auth' const tracker = `${throttlerName}:${ip}` // Get failed-auth config from TypedConfigService const failedAuthConfig = this.configService.get('rateLimit.failedAuth') if (!failedAuthConfig || !failedAuthConfig.ttl || !failedAuthConfig.limit) { // If failed-auth throttler is not configured, skip tracking return } const limit = failedAuthConfig.limit const ttl = failedAuthConfig.ttl * 1000 // Convert seconds to milliseconds const keyPrefix = this.redis.options.keyPrefix || '' const key = `${throttlerName}-${tracker}` const hitKey = `${keyPrefix}{${key}:${throttlerName}}:hits` const blockedKey = `${keyPrefix}{${key}:${throttlerName}}:blocked` // Increment hits const hits = await this.redis.incr(hitKey) if (hits === 1) { await this.redis.pexpire(hitKey, ttl) } const ttlRemaining = await this.redis.pttl(hitKey) // Set rate limit headers setRateLimitHeaders(response, { throttlerName, limit, remaining: Math.max(0, limit - hits), resetSeconds: Math.ceil(ttlRemaining / 1000), }) // Check if blocked if (hits >= limit) { await this.redis.set(blockedKey, '1', 'PX', ttl) setRateLimitHeaders(response, { throttlerName, limit, remaining: 0, resetSeconds: Math.ceil(ttl / 1000), retryAfterSeconds: Math.ceil(ttl / 1000), }) throw new ThrottlerException() } } catch (error) { if (error instanceof ThrottlerException) { throw error } // Log error but don't block auth if rate limiting has issues this.logger.error('Failed to track authentication failure:', error) } } } ================================================ FILE: 
apps/api/src/auth/get-auth-context.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ExecutionContext, UnauthorizedException } from '@nestjs/common' import { BaseAuthContext } from '../common/interfaces/auth-context.interface' export function getAuthContext( context: ExecutionContext, isFunction: (user: BaseAuthContext) => user is T, ): T { const request = context.switchToHttp().getRequest() if (request.user && isFunction(request.user)) { return request.user } throw new UnauthorizedException('Unauthorized') } ================================================ FILE: apps/api/src/auth/health-check.guard.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, ExecutionContext, Logger, CanActivate } from '@nestjs/common' import { getAuthContext } from './get-auth-context' import { isHealthCheckContext } from '../common/interfaces/health-check-context.interface' @Injectable() export class HealthCheckGuard implements CanActivate { protected readonly logger = new Logger(HealthCheckGuard.name) async canActivate(context: ExecutionContext): Promise { // Throws if not health check context getAuthContext(context, isHealthCheckContext) return true } } ================================================ FILE: apps/api/src/auth/jwt.strategy.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger } from '@nestjs/common' import { PassportStrategy } from '@nestjs/passport' import { ExtractJwt, Strategy } from 'passport-jwt' import { passportJwtSecret } from 'jwks-rsa' import { createRemoteJWKSet, JWTPayload, jwtVerify } from 'jose' import { UserService } from '../user/user.service' import { AuthContext } from '../common/interfaces/auth-context.interface' import { Request } from 'express' import { CustomHeaders } from '../common/constants/header.constants' import { TypedConfigService } from '../config/typed-config.service' interface JwtStrategyConfig { jwksUri: string audience: string issuer: string } @Injectable() export class JwtStrategy extends PassportStrategy(Strategy) { private readonly logger = new Logger(JwtStrategy.name) private JWKS: ReturnType constructor( private readonly options: JwtStrategyConfig, private readonly userService: UserService, private readonly configService: TypedConfigService, ) { super({ secretOrKeyProvider: passportJwtSecret({ cache: true, rateLimit: true, jwksRequestsPerMinute: 5, jwksUri: options.jwksUri, }), jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(), audience: options.audience, issuer: options.issuer, algorithms: ['RS256'], passReqToCallback: true, }) this.JWKS = createRemoteJWKSet(new URL(options.jwksUri)) this.logger.debug('JwtStrategy initialized') } async validate(request: Request, payload: any): Promise { // OKTA does not return the userId in access_token sub claim // real userId is in the uid claim and email is in the sub claim let userId = payload.sub let email = payload.email if (payload.cid && payload.uid) { userId = payload.uid email = payload.sub } let user = await this.userService.findOne(userId) if (user && !user.emailVerified && payload.email_verified) { await this.userService.update(user.id, { emailVerified: payload.email_verified, }) } if (!user) { user = await this.userService.create({ id: userId, name: payload.name || 
payload.username || 'Unknown', email: email || '', emailVerified: payload.email_verified || false, personalOrganizationQuota: this.configService.getOrThrow('defaultOrganizationQuota'), }) this.logger.debug(`Created new user with ID: ${userId}`) } else if (user.name === 'Unknown' || !user.email) { await this.userService.update(user.id, { name: payload.name || payload.username || 'Unknown', email: email || '', }) this.logger.debug(`Updated name and email address for existing user with ID: ${userId}`) } else if (user.email !== email) { await this.userService.update(user.id, { email: email || '', }) this.logger.debug(`Updated email address for existing user with ID: ${userId}`) } const organizationId = request.get(CustomHeaders.ORGANIZATION_ID.name) return { userId: user.id, role: user.role, email: user.email, organizationId, } } async verifyToken(token: string): Promise { const { payload } = await jwtVerify(token, this.JWKS, { audience: this.options.audience, issuer: this.options.issuer, algorithms: ['RS256'], }) return payload } } ================================================ FILE: apps/api/src/auth/or.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, ExecutionContext, Logger, CanActivate, Type, mixin } from '@nestjs/common' import { ModuleRef } from '@nestjs/core' /** * Creates an OrGuard that allows access if at least one of the provided guards allows access. * It tries each guard in sequence and returns true on the first successful guard. * If all guards fail, it returns false. 
* * Usage: * ```typescript * @UseGuards(OrGuard([GuardA, GuardB])) * ``` */ export function OrGuard(guards: Type[]): Type { @Injectable() class OrGuardMixin implements CanActivate { protected readonly logger = new Logger(`OrGuard`) constructor(private readonly moduleRef: ModuleRef) {} async canActivate(context: ExecutionContext): Promise { for (const GuardClass of guards) { try { const guard = this.moduleRef.get(GuardClass, { strict: false }) const result = await guard.canActivate(context) if (result) { this.logger.debug(`Guard ${GuardClass.name} succeeded`) return true } } catch (error) { this.logger.debug(`Guard ${GuardClass.name} failed: ${error.message}`) } } this.logger.debug('All guards in OrGuard failed') return false } } return mixin(OrGuardMixin) } ================================================ FILE: apps/api/src/auth/otel-collector.guard.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, ExecutionContext, Logger, CanActivate } from '@nestjs/common' import { getAuthContext } from './get-auth-context' import { isOtelCollectorContext } from '../common/interfaces/otel-collector-context.interface' @Injectable() export class OtelCollectorGuard implements CanActivate { protected readonly logger = new Logger(OtelCollectorGuard.name) async canActivate(context: ExecutionContext): Promise { // Throws if not otel collector context getAuthContext(context, isOtelCollectorContext) return true } } ================================================ FILE: apps/api/src/auth/runner-auth.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, CanActivate, ExecutionContext, Logger } from '@nestjs/common' import { isRunnerContext } from '../common/interfaces/runner-context.interface' import { getAuthContext } from './get-auth-context' @Injectable() export class RunnerAuthGuard implements CanActivate { private readonly logger = new Logger(RunnerAuthGuard.name) async canActivate(context: ExecutionContext): Promise { // Throws if not runner context getAuthContext(context, isRunnerContext) return true } } ================================================ FILE: apps/api/src/auth/system-action.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, ExecutionContext, Logger, CanActivate } from '@nestjs/common' import { Reflector } from '@nestjs/core' import { RequiredSystemRole, RequiredApiRole } from '../common/decorators/required-role.decorator' import { SystemRole } from '../user/enums/system-role.enum' import { ApiRole, AuthContext } from '../common/interfaces/auth-context.interface' @Injectable() export class SystemActionGuard implements CanActivate { protected readonly logger = new Logger(SystemActionGuard.name) constructor(private readonly reflector: Reflector) {} async canActivate(context: ExecutionContext): Promise { const request = context.switchToHttp().getRequest() // TODO: initialize authContext safely const authContext: AuthContext = request.user let requiredRole: SystemRole | SystemRole[] | ApiRole | ApiRole[] = this.reflector.get(RequiredSystemRole, context.getHandler()) || this.reflector.get(RequiredSystemRole, context.getClass()) if (!requiredRole) { requiredRole = this.reflector.get(RequiredApiRole, context.getHandler()) || this.reflector.get(RequiredApiRole, context.getClass()) if (!requiredRole) { return true } } if (!Array.isArray(requiredRole)) { requiredRole = [requiredRole] } return (requiredRole as 
string[]).includes(authContext.role as string) } } ================================================ FILE: apps/api/src/clickhouse/clickhouse.module.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Module, Global } from '@nestjs/common' import { ClickHouseService } from './clickhouse.service' @Global() @Module({ providers: [ClickHouseService], exports: [ClickHouseService], }) export class ClickHouseModule {} ================================================ FILE: apps/api/src/clickhouse/clickhouse.service.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, OnModuleDestroy } from '@nestjs/common' import { createClient, ClickHouseClient } from '@clickhouse/client' import { TypedConfigService } from '../config/typed-config.service' @Injectable() export class ClickHouseService implements OnModuleDestroy { private readonly logger = new Logger(ClickHouseService.name) private client: ClickHouseClient | null = null constructor(private readonly configService: TypedConfigService) {} private getClient(): ClickHouseClient | null { if (this.client) { return this.client } const config = this.configService.getClickHouseConfig() if (!config) { return null } this.client = createClient({ url: config.url, username: config.username, password: config.password, database: config.database, }) return this.client } async onModuleDestroy() { if (this.client) { await this.client.close() } } isConfigured(): boolean { return this.configService.getClickHouseConfig() !== null } async query(query: string, params?: Record): Promise { const client = this.getClient() if (!client) { this.logger.warn('ClickHouse is not configured') return [] } try { const result = await client.query({ query, query_params: params, format: 'JSONEachRow', clickhouse_settings: { date_time_input_format: 'best_effort', }, }) return 
(await result.json()) as T[] } catch (error) { this.logger.error('ClickHouse query failed:', error) throw error } } async queryOne(query: string, params?: Record): Promise { const results = await this.query(query, params) return results.length > 0 ? results[0] : null } } ================================================ FILE: apps/api/src/clickhouse/index.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export * from './clickhouse.module' export * from './clickhouse.service' ================================================ FILE: apps/api/src/common/constants/constants.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const SANDBOX_EVENT_CHANNEL = 'sandbox.event.channel' ================================================ FILE: apps/api/src/common/constants/error-messages.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const UPGRADE_TIER_MESSAGE = (dashboardUrl: string) => `To increase concurrency limits, upgrade your organization's Tier by visiting ${dashboardUrl}/limits.` export const ARCHIVE_SANDBOXES_MESSAGE = 'Consider archiving your unused Sandboxes to free up available storage.' export const PER_SANDBOX_LIMIT_MESSAGE = 'Need higher resource limits per-sandbox? Contact us at support@daytona.io and let us know about your use case.' ================================================ FILE: apps/api/src/common/constants/feature-flags.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export const FeatureFlags = { ORGANIZATION_INFRASTRUCTURE: 'organization_infrastructure', SANDBOX_RESIZE: 'sandbox_resize', } as const ================================================ FILE: apps/api/src/common/constants/header.constants.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const CustomHeaders: { [key: string]: { name: string description?: string required?: boolean schema?: { type?: string } } } = { ORGANIZATION_ID: { name: 'X-Daytona-Organization-ID', description: 'Use with JWT to specify the organization ID', required: false, schema: { type: 'string', }, }, SOURCE: { name: 'X-Daytona-Source', description: 'Use to specify the source of the request', required: false, schema: { type: 'string', }, }, SDK_VERSION: { name: 'X-Daytona-SDK-Version', description: 'Use to specify the version of the SDK', required: false, schema: { type: 'string', }, }, } ================================================ FILE: apps/api/src/common/decorators/auth-context.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { createParamDecorator, ExecutionContext } from '@nestjs/common' export const AuthContext = createParamDecorator((data: unknown, ctx: ExecutionContext) => { const request = ctx.switchToHttp().getRequest() return request.user }) ================================================ FILE: apps/api/src/common/decorators/autocommit-offset.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { KafkaContext } from '@nestjs/microservices' /** * Auto commit offset decorator for Kafka messages. The offset is committed only if the method completes successfully. * @returns A decorator function that commits the offset of the Kafka message. 
*/ export function AutoCommitOffset(): MethodDecorator { return function (target: any, propertyKey: string | symbol, descriptor: PropertyDescriptor): PropertyDescriptor { const originalMethod = descriptor.value descriptor.value = async function (...args: any[]) { const result = await originalMethod.apply(this, args) // Find KafkaContext in arguments const context = args.find((arg) => arg instanceof KafkaContext) if (context) { const message = context.getMessage() const partition = context.getPartition() const topic = context.getTopic() const consumer = context.getConsumer() await consumer.commitOffsets([ { topic, partition, offset: String(Number(message.offset) + 1), }, ]) } return result } return descriptor } } ================================================ FILE: apps/api/src/common/decorators/distributed-lock.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { RedisLockProvider } from '../../sandbox/common/redis-lock.provider' type DistributedLockOptions = { lockKey?: string lockTtl?: number } /** * Redis lock decorator for exclusive execution. The lock is released automatically when the method completes. * redisLockProvider is required to be injected in the class. 
* @param options - The options for the Redis lock * @param options.lockKey - The key to use for the Redis lock * @param options.lockTtl - Time to live for the lock in seconds * @returns A decorator function that handles Redis locking */ export function DistributedLock(options?: DistributedLockOptions): MethodDecorator { return function (target: any, propertyKey: string, descriptor: PropertyDescriptor) { const originalMethod = descriptor.value descriptor.value = async function (...args: any[]) { if (!this.redisLockProvider) { throw new Error(`@DistributedLock requires 'redisLockProvider' property on ${target.constructor.name}`) } const redisLockProvider: RedisLockProvider = this.redisLockProvider // Generate lock key const lockKey = `lock:${options?.lockKey ?? target.constructor.name}.${propertyKey}` // Set default TTL if not provided const lockTtlMs = options?.lockTtl || 30 // 30 seconds default const hasLock = await redisLockProvider.lock(lockKey, lockTtlMs) if (!hasLock) { return } try { return await originalMethod.apply(this, args) } finally { await redisLockProvider.unlock(lockKey) } } } } ================================================ FILE: apps/api/src/common/decorators/log-execution.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Logger } from '@nestjs/common' // Parse threshold once at module load time let LOG_THRESHOLD = parseInt(process.env.LOG_EXECUTION_THRESHOLD_MILLISECONDS, 10) if (isNaN(LOG_THRESHOLD) || LOG_THRESHOLD <= 0) { LOG_THRESHOLD = 1000 // Default to 1000ms if not set or invalid } export function LogExecution(name?: string) { return function (target: any, propertyKey: string, descriptor: PropertyDescriptor) { const shouldLogExecutions = process.env.LOG_EXECUTIONS === 'true' if (!shouldLogExecutions) { return descriptor } // Wrap the original method with logging const originalMethod = descriptor.value const logger = new Logger(`Function:${target.constructor.name}`) descriptor.value = async function (...args: any[]) { const startTime = Date.now() const functionName = name || propertyKey try { const result = await originalMethod.apply(this, args) const duration = Date.now() - startTime if (duration > LOG_THRESHOLD) { logger.warn(`Function ${functionName} took a long time: ${duration}ms`) } return result } catch (error) { const duration = Date.now() - startTime logger.error(`Failed function: ${functionName} (took ${duration}ms)`, error.stack) throw error } } return descriptor } } ================================================ FILE: apps/api/src/common/decorators/on-async-event.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { OnEvent, OnEventMetadata } from '@nestjs/event-emitter' export function OnAsyncEvent({ event, options = {} }: OnEventMetadata): MethodDecorator { return OnEvent(event, { ...options, promisify: true, suppressErrors: false, }) } ================================================ FILE: apps/api/src/common/decorators/otel.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { trace, context, metrics, SpanStatusCode, Histogram } from '@opentelemetry/api' import { applyDecorators } from '@nestjs/common' // Lazy initialization to ensure SDK is started before getting tracer/meter const getTracer = () => trace.getTracer('') const getMeter = () => metrics.getMeter('') const executionHistograms = new Map() /** * Configuration options for span instrumentation */ export interface SpanConfig { /** * Custom name for the span. If not provided, uses `ClassName.methodName` format */ name?: string /** * Additional attributes to attach to the span */ attributes?: Record } /** * Configuration options for metric instrumentation */ export interface MetricConfig { /** * Custom name for the metric. If not provided, uses `ClassName.methodName` format */ name?: string /** * Description for the metrics being collected */ description?: string /** * Additional labels to attach to the metrics */ labels?: Record } /** * Configuration options for the combined instrumentation decorator */ export interface InstrumentationConfig { /** * Custom name for the span and metric. 
If not provided, uses `ClassName.methodName` format */ name?: string /** * Description for the metrics being collected */ description?: string /** * Additional labels/attributes to attach to spans and metrics */ labels?: Record /** * Enable trace collection (default: true) */ enableTraces?: boolean /** * Enable metrics collection (default: true) */ enableMetrics?: boolean } /** * Converts a string to snake_case for Prometheus-friendly metric names */ function toSnakeCase(str: string): string { return str .replace(/([A-Z])/g, '_$1') .toLowerCase() .replace(/^_/, '') .replace(/\./g, '_') } /** * Decorator for instrumenting methods with OpenTelemetry spans (traces only) * * @param config - Configuration object or string name for the span * */ export function WithSpan(config?: string | SpanConfig) { return (target: object, propertyKey: string | symbol, descriptor: PropertyDescriptor) => { const originalMethod = descriptor.value const methodName = String(propertyKey) descriptor.value = async function (...args: any[]) { const cfg: SpanConfig = typeof config === 'string' ? { name: config } : config || {} const { name, attributes = {} } = cfg const spanName = name || `${target.constructor.name}.${methodName}` const allAttributes = { component: target.constructor.name, method: methodName, ...attributes, } const span = getTracer().startSpan( spanName, { attributes: allAttributes, }, context.active(), ) return context.with(trace.setSpan(context.active(), span), async () => { try { const result = await originalMethod.apply(this, args) span.setStatus({ code: SpanStatusCode.OK }) return result } catch (error) { span.setStatus({ code: SpanStatusCode.ERROR, message: error instanceof Error ? error.message : String(error), }) span.recordException(error instanceof Error ? 
error : new Error(String(error))) throw error } finally { span.end() } }) } } } /** * Decorator for instrumenting methods with OpenTelemetry metrics (metrics only) * * Collects two metrics: * - Counter: `{name}_executions` - tracks number of executions with status (success/error) * - Histogram: `{name}_duration` - tracks execution duration in milliseconds * * @param config - Configuration object or string name for the metric * */ export function WithMetric(config?: string | MetricConfig) { return (target: object, propertyKey: string | symbol, descriptor: PropertyDescriptor) => { const originalMethod = descriptor.value const methodName = String(propertyKey) descriptor.value = async function (...args: any[]) { const cfg: MetricConfig = typeof config === 'string' ? { name: config } : config || {} const { name, description, labels = {} } = cfg const metricName = toSnakeCase(name || `${target.constructor.name}.${methodName}`) const allLabels = { component: target.constructor.name, method: methodName, ...labels, } // Get or create histogram for this method if (!executionHistograms.has(metricName)) { executionHistograms.set( metricName, getMeter().createHistogram(`${metricName}_duration`, { description: description || `Duration of executions for ${metricName}`, unit: 'ms', }), ) } const histogram = executionHistograms.get(metricName) if (!histogram) { throw new Error(`Histogram not found for metric: ${metricName}`) } const startTime = Date.now() let status: 'success' | 'error' = 'success' try { const result = await originalMethod.apply(this, args) return result } catch (error) { status = 'error' throw error } finally { const duration = Date.now() - startTime histogram.record(duration, { ...allLabels, status }) } } } } /** * Decorator for instrumenting methods with both OpenTelemetry traces and metrics * * This decorator composes @WithSpan and @WithMetric to provide both trace and metric collection. 
* You can selectively enable/disable traces or metrics using the config options. * * @param config - Configuration object or string name for the instrumentation */ export function WithInstrumentation(config?: string | InstrumentationConfig): MethodDecorator { const cfg: InstrumentationConfig = typeof config === 'string' ? { name: config } : config || {} const { enableTraces = true, enableMetrics = true, name, description, labels } = cfg const decorators: MethodDecorator[] = [] if (enableTraces) { decorators.push(WithSpan({ name, attributes: labels })) } if (enableMetrics) { decorators.push(WithMetric({ name, description, labels })) } return applyDecorators(...decorators) } ================================================ FILE: apps/api/src/common/decorators/page-limit.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { applyDecorators } from '@nestjs/common' import { ApiProperty } from '@nestjs/swagger' import { IsOptional, IsInt, Min, Max } from 'class-validator' import { Type } from 'class-transformer' export function PageLimit(defaultValue = 100) { return applyDecorators( ApiProperty({ name: 'limit', description: 'Number of results per page', required: false, type: Number, minimum: 1, maximum: 200, default: defaultValue, }), IsOptional(), Type(() => Number), IsInt(), Min(1), Max(200), ) } ================================================ FILE: apps/api/src/common/decorators/page-number.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { applyDecorators } from '@nestjs/common' import { ApiProperty } from '@nestjs/swagger' import { IsOptional, IsInt, Min } from 'class-validator' import { Type } from 'class-transformer' export function PageNumber(defaultValue = 1) { return applyDecorators( ApiProperty({ name: 'page', description: 'Page number of the results', required: false, type: Number, minimum: 1, default: defaultValue, }), IsOptional(), Type(() => Number), IsInt(), Min(1), ) } ================================================ FILE: apps/api/src/common/decorators/required-role.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Reflector } from '@nestjs/core' import { SystemRole } from '../../user/enums/system-role.enum' import { ApiRole } from '../interfaces/auth-context.interface' export const RequiredSystemRole = Reflector.createDecorator() export const RequiredApiRole = Reflector.createDecorator() ================================================ FILE: apps/api/src/common/decorators/runner-context.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { createParamDecorator, ExecutionContext } from '@nestjs/common' import { RunnerContext } from '../interfaces/runner-context.interface' export const RunnerContextDecorator = createParamDecorator((data: unknown, ctx: ExecutionContext): RunnerContext => { const request = ctx.switchToHttp().getRequest() return request.user as RunnerContext }) ================================================ FILE: apps/api/src/common/decorators/throttler-scope.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { SetMetadata } from '@nestjs/common' export const THROTTLER_SCOPE_KEY = 'throttler:scope' /** * Marks a route or controller with specific throttler scopes. * Only the specified throttlers will be applied to this route. * The 'authenticated' throttler always applies to authenticated routes. * * @example * // Apply sandbox-create throttler * @ThrottlerScope('sandbox-create') * @Post() * createSandbox() {} * * @example * // Apply multiple throttlers * @ThrottlerScope('sandbox-create', 'sandbox-lifecycle') * @Post() * createAndStart() {} */ export const ThrottlerScope = (...scopes: string[]) => SetMetadata(THROTTLER_SCOPE_KEY, scopes) ================================================ FILE: apps/api/src/common/decorators/to-array.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Transform } from 'class-transformer' /** * Decorator that transforms a value to an array. Useful for query parameters that can be a single value or an array of values. * * If the value is a primitive, it will return a single element array. If the value is already an array, it will return it as is. */ export function ToArray() { return Transform(({ value }) => { return value ? (Array.isArray(value) ? value : [value]) : undefined }) } ================================================ FILE: apps/api/src/common/decorators/track-job-execution.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ /** * Track job execution in activeJobs set. * @returns A decorator function that tracks execution of a job. 
*/ export function TrackJobExecution() { return function (target: any, propertyKey: string, descriptor: PropertyDescriptor) { const original = descriptor.value descriptor.value = async function (...args: any[]) { if (!this.activeJobs) { throw new Error(`@TrackExecution requires 'activeJobs' property on ${target.constructor.name}`) } this.activeJobs.add(propertyKey) try { return await original.apply(this, args) } finally { this.activeJobs.delete(propertyKey) } } } } ================================================ FILE: apps/api/src/common/dto/url.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { IsString } from 'class-validator' @ApiSchema({ name: 'Url' }) export class UrlDto { @ApiProperty({ description: 'URL response', }) @IsString() url: string constructor(url: string) { this.url = url } } ================================================ FILE: apps/api/src/common/guards/anonymous-rate-limit.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, ExecutionContext } from '@nestjs/common' import { Reflector } from '@nestjs/core' import { ThrottlerGuard, ThrottlerModuleOptions, ThrottlerRequest, ThrottlerStorage } from '@nestjs/throttler' import { Request } from 'express' @Injectable() export class AnonymousRateLimitGuard extends ThrottlerGuard { constructor(options: ThrottlerModuleOptions, storageService: ThrottlerStorage, reflector: Reflector) { super(options, storageService, reflector) } protected async getTracker(req: Request): Promise { // For anonymous requests, use IP address as tracker const ip = req.ips.length ? 
req.ips[0] : req.ip return `anonymous:${ip}` } protected generateKey(context: ExecutionContext, suffix: string, name: string): string { // Override to make rate limiting per-rate-limit-type, not per-route // This ensures all routes share the same counter for anonymous rate limiting return `${name}-${suffix}` } async handleRequest(requestProps: ThrottlerRequest): Promise { const { throttler } = requestProps // Apply anonymous throttler to ALL requests (with or without Bearer tokens) // This ensures we catch invalid/malicious tokens before they reach authentication if (throttler.name === 'anonymous') { return super.handleRequest(requestProps) } // Skip other throttlers in this guard return true } } ================================================ FILE: apps/api/src/common/guards/authenticated-rate-limit.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, Inject, ExecutionContext, Optional } from '@nestjs/common' import { ThrottlerGuard, ThrottlerRequest, ThrottlerModuleOptions, ThrottlerStorage } from '@nestjs/throttler' import { Reflector } from '@nestjs/core' import { Request } from 'express' import { getRedisConnectionToken } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { OrganizationService } from '../../organization/services/organization.service' import { THROTTLER_SCOPE_KEY } from '../decorators/throttler-scope.decorator' @Injectable() export class AuthenticatedRateLimitGuard extends ThrottlerGuard { private readonly logger = new Logger(AuthenticatedRateLimitGuard.name) constructor( options: ThrottlerModuleOptions, storageService: ThrottlerStorage, reflector: Reflector, @Inject(getRedisConnectionToken('throttler')) private readonly redis: Redis, @Optional() private readonly organizationService?: OrganizationService, ) { super(options, storageService, reflector) } protected async getTracker(req: Request): Promise { const 
user = req.user as any // Track by organization ID when available (shared quota per org) if (user?.organizationId) { return `auth:org:${user.organizationId}` } // Fallback to user ID for non-org routes (e.g., /users/me) if (user?.userId) { return `auth:user:${user.userId}` } // Ultimate fallback (shouldn't happen in normal flow) const ip = req.ips.length ? req.ips[0] : req.ip return `fallback:${ip}` } protected generateKey(context: ExecutionContext, suffix: string, name: string): string { // Override to make rate limiting per-rate-limit-type, not per-route // This ensures all routes share the same counter per rate limit type (authenticated, sandbox-create, sandbox-lifecycle) return `${name}-${suffix}` } async handleRequest(requestProps: ThrottlerRequest): Promise { const { context, throttler } = requestProps const request = context.switchToHttp().getRequest() const isAuthenticated = request.user && this.isValidAuthContext(request.user) // Skip rate limiting for M2M system roles (checked AFTER auth runs) if (this.isSystemRole(request.user)) { return true } // Skip anonymous throttler (handled by AnonymousRateLimitGuard on public routes) if (throttler.name === 'anonymous') { return true } // Skip failed-auth throttler (handled by FailedAuthRateLimitMiddleware and auth guards) if (throttler.name === 'failed-auth') { return true } // Check authenticated throttlers const authenticatedThrottlers = ['authenticated', 'sandbox-create', 'sandbox-lifecycle'] if (authenticatedThrottlers.includes(throttler.name)) { if (isAuthenticated) { // Only 'authenticated' applies to all routes by default // 'sandbox-create' and 'sandbox-lifecycle' only apply if explicitly configured via @SkipThrottle or @Throttle const isDefaultThrottler = throttler.name === 'authenticated' if (!isDefaultThrottler) { // Sandbox throttlers (sandbox-create, sandbox-lifecycle) are opt-in only // Check if this route declares this throttler scope via @ThrottlerScope() decorator const scopes = 
this.reflector.getAllAndOverride(THROTTLER_SCOPE_KEY, [ context.getHandler(), context.getClass(), ]) // If the route hasn't declared this throttler in its scope, skip it if (!scopes || !scopes.includes(throttler.name)) { return true } } const user = request.user as any const orgId = user?.organizationId if (orgId) { const orgLimits = await this.getCachedOrganizationRateLimits(orgId) if (orgLimits) { const customLimit = throttler.name === 'authenticated' ? orgLimits.authenticated : throttler.name === 'sandbox-create' ? orgLimits.sandboxCreate : throttler.name === 'sandbox-lifecycle' ? orgLimits.sandboxLifecycle : undefined const customTtlSeconds = throttler.name === 'authenticated' ? orgLimits.authenticatedTtlSeconds : throttler.name === 'sandbox-create' ? orgLimits.sandboxCreateTtlSeconds : throttler.name === 'sandbox-lifecycle' ? orgLimits.sandboxLifecycleTtlSeconds : undefined if (customLimit != null || customTtlSeconds != null) { const modifiedProps = { ...requestProps, ...(customLimit != null && { limit: customLimit }), ...(customTtlSeconds != null && { ttl: customTtlSeconds * 1000, blockDuration: customTtlSeconds * 1000, }), } return super.handleRequest(modifiedProps) } } } return super.handleRequest(requestProps) } return true } // For any other throttlers, defer to base ThrottlerGuard if (isAuthenticated) { return super.handleRequest(requestProps) } return true } private isValidAuthContext(user: any): boolean { return user && (user.userId || user.role) } private isSystemRole(user: any): boolean { // Skip rate limiting for M2M system roles (proxy, runner, ssh-gateway) return user?.role === 'ssh-gateway' || user?.role === 'proxy' || user?.role === 'runner' } private async getCachedOrganizationRateLimits(organizationId: string): Promise<{ authenticated: number | null sandboxCreate: number | null sandboxLifecycle: number | null authenticatedTtlSeconds: number | null sandboxCreateTtlSeconds: number | null sandboxLifecycleTtlSeconds: number | null } | null> { // 
If OrganizationService is not available (e.g., in UserModule), use default rate limits if (!this.organizationService) { return null } try { const cacheKey = `organization:rate-limits:${organizationId}` const cachedLimits = await this.redis.get(cacheKey) if (cachedLimits) { return JSON.parse(cachedLimits) } const organization = await this.organizationService.findOne(organizationId) if (organization) { const limits = { authenticated: organization.authenticatedRateLimit, sandboxCreate: organization.sandboxCreateRateLimit, sandboxLifecycle: organization.sandboxLifecycleRateLimit, authenticatedTtlSeconds: organization.authenticatedRateLimitTtlSeconds, sandboxCreateTtlSeconds: organization.sandboxCreateRateLimitTtlSeconds, sandboxLifecycleTtlSeconds: organization.sandboxLifecycleRateLimitTtlSeconds, } await this.redis.set(cacheKey, JSON.stringify(limits), 'EX', 60) return limits } return null } catch (error) { this.logger.error('Error getting cached organization rate limits:', error) return null } } } ================================================ FILE: apps/api/src/common/interceptors/content-type.interceptors.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, NestInterceptor, ExecutionContext, CallHandler, Logger } from '@nestjs/common' import { Observable } from 'rxjs' @Injectable() export class ContentTypeInterceptor implements NestInterceptor { private readonly logger = new Logger(ContentTypeInterceptor.name) async intercept(context: ExecutionContext, next: CallHandler): Promise> { const request = context.switchToHttp().getRequest() // Check if we have raw body data but no parsed body if (request.readable) { // Create a promise to handle the body parsing await new Promise((resolve, reject) => { let rawBody = '' // Collect the raw body data request.on('data', (chunk: Buffer) => { rawBody += chunk.toString() }) // Once we have all the data, try to parse it as JSON request.on('end', () => { try { if (rawBody) { request.body = JSON.parse(rawBody) request.headers['content-type'] = 'application/json' } resolve() } catch (e) { this.logger.error('Failed to parse JSON body:', e) resolve() // Still resolve even on error to prevent hanging } }) // Handle potential errors request.on('error', (error) => { this.logger.error('Error reading request body:', error) reject(error) }) }) } // Add Content-Type header if it's missing and there's a request body if (request.body && Object.keys(request.body).length > 0 && !request.get('content-type')) { request.headers['content-type'] = 'application/json' } return next.handle() } } ================================================ FILE: apps/api/src/common/interfaces/auth-context.interface.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiKey } from '../../api-key/api-key.entity'
import { OrganizationUser } from '../../organization/entities/organization-user.entity'
import { Organization } from '../../organization/entities/organization.entity'
import { SystemRole } from '../../user/enums/system-role.enum'
import { ProxyContext } from './proxy-context.interface'
import { RunnerContext } from './runner-context.interface'
import { SshGatewayContext } from './ssh-gateway-context.interface'
import { RegionProxyContext } from './region-proxy.interface'
import { RegionSSHGatewayContext } from './region-ssh-gateway.interface'
import { OtelCollectorContext } from './otel-collector-context.interface'
import { HealthCheckContext } from './health-check-context.interface'

// Minimal contract shared by every authenticated principal: a role is always present.
export interface BaseAuthContext {
  role: ApiRole
}

// Union of user system roles and machine-to-machine (M2M) service roles.
export type ApiRole =
  | SystemRole
  | 'proxy'
  | 'runner'
  | 'ssh-gateway'
  | 'region-proxy'
  | 'region-ssh-gateway'
  | 'otel-collector'
  | 'health-check'

// Context for an authenticated user (JWT or API key).
export interface AuthContext extends BaseAuthContext {
  userId: string
  email: string
  apiKey?: ApiKey
  organizationId?: string
  runnerId?: string
}

// Type guard: user contexts are distinguished by the presence of 'userId'.
export function isAuthContext(user: BaseAuthContext): user is AuthContext {
  return 'userId' in user
}

// User context scoped to a specific organization; organizationId is required here.
export interface OrganizationAuthContext extends AuthContext {
  organizationId: string
  organization: Organization
  organizationUser?: OrganizationUser
}

// Type guard keyed on the presence of 'organizationId'.
export function isOrganizationAuthContext(user: BaseAuthContext): user is OrganizationAuthContext {
  return 'organizationId' in user
}

// Every auth context shape that may appear on request.user.
export type AuthContextType =
  | AuthContext
  | OrganizationAuthContext
  | ProxyContext
  | RunnerContext
  | SshGatewayContext
  | RegionProxyContext
  | RegionSSHGatewayContext
  | OtelCollectorContext
  | HealthCheckContext
* SPDX-License-Identifier: AGPL-3.0 */ import { BaseAuthContext } from './auth-context.interface' export interface HealthCheckContext extends BaseAuthContext { role: 'health-check' } export function isHealthCheckContext(user: BaseAuthContext): user is HealthCheckContext { return 'role' in user && user.role === 'health-check' } ================================================ FILE: apps/api/src/common/interfaces/otel-collector-context.interface.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { BaseAuthContext } from './auth-context.interface' export interface OtelCollectorContext extends BaseAuthContext { role: 'otel-collector' } export function isOtelCollectorContext(user: BaseAuthContext): user is OtelCollectorContext { return 'role' in user && user.role === 'otel-collector' } ================================================ FILE: apps/api/src/common/interfaces/otel-config.interface.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export interface OTELConfig { enabled: boolean endpoint: string headers: Record } ================================================ FILE: apps/api/src/common/interfaces/paginated-list.interface.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export interface PaginatedList { items: T[] total: number page: number totalPages: number nextToken?: string } ================================================ FILE: apps/api/src/common/interfaces/proxy-context.interface.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import { BaseAuthContext } from './auth-context.interface'

// M2M auth context for the shared proxy service (no user identity).
export interface ProxyContext extends BaseAuthContext {
  role: 'proxy'
}

// Type guard: matches contexts authenticated as the proxy service.
export function isProxyContext(user: BaseAuthContext): user is ProxyContext {
  return 'role' in user && user.role === 'proxy'
}

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { BaseAuthContext } from './auth-context.interface'

// M2M auth context for a region-scoped proxy; carries the region it serves.
export interface RegionProxyContext extends BaseAuthContext {
  role: 'region-proxy'
  regionId: string
}

// Type guard: matches contexts authenticated as a region proxy.
export function isRegionProxyContext(user: BaseAuthContext): user is RegionProxyContext {
  return 'role' in user && user.role === 'region-proxy'
}

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { BaseAuthContext } from './auth-context.interface'

// M2M auth context for a region-scoped SSH gateway; carries the region it serves.
export interface RegionSSHGatewayContext extends BaseAuthContext {
  role: 'region-ssh-gateway'
  regionId: string
}

// Type guard: matches contexts authenticated as a region SSH gateway.
export function isRegionSSHGatewayContext(user: BaseAuthContext): user is RegionSSHGatewayContext {
  return 'role' in user && user.role === 'region-ssh-gateway'
}
* SPDX-License-Identifier: AGPL-3.0 */ import { BaseAuthContext } from './auth-context.interface' import { Runner } from '../../sandbox/entities/runner.entity' export interface RunnerContext extends BaseAuthContext { role: 'runner' runnerId: string runner: Runner } export function isRunnerContext(user: BaseAuthContext): user is RunnerContext { return 'role' in user && user.role === 'runner' } ================================================ FILE: apps/api/src/common/interfaces/ssh-gateway-context.interface.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { BaseAuthContext } from './auth-context.interface' export interface SshGatewayContext extends BaseAuthContext { role: 'ssh-gateway' } export function isSshGatewayContext(user: BaseAuthContext): user is SshGatewayContext { return 'role' in user && user.role === 'ssh-gateway' } ================================================ FILE: apps/api/src/common/interfaces/trackable-job-executions.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export interface TrackableJobExecutions { activeJobs: Set } ================================================ FILE: apps/api/src/common/middleware/failed-auth-rate-limit.middleware.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, NestMiddleware, Inject, Logger } from '@nestjs/common' import { Request, Response, NextFunction } from 'express' import { ThrottlerException } from '@nestjs/throttler' import { getRedisConnectionToken } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { TypedConfigService } from '../../config/typed-config.service' import { setRateLimitHeaders } from '../utils/rate-limit-headers.util' /** * Middleware that checks if an IP is blocked due to too many failed auth attempts. * Runs BEFORE auth guards to block requests early and prevent wasting resources on auth. * * Flow: * 1. Request comes in * 2. This middleware checks Redis if IP has exceeded failed auth limit (isBlocked) * 3. If blocked: return 429 with rate limit headers immediately * 4. If not blocked: continue to auth guards */ @Injectable() export class FailedAuthRateLimitMiddleware implements NestMiddleware { private readonly logger = new Logger(FailedAuthRateLimitMiddleware.name) constructor( @Inject(getRedisConnectionToken('throttler')) private readonly redis: Redis, private readonly configService: TypedConfigService, ) {} async use(req: Request, res: Response, next: NextFunction) { const ip = req.ips.length ? 
req.ips[0] : req.ip const throttlerName = 'failed-auth' const tracker = `${throttlerName}:${ip}` // Get failed-auth config from TypedConfigService const failedAuthConfig = this.configService.get('rateLimit.failedAuth') if (!failedAuthConfig || !failedAuthConfig.ttl || !failedAuthConfig.limit) { // If failed-auth throttler is not configured, skip return next() } try { // Build the Redis key (same format as ThrottlerStorageRedisService) const keyPrefix = this.redis.options.keyPrefix || '' const key = `${throttlerName}-${tracker}` const blockedKey = `${keyPrefix}{${key}:${throttlerName}}:blocked` // Check if IP is blocked const isBlocked = await this.redis.get(blockedKey) if (isBlocked) { // Get TTL for the blocked key const ttl = await this.redis.pttl(blockedKey) const ttlSeconds = Math.ceil(ttl / 1000) // Set rate limit headers to inform client setRateLimitHeaders(res, { throttlerName, limit: failedAuthConfig.limit, remaining: 0, resetSeconds: ttlSeconds, retryAfterSeconds: ttlSeconds, }) throw new ThrottlerException() } // Not blocked, continue to auth guards next() } catch (error) { if (error instanceof ThrottlerException) { throw error } // If there's an error checking the rate limit, log it and allow the request to continue // We don't want rate limiting failures to block legitimate requests this.logger.error('Failed to check failed-auth rate limit:', error) next() } } } ================================================ FILE: apps/api/src/common/middleware/maintenance.middleware.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, NestMiddleware, HttpException, HttpStatus } from '@nestjs/common' import { Request, Response, NextFunction } from 'express' import { TypedConfigService } from '../../config/typed-config.service' @Injectable() export class MaintenanceMiddleware implements NestMiddleware { constructor(private readonly configService: TypedConfigService) {} use(req: Request, res: Response, next: NextFunction) { const isMaintenanceMode = this.configService.get('maintananceMode') if (isMaintenanceMode) { throw new HttpException( { statusCode: HttpStatus.SERVICE_UNAVAILABLE, message: 'Service is currently under maintenance. Please try again later.', error: 'Service Unavailable', }, HttpStatus.SERVICE_UNAVAILABLE, ) } next() } } ================================================ FILE: apps/api/src/common/middleware/version-header.middleware.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, NestMiddleware } from '@nestjs/common' import { Request, Response, NextFunction } from 'express' import { TypedConfigService } from '../../config/typed-config.service' @Injectable() export class VersionHeaderMiddleware implements NestMiddleware { private readonly version: string | undefined constructor(private readonly configService: TypedConfigService) { this.version = this.configService.get('version') } use(req: Request, res: Response, next: NextFunction) { if (this.version) { res.setHeader('X-Daytona-Api-Version', `${this.version}`) } next() } } ================================================ FILE: apps/api/src/common/modules/body-parser-error.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Module, OnModuleInit, BadRequestException } from '@nestjs/common' import { HttpAdapterHost } from '@nestjs/core' import { Request, Response, NextFunction } from 'express' @Module({}) export class BodyParserErrorModule implements OnModuleInit { constructor(private readonly httpAdapterHost: HttpAdapterHost) {} onModuleInit() { const app = this.httpAdapterHost.httpAdapter.getInstance() app.use((err: Error & { body?: unknown }, req: Request, res: Response, next: NextFunction) => { if (err instanceof SyntaxError && 'body' in err) { const response = new BadRequestException('Invalid JSON in request body').getResponse() return res.status(400).json(response) } next(err) }) } } ================================================ FILE: apps/api/src/common/providers/openfeature-posthog.provider.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import type { EvaluationContext, Provider, ResolutionDetails, Hook, JsonValue, Logger } from '@openfeature/server-sdk' import { TypeMismatchError, StandardResolutionReasons, ErrorCode } from '@openfeature/server-sdk' import { PostHog } from 'posthog-node' import type { PostHogOptions } from 'posthog-node' export interface OpenFeaturePostHogProviderConfig { /** PostHog project API key (starts with phc_) - if not provided, all flags return default values */ apiKey?: string /** Optional PostHog client options */ clientOptions?: PostHogOptions /** Whether to evaluate flags locally (default: false) */ evaluateLocally?: boolean } export class OpenFeaturePostHogProvider implements Provider { readonly metadata = { name: 'simple-posthog-provider', } as const private readonly client?: PostHog private readonly evaluateLocally: boolean private readonly isConfigured: boolean constructor(config: OpenFeaturePostHogProviderConfig = {}) { this.evaluateLocally = config.evaluateLocally ?? 
false this.isConfigured = !!config.apiKey if (config.apiKey) { try { this.client = new PostHog(config.apiKey, config.clientOptions) } catch (error) { console.warn('Failed to initialize PostHog client:', error) } } } async resolveBooleanEvaluation( flagKey: string, defaultValue: boolean, context: EvaluationContext, logger: Logger, ): Promise> { logger.debug(`Evaluating flag ${flagKey} with context and default value:`, context, defaultValue) const result = await this.evaluateFlag(flagKey, defaultValue, context, logger) if (typeof result.value === 'boolean') { return result as ResolutionDetails } throw new TypeMismatchError(`Flag ${flagKey} expected boolean, got ${typeof result.value}`) } async resolveStringEvaluation( flagKey: string, defaultValue: string, context: EvaluationContext, logger: Logger, ): Promise> { const result = await this.evaluateFlag(flagKey, defaultValue, context, logger) if (typeof result.value === 'string') { return result as ResolutionDetails } throw new TypeMismatchError(`Flag ${flagKey} expected string, got ${typeof result.value}`) } async resolveNumberEvaluation( flagKey: string, defaultValue: number, context: EvaluationContext, logger: Logger, ): Promise> { const result = await this.evaluateFlag(flagKey, defaultValue, context, logger) if (typeof result.value === 'number') { return result as ResolutionDetails } throw new TypeMismatchError(`Flag ${flagKey} expected number, got ${typeof result.value}`) } async resolveObjectEvaluation( flagKey: string, defaultValue: T, context: EvaluationContext, logger: Logger, ): Promise> { // If PostHog is not configured, return default value if (!this.isConfigured || !this.client) { logger.debug(`PostHog not configured, returning default value for flag ${flagKey}`) return { value: defaultValue, reason: StandardResolutionReasons.DEFAULT, } } const targetingKey = this.getTargetingKey(context) if (!targetingKey) { return { value: defaultValue, reason: StandardResolutionReasons.ERROR, errorCode: 
ErrorCode.GENERAL, } } try { const flagContext = this.buildFlagContext(context) const payload = await this.client.getFeatureFlagPayload(flagKey, targetingKey, undefined, { onlyEvaluateLocally: this.evaluateLocally, sendFeatureFlagEvents: true, ...flagContext, }) if (payload === undefined) { return { value: defaultValue, reason: StandardResolutionReasons.DEFAULT, errorCode: ErrorCode.FLAG_NOT_FOUND, } } return { value: payload as T, reason: StandardResolutionReasons.TARGETING_MATCH, } } catch (error) { logger.error(`Error evaluating flag ${flagKey}:`, error) return { value: defaultValue, reason: StandardResolutionReasons.ERROR, errorCode: ErrorCode.GENERAL, } } } private async evaluateFlag( flagKey: string, defaultValue: any, context: EvaluationContext, logger: Logger, ): Promise> { // If PostHog is not configured, return default value if (!this.isConfigured || !this.client) { logger.debug(`PostHog not configured, returning default value for flag ${flagKey}`) return { value: defaultValue, reason: StandardResolutionReasons.DEFAULT, } } const targetingKey = this.getTargetingKey(context) if (!targetingKey) { logger.warn('No targetingKey provided in context') return { value: defaultValue, reason: StandardResolutionReasons.ERROR, errorCode: ErrorCode.GENERAL, } } try { const flagContext = this.buildFlagContext(context) const flagValue = await this.client.getFeatureFlag(flagKey, targetingKey, { onlyEvaluateLocally: this.evaluateLocally, sendFeatureFlagEvents: true, ...flagContext, }) if (flagValue === undefined) { return { value: defaultValue, reason: StandardResolutionReasons.DEFAULT, errorCode: ErrorCode.FLAG_NOT_FOUND, } } return { value: flagValue, reason: StandardResolutionReasons.TARGETING_MATCH, } } catch (error) { logger.error(`Error evaluating flag ${flagKey}:`, error) return { value: defaultValue, reason: StandardResolutionReasons.ERROR, errorCode: ErrorCode.GENERAL, } } } private getTargetingKey(context: EvaluationContext): string | undefined { return 
context.targetingKey } private buildFlagContext(context: EvaluationContext) { const flagContext: { groups?: Record groupProperties?: Record> personProperties?: Record } = {} // Extract groups from context if (context.groups) { flagContext.groups = context.groups as Record } // Extract custom properties if (context.personProperties) { flagContext.personProperties = context.personProperties as Record } if (context.groupProperties) { flagContext.groupProperties = context.groupProperties as Record> } // Use organizationId from context attributes if (context.organizationId && !flagContext.groups?.organization) { flagContext.groups = { ...flagContext.groups, organization: context.organizationId as string, } } return flagContext } get hooks(): Hook[] { return [] } async onClose(): Promise { if (this.client) { await this.client.shutdown() } } } ================================================ FILE: apps/api/src/common/repositories/base.repository.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Repository, DataSource, FindOptionsWhere, FindOneOptions, FindManyOptions, EntityTarget, SelectQueryBuilder, DeleteResult, } from 'typeorm' import { ObjectLiteral } from 'typeorm/common/ObjectLiteral' import { EventEmitter2 } from '@nestjs/event-emitter' /** * Abstract base repository class that provides common CRUD operations with event emission. 
* * @template TEntity - The entity class this repository manages */ export abstract class BaseRepository { protected repository: Repository constructor( protected readonly dataSource: DataSource, protected readonly eventEmitter: EventEmitter2, protected readonly entityClass: EntityTarget, ) { this.repository = this.dataSource.getRepository(entityClass) } /** * See reference for {@link Repository.findOne} */ async findOne(options: FindOneOptions): Promise { return this.repository.findOne(options) } /** * See reference for {@link Repository.findOneBy} */ async findOneBy(where: FindOptionsWhere | FindOptionsWhere[]): Promise { return this.repository.findOneBy(where) } /** * See reference for {@link Repository.findOneByOrFail} */ async findOneByOrFail(where: FindOptionsWhere | FindOptionsWhere[]): Promise { return this.repository.findOneByOrFail(where) } /** * See reference for {@link Repository.findOneOrFail} */ async findOneOrFail(options: FindOneOptions): Promise { return this.repository.findOneOrFail(options) } /** * See reference for {@link Repository.find} */ async find(options?: FindManyOptions): Promise { return this.repository.find(options) } /** * See reference for {@link Repository.findAndCount} */ async findAndCount(options?: FindManyOptions): Promise<[TEntity[], number]> { return this.repository.findAndCount(options) } /** * See reference for {@link Repository.count} */ async count(options?: FindManyOptions): Promise { return this.repository.count(options) } /** * Returns the entity manager for the repository. Use this only when you need to perform raw SQL queries. 
* * See reference for {@link Repository.manager} */ get manager() { return this.repository.manager } /** * See reference for {@link Repository.createQueryBuilder} */ createQueryBuilder(alias?: string): SelectQueryBuilder { return this.repository.createQueryBuilder(alias) } /** * See reference for {@link Repository.delete} */ async delete(criteria: FindOptionsWhere | FindOptionsWhere[]): Promise { return this.repository.delete(criteria) } /** * Inserts a new entity into the database. * * Uses {@link Repository.insert} to insert the entity into the database. * * @returns The inserted entity. */ abstract insert(entity: TEntity): Promise /** * Partially updates an entity in the database. * * Uses {@link Repository.update} to update the entity in the database. * * @param id - The ID of the entity to update. * @param params.updateData - The partial data to update. * @param params.entity - Optional pre-fetched entity to use instead of fetching from the database when not performing a raw update. * @param raw - If true, performs only the database update via {@link Repository.update}, * skipping entity fetching, domain logic (validation, derived fields), and event emission. * * @returns The updated entity or void if `raw` is true. */ abstract update(id: string, params: { updateData: Partial; entity?: TEntity }, raw: true): Promise abstract update(id: string, params: { updateData: Partial; entity?: TEntity }, raw?: false): Promise abstract update( id: string, params: { updateData: Partial; entity?: TEntity }, raw?: boolean, ): Promise } ================================================ FILE: apps/api/src/common/utils/api-key.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import * as crypto from 'crypto' export function generateRandomString(size: number): string { return crypto.randomBytes(size).toString('hex') } export function generateApiKeyValue(): string { return `dtn_${generateRandomString(32)}` } export function generateApiKeyHash(value: string): string { return crypto.createHash('sha256').update(value).digest('hex') } ================================================ FILE: apps/api/src/common/utils/app-mode.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ type AppMode = 'api' | 'worker' | 'all' let appMode = process.env.APP_MODE as AppMode // Default to all mode if no app mode is set if (!appMode) { appMode = 'all' } // Validate app mode if (!Object.values(['api', 'worker', 'all']).includes(appMode)) { throw new Error(`Invalid app mode: ${appMode}`) } /** * Returns true if the API should be started */ export function isApiEnabled(): boolean { return appMode === 'api' || appMode === 'all' } /** * Returns true if the worker should be started */ export function isWorkerEnabled(): boolean { return appMode === 'worker' || appMode === 'all' } /** * Returns the app mode */ export function getAppMode(): AppMode { return appMode } ================================================ FILE: apps/api/src/common/utils/delete-s3-bucket.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { S3Client, ListObjectsV2Command, DeleteObjectsCommand, ListObjectVersionsCommand, DeleteBucketCommand, } from '@aws-sdk/client-s3' export async function deleteS3Bucket(s3: S3Client, bucket: string): Promise { // First delete all object versions & delete markers (if any exist) let keyMarker: string | undefined let versionIdMarker: string | undefined do { const versions = await s3.send( new ListObjectVersionsCommand({ Bucket: bucket, KeyMarker: keyMarker, VersionIdMarker: versionIdMarker, }), ) const items = [ ...(versions.Versions || []).map((v) => ({ Key: v.Key, VersionId: v.VersionId })), ...(versions.DeleteMarkers || []).map((d) => ({ Key: d.Key, VersionId: d.VersionId })), ] if (items.length) { await s3.send( new DeleteObjectsCommand({ Bucket: bucket, Delete: { Objects: items, Quiet: true }, }), ) } keyMarker = versions.NextKeyMarker versionIdMarker = versions.NextVersionIdMarker } while (keyMarker || versionIdMarker) // Then delete any remaining live objects (for unversioned buckets) let continuationToken: string | undefined do { const list = await s3.send( new ListObjectsV2Command({ Bucket: bucket, ContinuationToken: continuationToken, }), ) if (list.Contents && list.Contents.length) { await s3.send( new DeleteObjectsCommand({ Bucket: bucket, Delete: { Objects: list.Contents.map((o) => ({ Key: o.Key })), Quiet: true, }, }), ) } continuationToken = list.NextContinuationToken } while (continuationToken) // Finally delete the (now-empty) bucket await s3.send(new DeleteBucketCommand({ Bucket: bucket })) } ================================================ FILE: apps/api/src/common/utils/docker-image.util.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ /** * Interface representing parsed Docker image information */ export interface DockerImageInfo { /** The full registry hostname (e.g. 
'registry:5000' or 'docker.io') */ registry?: string /** The project/organization name (e.g. 'test' in 'registry:5000/test/image') */ project?: string /** The repository/image name (e.g. 'image' in 'registry:5000/test/image') */ repository: string /** The tag or digest (e.g. 'latest' or 'sha256:123...') */ tag?: string /** The full original image name */ originalName: string } export class DockerImage implements DockerImageInfo { registry?: string project?: string repository: string tag?: string originalName: string constructor(info: DockerImageInfo) { this.registry = info.registry this.project = info.project this.repository = info.repository this.tag = info.tag this.originalName = info.originalName } getFullName(): string { let name = this.repository if (this.project) { name = `${this.project}/${name}` } if (this.registry) { name = `${this.registry}/${name}` } if (this.tag) { name = `${name}:${this.tag}` } return name } } /** * Parses a Docker image name into its component parts * * @param imageName - The full image name (e.g. 
'registry:5000/test/image:latest') * @returns Parsed image information * * Examples: * - registry:5000/test/image:latest -> { registry: 'registry:5000', project: 'test', repository: 'image', tag: 'latest' } * - docker.io/library/ubuntu:20.04 -> { registry: 'docker.io', project: 'library', repository: 'ubuntu', tag: '20.04' } * - ubuntu:20.04 -> { registry: undefined, project: undefined, repository: 'ubuntu', tag: '20.04' } * - ubuntu -> { registry: undefined, project: undefined, repository: 'ubuntu', tag: undefined } */ export function parseDockerImage(imageName: string): DockerImage { // Handle empty or invalid input if (!imageName) { throw new Error('Image name cannot be empty') } const result: DockerImageInfo = { originalName: imageName, repository: '', } // Check for digest format first let parts: string[] = [] if (imageName.includes('@sha256:')) { const [nameWithoutDigest, digest] = imageName.split('@sha256:') if (!nameWithoutDigest || !digest || !/^[a-f0-9]{64}$/.test(digest)) { throw new Error('Invalid digest format. Must be image@sha256:64_hex_characters') } result.tag = `sha256:${digest}` // Split remaining parts parts = nameWithoutDigest.split('/') // Throw if a part is empty if (parts.some((part) => part === '')) { throw new Error('Invalid image name. A part is empty') } } else { const lastSlashIndex = imageName.lastIndexOf('/') const lastColonIndex = imageName.lastIndexOf(':') const hasTag = lastColonIndex > lastSlashIndex const nameWithoutTag = hasTag ? imageName.substring(0, lastColonIndex) : imageName if (hasTag) { result.tag = imageName.substring(lastColonIndex + 1) } // Split remaining parts parts = nameWithoutTag.split('/') } // Check if first part looks like a registry hostname (contains '.' 
or ':' or is 'localhost') if (parts.length >= 2 && (parts[0].includes('.') || parts[0].includes(':') || parts[0] === 'localhost')) { result.registry = parts[0] parts.shift() // Remove registry part } // Handle remaining parts if (parts.length >= 2) { // Format: [registry/]project/repository result.project = parts.slice(0, -1).join('/') result.repository = parts[parts.length - 1] } else { // Format: repository result.repository = parts[0] } return new DockerImage(result) } /** * Checks if the Dockerfile content contains any FROM images that may require registry credentials. * This includes: * - Private registry images (e.g., 'myregistry.com/image', 'registry:5000/image') * - Private Docker Hub images (e.g., 'username/my-private-image') * * @param dockerfileContent - The full Dockerfile content as a string * @returns true if any FROM image may require credentials, false otherwise * * Example: * - FROM node:18 -> false (public Docker Hub library image) * - FROM username/my-image:0.0.1 -> true (private Docker Hub image) * - FROM myregistry.com/myimage:latest -> true (private registry) * - FROM registry:5000/test/image -> true (private registry) */ export function checkDockerfileHasRegistryPrefix(dockerfileContent: string): boolean { const lines = dockerfileContent.split('\n') // Regex to match FROM statements const fromRegex = /^\s*FROM\s+(?:--[a-z-]+=[^\s]+\s+)*([^\s]+)(?:\s+AS\s+[^\s]+)?/i for (const line of lines) { // Remove inline comments (everything after #) const lineWithoutComment = line.split('#')[0] const trimmedLine = lineWithoutComment.trim() // Skip empty lines and comment-only lines if (!trimmedLine) { continue } const match = fromRegex.exec(trimmedLine) if (match && match[1]) { const imageName = match[1].trim() // Check if image has a path component (contains '/') // This covers both private registries and private Docker Hub images (namespace/image) if (imageName.includes('/')) { return true } } } return false } 
================================================
FILE: apps/api/src/common/utils/email.util.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

export class EmailUtils {
  /** Canonical form for comparison/storage: lower-cased and trimmed. */
  static normalize(email: string): string {
    return email.toLowerCase().trim()
  }

  /** Case- and whitespace-insensitive email equality. */
  static areEqual(email1: string, email2: string): boolean {
    return this.normalize(email1) === this.normalize(email2)
  }
}

================================================
FILE: apps/api/src/common/utils/from-axios-error.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

// Extracts the most specific message available from an Axios-style error,
// falling back through response body -> error message -> the raw value.
export function fromAxiosError(error: any): Error {
  return new Error(error.response?.data?.message || error.response?.data || error.message || error)
}

================================================
FILE: apps/api/src/common/utils/naming-strategy.util.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { DefaultNamingStrategy, NamingStrategyInterface, Table } from 'typeorm'

/**
 * Deterministic constraint naming: `<table>_<col1>_<col2>_<suffix>` for
 * primary keys, foreign keys, unique constraints and indexes.
 */
export class CustomNamingStrategy extends DefaultNamingStrategy implements NamingStrategyInterface {
  primaryKeyName(tableOrName: Table | string, columnNames: string[]) {
    const table = tableOrName instanceof Table ? tableOrName.name : tableOrName
    const columnsSnakeCase = columnNames.join('_')
    return `${table}_${columnsSnakeCase}_pk`
  }

  foreignKeyName(tableOrName: Table | string, columnNames: string[]): string {
    const table = tableOrName instanceof Table ? tableOrName.name : tableOrName
    const columnsSnakeCase = columnNames.join('_')
    return `${table}_${columnsSnakeCase}_fk`
  }

  uniqueConstraintName(tableOrName: Table | string, columnNames: string[]): string {
    const table = tableOrName instanceof Table ? tableOrName.name : tableOrName
    const columnsSnakeCase = columnNames.join('_')
    return `${table}_${columnsSnakeCase}_unique`
  }

  indexName(tableOrName: Table | string, columnNames: string[]): string {
    const table = tableOrName instanceof Table ? tableOrName.name : tableOrName
    const columnsSnakeCase = columnNames.join('_')
    return `${table}_${columnsSnakeCase}_index`
  }
}

================================================
FILE: apps/api/src/common/utils/pino.util.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import pino, { TransportSingleOptions } from 'pino'
import { TypedConfigService } from '../../config/typed-config.service'

/*
 * This is a workaround to swap the message and object in the arguments array.
 * It is needed because the logger in nestjs-pino is not compatible with nestjs console logger.
 * ref: https://github.com/iamolegga/nestjs-pino/issues/2004
 */
export function swapMessageAndObject(
  this: pino.Logger,
  // Restored `Parameters<pino.LogFn>` — the generic argument was stripped by extraction.
  args: Parameters<pino.LogFn>,
  method: pino.LogFn,
  level: number,
): void {
  // Type guard helper
  const isPlainObject = (val: unknown): val is Record<string, unknown> => {
    return typeof val === 'object' && val !== null && !Array.isArray(val)
  }

  // NestJS Logger adds context as first arg, so check args[1] and args[2]
  if (args.length >= 3 && isPlainObject(args[0])) {
    const contextObj = args[0]
    const firstArg: unknown = args[1]
    const secondArg: unknown = args[2]

    // Case 1: message + Error
    if (typeof firstArg === 'string' && secondArg instanceof Error) {
      method.apply(this, [{ ...contextObj, err: secondArg }, firstArg, ...args.slice(3)])
      return
    }

    // Case 2: message + additional context object
    if (typeof firstArg === 'string' && isPlainObject(secondArg)) {
      method.apply(this, [{ ...contextObj, ...secondArg }, firstArg, ...args.slice(3)])
      return
    }

    // Case 3: message + stack trace string
    if (
      typeof firstArg === 'string' &&
      typeof secondArg === 'string' &&
      secondArg.includes('\n') &&
      secondArg.includes('at ')
    ) {
      method.apply(this, [{ ...contextObj, stack: secondArg }, firstArg, ...args.slice(3)])
      return
    }
  }

  // Handle case without context (direct Pino usage)
  if (args.length >= 2) {
    const firstArg: unknown = args[0]
    const secondArg: unknown = args[1]

    // Case 1: message + Error
    if (typeof firstArg === 'string' && secondArg instanceof Error) {
      method.apply(this, [{ err: secondArg }, firstArg, ...args.slice(2)])
      return
    }

    // Case 2: message + additional context object
    if (typeof firstArg === 'string' && isPlainObject(secondArg)) {
      method.apply(this, [secondArg, firstArg, ...args.slice(2)])
      return
    }

    // Case 3: message + stack trace string
    if (
      typeof firstArg === 'string' &&
      typeof secondArg === 'string' &&
      secondArg.includes('\n') &&
      secondArg.includes('at ')
    ) {
      method.apply(this, [{ stack: secondArg }, firstArg, ...args.slice(2)])
      return
    }
  }

  // Default behavior for other cases
  method.apply(this, args)
}

// NOTE(review): the original alias was a `ReturnType<...>` expression whose
// generic arguments were lost in extraction; structurally this is everything
// getPinoTransport reads from it — confirm the exact alias against git history.
type LogConfig = { console: { disabled: boolean } }

/*
 * Get the pino transport based on the configuration
 * @param isProduction - whether the application is in production mode
 * @param logConfig - the log configuration
 * @returns the pino transport, or undefined for raw NDJSON output in production
 */
export function getPinoTransport(
  isProduction: boolean,
  logConfig: LogConfig,
): TransportSingleOptions | undefined {
  switch (true) {
    // if console disabled, set destination to /dev/null
    case logConfig.console.disabled:
      return {
        target: 'pino/file',
        options: {
          destination: '/dev/null',
        },
      }
    // if production mode, no transport => raw NDJSON
    case isProduction:
      return undefined
    // if non-production use pino-pretty
    default:
      return {
        target: 'pino-pretty',
        options: {
          colorize: true,
          singleLine: true,
          ignore: 'pid,hostname',
        },
      }
  }
}

================================================
FILE: apps/api/src/common/utils/range-filter.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { MoreThanOrEqual, LessThanOrEqual, Between } from 'typeorm' /** * Creates a TypeORM range filter from min/max values * @param minValue - Minimum value (inclusive) * @param maxValue - Maximum value (inclusive) * @returns TypeORM comparison operator (Between, MoreThanOrEqual, LessThanOrEqual, or undefined) */ export function createRangeFilter(minValue?: T, maxValue?: T) { if (minValue !== undefined && maxValue !== undefined) { return Between(minValue, maxValue) } else if (minValue !== undefined) { return MoreThanOrEqual(minValue) } else if (maxValue !== undefined) { return LessThanOrEqual(maxValue) } return undefined } ================================================ FILE: apps/api/src/common/utils/rate-limit-headers.util.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Response } from 'express' /** * Utility functions for setting rate limit headers consistently across middleware and services */ export interface RateLimitHeadersOptions { throttlerName: string limit: number remaining: number resetSeconds: number retryAfterSeconds?: number } /** * Sets standard rate limit headers on a response * Follows the pattern: X-RateLimit-{Limit|Remaining|Reset}-{throttlerName} */ export function setRateLimitHeaders(response: Response, options: RateLimitHeadersOptions): void { const { throttlerName, limit, remaining, resetSeconds, retryAfterSeconds } = options response.setHeader(`X-RateLimit-Limit-${throttlerName}`, limit.toString()) response.setHeader(`X-RateLimit-Remaining-${throttlerName}`, remaining.toString()) response.setHeader(`X-RateLimit-Reset-${throttlerName}`, resetSeconds.toString()) if (retryAfterSeconds !== undefined) { response.setHeader('Retry-After', retryAfterSeconds.toString()) } } ================================================ FILE: apps/api/src/common/utils/uuid.ts ================================================ /* * 
Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export function isValidUuid(value: string): boolean { return /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i.test(value) } ================================================ FILE: apps/api/src/config/config.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get } from '@nestjs/common' import { TypedConfigService } from './typed-config.service' import { ApiOperation, ApiTags, ApiResponse } from '@nestjs/swagger' import { ConfigurationDto } from './dto/configuration.dto' @ApiTags('config') @Controller('config') export class ConfigController { constructor(private readonly configService: TypedConfigService) {} @Get() @ApiOperation({ summary: 'Get config' }) @ApiResponse({ status: 200, description: 'Daytona configuration', type: ConfigurationDto, }) getConfig() { return new ConfigurationDto(this.configService) } } ================================================ FILE: apps/api/src/config/configuration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ const configuration = { production: process.env.NODE_ENV === 'production', version: process.env.VERSION || '0.0.0-dev', environment: process.env.ENVIRONMENT, runMigrations: process.env.RUN_MIGRATIONS === 'true', port: parseInt(process.env.PORT, 10), appUrl: process.env.APP_URL, database: { host: process.env.DB_HOST, port: parseInt(process.env.DB_PORT || '5432', 10), username: process.env.DB_USERNAME, password: process.env.DB_PASSWORD, database: process.env.DB_DATABASE, tls: { enabled: process.env.DB_TLS_ENABLED === 'true', rejectUnauthorized: process.env.DB_TLS_REJECT_UNAUTHORIZED !== 'false', }, pool: { max: process.env.DB_POOL_MAX && parseInt(process.env.DB_POOL_MAX, 10), min: process.env.DB_POOL_MIN && parseInt(process.env.DB_POOL_MIN, 10), idleTimeoutMillis: process.env.DB_POOL_IDLE_TIMEOUT_MS && parseInt(process.env.DB_POOL_IDLE_TIMEOUT_MS, 10), connectionTimeoutMillis: process.env.DB_POOL_CONNECTION_TIMEOUT_MS && parseInt(process.env.DB_POOL_CONNECTION_TIMEOUT_MS, 10), }, }, redis: { host: process.env.REDIS_HOST, port: parseInt(process.env.REDIS_PORT || '6379', 10), username: process.env.REDIS_USERNAME, password: process.env.REDIS_PASSWORD, tls: process.env.REDIS_TLS === 'true' ? 
{} : undefined, }, posthog: { apiKey: process.env.POSTHOG_API_KEY, host: process.env.POSTHOG_HOST, environment: process.env.POSTHOG_ENVIRONMENT, }, oidc: { clientId: process.env.OIDC_CLIENT_ID || process.env.OID_CLIENT_ID, issuer: process.env.OIDC_ISSUER_BASE_URL || process.env.OID_ISSUER_BASE_URL, publicIssuer: process.env.PUBLIC_OIDC_DOMAIN, audience: process.env.OIDC_AUDIENCE || process.env.OID_AUDIENCE, managementApi: { enabled: process.env.OIDC_MANAGEMENT_API_ENABLED === 'true', clientId: process.env.OIDC_MANAGEMENT_API_CLIENT_ID, clientSecret: process.env.OIDC_MANAGEMENT_API_CLIENT_SECRET, audience: process.env.OIDC_MANAGEMENT_API_AUDIENCE, }, }, smtp: { host: process.env.SMTP_HOST, port: parseInt(process.env.SMTP_PORT || '587', 10), user: process.env.SMTP_USER, password: process.env.SMTP_PASSWORD, secure: process.env.SMTP_SECURE === 'true', from: process.env.SMTP_EMAIL_FROM || 'noreply@mail.daytona.io', }, defaultSnapshot: process.env.DEFAULT_SNAPSHOT, dashboardUrl: process.env.DASHBOARD_URL, // Default to empty string - dashboard will then hit '/api' dashboardBaseApiUrl: process.env.DASHBOARD_BASE_API_URL || '', transientRegistry: { url: process.env.TRANSIENT_REGISTRY_URL, admin: process.env.TRANSIENT_REGISTRY_ADMIN, password: process.env.TRANSIENT_REGISTRY_PASSWORD, projectId: process.env.TRANSIENT_REGISTRY_PROJECT_ID, }, internalRegistry: { url: process.env.INTERNAL_REGISTRY_URL, admin: process.env.INTERNAL_REGISTRY_ADMIN, password: process.env.INTERNAL_REGISTRY_PASSWORD, projectId: process.env.INTERNAL_REGISTRY_PROJECT_ID, }, s3: { endpoint: process.env.S3_ENDPOINT, stsEndpoint: process.env.S3_STS_ENDPOINT, region: process.env.S3_REGION, accessKey: process.env.S3_ACCESS_KEY, secretKey: process.env.S3_SECRET_KEY, defaultBucket: process.env.S3_DEFAULT_BUCKET, accountId: process.env.S3_ACCOUNT_ID, roleName: process.env.S3_ROLE_NAME, }, notificationGatewayDisabled: process.env.NOTIFICATION_GATEWAY_DISABLED === 'true', skipConnections: 
process.env.SKIP_CONNECTIONS === 'true', maxAutoArchiveInterval: parseInt(process.env.MAX_AUTO_ARCHIVE_INTERVAL || '43200', 10), maintananceMode: process.env.MAINTENANCE_MODE === 'true', disableCronJobs: process.env.DISABLE_CRON_JOBS === 'true', appRole: process.env.APP_ROLE || 'all', proxy: { domain: process.env.PROXY_DOMAIN, protocol: process.env.PROXY_PROTOCOL, apiKey: process.env.PROXY_API_KEY, templateUrl: process.env.PROXY_TEMPLATE_URL, toolboxUrl: (process.env.PROXY_TOOLBOX_BASE_URL || `${process.env.PROXY_PROTOCOL}://${process.env.PROXY_DOMAIN}`) + '/toolbox', }, audit: { toolboxRequestsEnabled: process.env.AUDIT_TOOLBOX_REQUESTS_ENABLED === 'true', retentionDays: process.env.AUDIT_LOG_RETENTION_DAYS ? parseInt(process.env.AUDIT_LOG_RETENTION_DAYS, 10) : undefined, consoleLogEnabled: process.env.AUDIT_CONSOLE_LOG_ENABLED === 'true', publish: { enabled: process.env.AUDIT_PUBLISH_ENABLED === 'true', batchSize: process.env.AUDIT_PUBLISH_BATCH_SIZE ? parseInt(process.env.AUDIT_PUBLISH_BATCH_SIZE, 10) : 1000, mode: (process.env.AUDIT_PUBLISH_MODE || 'direct') as 'direct' | 'kafka', storageAdapter: process.env.AUDIT_PUBLISH_STORAGE_ADAPTER || 'opensearch', opensearchIndexName: process.env.AUDIT_PUBLISH_OPENSEARCH_INDEX_NAME || 'audit-logs', }, }, kafka: { enabled: process.env.KAFKA_ENABLED === 'true', brokers: process.env.KAFKA_BROKERS || 'localhost:9092', clientId: process.env.KAFKA_CLIENT_ID, sasl: { mechanism: process.env.KAFKA_SASL_MECHANISM, username: process.env.KAFKA_SASL_USERNAME, password: process.env.KAFKA_SASL_PASSWORD, }, tls: { enabled: process.env.KAFKA_TLS_ENABLED === 'true', rejectUnauthorized: process.env.KAFKA_TLS_REJECT_UNAUTHORIZED !== 'false', }, }, opensearch: { nodes: process.env.OPENSEARCH_NODES || 'https://localhost:9200', username: process.env.OPENSEARCH_USERNAME, password: process.env.OPENSEARCH_PASSWORD, aws: { roleArn: process.env.OPENSEARCH_AWS_ROLE_ARN, region: process.env.OPENSEARCH_AWS_REGION, }, tls: { rejectUnauthorized: 
process.env.OPENSEARCH_TLS_REJECT_UNAUTHORIZED !== 'false', }, }, cronTimeZone: process.env.CRON_TIMEZONE, maxConcurrentBackupsPerRunner: parseInt(process.env.MAX_CONCURRENT_BACKUPS_PER_RUNNER || '6', 10), webhook: { authToken: process.env.SVIX_AUTH_TOKEN, serverUrl: process.env.SVIX_SERVER_URL, }, healthCheck: { apiKey: process.env.HEALTH_CHECK_API_KEY, }, sshGateway: { apiKey: process.env.SSH_GATEWAY_API_KEY, command: process.env.SSH_GATEWAY_COMMAND, publicKey: process.env.SSH_GATEWAY_PUBLIC_KEY, url: process.env.SSH_GATEWAY_URL, }, organizationSandboxDefaultLimitedNetworkEgress: process.env.ORGANIZATION_SANDBOX_DEFAULT_LIMITED_NETWORK_EGRESS === 'true', pylonAppId: process.env.PYLON_APP_ID, billingApiUrl: process.env.BILLING_API_URL, analyticsApiUrl: process.env.ANALYTICS_API_URL, defaultRunner: { domain: process.env.DEFAULT_RUNNER_DOMAIN, apiKey: process.env.DEFAULT_RUNNER_API_KEY, proxyUrl: process.env.DEFAULT_RUNNER_PROXY_URL, apiUrl: process.env.DEFAULT_RUNNER_API_URL, cpu: parseInt(process.env.DEFAULT_RUNNER_CPU || '4', 10), memory: parseInt(process.env.DEFAULT_RUNNER_MEMORY || '8', 10), disk: parseInt(process.env.DEFAULT_RUNNER_DISK || '50', 10), apiVersion: (process.env.DEFAULT_RUNNER_API_VERSION || '2') as '0' | '2', name: process.env.DEFAULT_RUNNER_NAME, }, runnerScore: { thresholds: { declarativeBuild: parseInt(process.env.RUNNER_DECLARATIVE_BUILD_SCORE_THRESHOLD || '10', 10), availability: parseInt(process.env.RUNNER_AVAILABILITY_SCORE_THRESHOLD || '10', 10), start: parseInt(process.env.RUNNER_START_SCORE_THRESHOLD || '3', 10), }, weights: { cpuUsage: parseFloat(process.env.RUNNER_CPU_USAGE_WEIGHT || '0.25'), memoryUsage: parseFloat(process.env.RUNNER_MEMORY_USAGE_WEIGHT || '0.4'), diskUsage: parseFloat(process.env.RUNNER_DISK_USAGE_WEIGHT || '0.4'), allocatedCpu: parseFloat(process.env.RUNNER_ALLOCATED_CPU_WEIGHT || '0.03'), allocatedMemory: parseFloat(process.env.RUNNER_ALLOCATED_MEMORY_WEIGHT || '0.03'), allocatedDisk: 
parseFloat(process.env.RUNNER_ALLOCATED_DISK_WEIGHT || '0.03'), startedSandboxes: parseFloat(process.env.RUNNER_STARTED_SANDBOXES_WEIGHT || '0.1'), }, penalty: { exponents: { cpuLoadAvg: parseFloat(process.env.RUNNER_CPU_LOAD_AVG_PENALTY_EXPONENT || '0.1'), cpu: parseFloat(process.env.RUNNER_CPU_PENALTY_EXPONENT || '0.15'), memory: parseFloat(process.env.RUNNER_MEMORY_PENALTY_EXPONENT || '0.15'), disk: parseFloat(process.env.RUNNER_DISK_PENALTY_EXPONENT || '0.15'), }, thresholds: { // cpuLoadAvg is a normalized per-CPU load average (e.g. load_avg / num_cpus), not a percentage like the cpu/memory/disk thresholds below. cpuLoadAvg: parseFloat(process.env.RUNNER_CPU_LOAD_AVG_PENALTY_THRESHOLD || '0.7'), cpu: parseInt(process.env.RUNNER_CPU_PENALTY_THRESHOLD || '90', 10), memory: parseInt(process.env.RUNNER_MEMORY_PENALTY_THRESHOLD || '75', 10), disk: parseInt(process.env.RUNNER_DISK_PENALTY_THRESHOLD || '75', 10), }, }, targetValues: { optimal: { cpu: parseInt(process.env.RUNNER_OPTIMAL_CPU || '0', 10), memory: parseInt(process.env.RUNNER_OPTIMAL_MEMORY || '0', 10), disk: parseInt(process.env.RUNNER_OPTIMAL_DISK || '0', 10), allocCpu: parseInt(process.env.RUNNER_OPTIMAL_ALLOC_CPU || '100', 10), allocMem: parseInt(process.env.RUNNER_OPTIMAL_ALLOC_MEM || '100', 10), allocDisk: parseInt(process.env.RUNNER_OPTIMAL_ALLOC_DISK || '100', 10), startedSandboxes: parseInt(process.env.RUNNER_OPTIMAL_STARTED_SANDBOXES || '0', 10), }, critical: { cpu: parseInt(process.env.RUNNER_CRITICAL_CPU || '100', 10), memory: parseInt(process.env.RUNNER_CRITICAL_MEMORY || '100', 10), disk: parseInt(process.env.RUNNER_CRITICAL_DISK || '100', 10), allocCpu: parseInt(process.env.RUNNER_CRITICAL_ALLOC_CPU || '500', 10), allocMem: parseInt(process.env.RUNNER_CRITICAL_ALLOC_MEM || '500', 10), allocDisk: parseInt(process.env.RUNNER_CRITICAL_ALLOC_DISK || '500', 10), startedSandboxes: parseInt(process.env.RUNNER_CRITICAL_STARTED_SANDBOXES || '100', 10), }, }, }, rateLimit: { anonymous: { ttl: 
process.env.RATE_LIMIT_ANONYMOUS_TTL ? parseInt(process.env.RATE_LIMIT_ANONYMOUS_TTL, 10) : undefined, limit: process.env.RATE_LIMIT_ANONYMOUS_LIMIT ? parseInt(process.env.RATE_LIMIT_ANONYMOUS_LIMIT, 10) : undefined, }, failedAuth: { ttl: process.env.RATE_LIMIT_FAILED_AUTH_TTL ? parseInt(process.env.RATE_LIMIT_FAILED_AUTH_TTL, 10) : undefined, limit: process.env.RATE_LIMIT_FAILED_AUTH_LIMIT ? parseInt(process.env.RATE_LIMIT_FAILED_AUTH_LIMIT, 10) : undefined, }, authenticated: { ttl: process.env.RATE_LIMIT_AUTHENTICATED_TTL ? parseInt(process.env.RATE_LIMIT_AUTHENTICATED_TTL, 10) : undefined, limit: process.env.RATE_LIMIT_AUTHENTICATED_LIMIT ? parseInt(process.env.RATE_LIMIT_AUTHENTICATED_LIMIT, 10) : undefined, }, sandboxCreate: { ttl: process.env.RATE_LIMIT_SANDBOX_CREATE_TTL ? parseInt(process.env.RATE_LIMIT_SANDBOX_CREATE_TTL, 10) : undefined, limit: process.env.RATE_LIMIT_SANDBOX_CREATE_LIMIT ? parseInt(process.env.RATE_LIMIT_SANDBOX_CREATE_LIMIT, 10) : undefined, }, sandboxLifecycle: { ttl: process.env.RATE_LIMIT_SANDBOX_LIFECYCLE_TTL ? parseInt(process.env.RATE_LIMIT_SANDBOX_LIFECYCLE_TTL, 10) : undefined, limit: process.env.RATE_LIMIT_SANDBOX_LIFECYCLE_LIMIT ? 
parseInt(process.env.RATE_LIMIT_SANDBOX_LIFECYCLE_LIMIT, 10) : undefined, }, }, log: { console: { disabled: process.env.LOG_CONSOLE_DISABLED === 'true', }, level: process.env.LOG_LEVEL || 'info', requests: { enabled: process.env.LOG_REQUESTS_ENABLED === 'true', }, }, defaultOrganizationQuota: { totalCpuQuota: parseInt(process.env.DEFAULT_ORG_QUOTA_TOTAL_CPU_QUOTA || '10', 10), totalMemoryQuota: parseInt(process.env.DEFAULT_ORG_QUOTA_TOTAL_MEMORY_QUOTA || '10', 10), totalDiskQuota: parseInt(process.env.DEFAULT_ORG_QUOTA_TOTAL_DISK_QUOTA || '30', 10), maxCpuPerSandbox: parseInt(process.env.DEFAULT_ORG_QUOTA_MAX_CPU_PER_SANDBOX || '4', 10), maxMemoryPerSandbox: parseInt(process.env.DEFAULT_ORG_QUOTA_MAX_MEMORY_PER_SANDBOX || '8', 10), maxDiskPerSandbox: parseInt(process.env.DEFAULT_ORG_QUOTA_MAX_DISK_PER_SANDBOX || '10', 10), snapshotQuota: parseInt(process.env.DEFAULT_ORG_QUOTA_SNAPSHOT_QUOTA || '100', 10), maxSnapshotSize: parseInt(process.env.DEFAULT_ORG_QUOTA_MAX_SNAPSHOT_SIZE || '20', 10), volumeQuota: parseInt(process.env.DEFAULT_ORG_QUOTA_VOLUME_QUOTA || '100', 10), }, defaultRegion: { id: process.env.DEFAULT_REGION_ID || 'us', name: process.env.DEFAULT_REGION_NAME || 'us', enforceQuotas: process.env.DEFAULT_REGION_ENFORCE_QUOTAS === 'true', }, admin: { apiKey: process.env.ADMIN_API_KEY, totalCpuQuota: parseInt(process.env.ADMIN_TOTAL_CPU_QUOTA || '0', 10), totalMemoryQuota: parseInt(process.env.ADMIN_TOTAL_MEMORY_QUOTA || '0', 10), totalDiskQuota: parseInt(process.env.ADMIN_TOTAL_DISK_QUOTA || '0', 10), maxCpuPerSandbox: parseInt(process.env.ADMIN_MAX_CPU_PER_SANDBOX || '0', 10), maxMemoryPerSandbox: parseInt(process.env.ADMIN_MAX_MEMORY_PER_SANDBOX || '0', 10), maxDiskPerSandbox: parseInt(process.env.ADMIN_MAX_DISK_PER_SANDBOX || '0', 10), snapshotQuota: parseInt(process.env.ADMIN_SNAPSHOT_QUOTA || '100', 10), maxSnapshotSize: parseInt(process.env.ADMIN_MAX_SNAPSHOT_SIZE || '100', 10), volumeQuota: parseInt(process.env.ADMIN_VOLUME_QUOTA || '0', 10), }, 
skipUserEmailVerification: process.env.SKIP_USER_EMAIL_VERIFICATION === 'true', apiKey: { validationCacheTtlSeconds: parseInt(process.env.API_KEY_VALIDATION_CACHE_TTL_SECONDS || '10', 10), userCacheTtlSeconds: parseInt(process.env.API_KEY_USER_CACHE_TTL_SECONDS || '60', 10), }, runnerHealthTimeout: parseInt(process.env.RUNNER_HEALTH_TIMEOUT_SECONDS || '3', 10), warmPool: { candidateLimit: parseInt(process.env.WARM_POOL_CANDIDATE_LIMIT || '300', 10), }, sandboxOtel: { endpointUrl: process.env.SANDBOX_OTEL_ENDPOINT_URL, }, otelCollector: { apiKey: process.env.OTEL_COLLECTOR_API_KEY, }, clickhouse: { host: process.env.CLICKHOUSE_HOST, port: parseInt(process.env.CLICKHOUSE_PORT || '8123', 10), database: process.env.CLICKHOUSE_DATABASE || 'otel', username: process.env.CLICKHOUSE_USERNAME || 'default', password: process.env.CLICKHOUSE_PASSWORD, protocol: process.env.CLICKHOUSE_PROTOCOL || 'https', }, encryption: { key: process.env.ENCRYPTION_KEY, salt: process.env.ENCRYPTION_SALT, }, } export { configuration } ================================================ FILE: apps/api/src/config/dto/configuration.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiExtraModels, ApiProperty, ApiPropertyOptional, ApiSchema, getSchemaPath } from '@nestjs/swagger' import { IsBoolean, IsNumber, IsOptional, IsString } from 'class-validator' import { TypedConfigService } from '../typed-config.service' @ApiSchema({ name: 'Announcement' }) export class Announcement { @ApiProperty({ description: 'The announcement text', example: 'New feature available!', }) @IsString() text: string @ApiPropertyOptional({ description: 'URL to learn more about the announcement', example: 'https://example.com/learn-more', }) @IsString() @IsOptional() learnMoreUrl?: string } @ApiSchema({ name: 'PosthogConfig' }) export class PosthogConfig { @ApiProperty({ description: 'PostHog API key', example: 'phc_abc123', }) @IsString() apiKey: string @ApiProperty({ description: 'PostHog host URL', example: 'https://app.posthog.com', }) @IsString() host: string } @ApiSchema({ name: 'RateLimitEntry' }) export class RateLimitEntry { @ApiPropertyOptional({ description: 'Rate limit TTL in seconds', example: 60, }) @IsNumber() @IsOptional() ttl?: number @ApiPropertyOptional({ description: 'Rate limit max requests', example: 100, }) @IsNumber() @IsOptional() limit?: number } @ApiSchema({ name: 'RateLimitConfig' }) export class RateLimitConfig { @ApiPropertyOptional({ description: 'Failed authentication rate limit', type: RateLimitEntry, }) @IsOptional() failedAuth?: RateLimitEntry @ApiPropertyOptional({ description: 'Authenticated rate limit', type: RateLimitEntry, }) @IsOptional() authenticated?: RateLimitEntry @ApiPropertyOptional({ description: 'Sandbox create rate limit', type: RateLimitEntry, }) @IsOptional() sandboxCreate?: RateLimitEntry @ApiPropertyOptional({ description: 'Sandbox lifecycle rate limit', type: RateLimitEntry, }) @IsOptional() sandboxLifecycle?: RateLimitEntry } @ApiSchema({ name: 'OidcConfig' }) export class OidcConfig { @ApiProperty({ description: 'OIDC issuer', example: 'https://auth.example.com', 
}) @IsString() issuer: string @ApiProperty({ description: 'OIDC client ID', example: 'daytona-client', }) @IsString() clientId: string @ApiProperty({ description: 'OIDC audience', example: 'daytona-api', }) @IsString() audience: string } @ApiExtraModels(Announcement) @ApiSchema({ name: 'DaytonaConfiguration' }) export class ConfigurationDto { @ApiProperty({ description: 'Daytona version', example: '0.0.1', }) @IsString() version: string @ApiPropertyOptional({ description: 'PostHog configuration', type: PosthogConfig, }) posthog?: PosthogConfig @ApiProperty({ description: 'OIDC configuration', type: OidcConfig, }) oidc: OidcConfig @ApiProperty({ description: 'Whether linked accounts are enabled', example: true, }) @IsBoolean() linkedAccountsEnabled: boolean @ApiProperty({ description: 'System announcements', type: 'object', additionalProperties: { $ref: getSchemaPath(Announcement) }, example: { 'feature-update': { text: 'New feature available!', learnMoreUrl: 'https://example.com' } }, }) announcements: Record @ApiPropertyOptional({ description: 'Pylon application ID', example: 'pylon-app-123', }) @IsString() @IsOptional() pylonAppId?: string @ApiProperty({ description: 'Proxy template URL', example: 'https://{{PORT}}-{{sandboxId}}.proxy.example.com', }) @IsString() proxyTemplateUrl: string @ApiProperty({ description: 'Toolbox template URL', example: 'https://proxy.example.com/toolbox', }) @IsString() proxyToolboxUrl: string @ApiProperty({ description: 'Default snapshot for sandboxes', example: 'ubuntu:22.04', }) @IsString() defaultSnapshot: string @ApiProperty({ description: 'Dashboard URL', example: 'https://dashboard.example.com', }) @IsString() dashboardUrl: string @ApiProperty({ description: 'Maximum auto-archive interval in minutes', example: 43200, }) @IsNumber() maxAutoArchiveInterval: number @ApiProperty({ description: 'Whether maintenance mode is enabled', example: false, }) @IsBoolean() maintananceMode: boolean @ApiProperty({ description: 'Current 
environment', example: 'production', }) @IsString() environment: string @ApiPropertyOptional({ description: 'Billing API URL', example: 'https://billing.example.com', }) @IsString() @IsOptional() billingApiUrl?: string @ApiPropertyOptional({ description: 'Analytics API URL', example: 'https://analytics.example.com', }) @IsString() @IsOptional() analyticsApiUrl?: string @ApiPropertyOptional({ description: 'SSH Gateway command', example: 'ssh -p 2222 {{TOKEN}}@localhost', }) @IsOptional() @IsString() sshGatewayCommand?: string @ApiPropertyOptional({ description: 'Base64 encoded SSH Gateway public key', example: 'ssh-gateway-public-key', }) @IsOptional() @IsString() sshGatewayPublicKey?: string @ApiPropertyOptional({ description: 'Rate limit configuration', type: RateLimitConfig, }) @IsOptional() rateLimit?: RateLimitConfig constructor(configService: TypedConfigService) { this.version = configService.getOrThrow('version') this.oidc = { issuer: configService.get('oidc.publicIssuer') || configService.getOrThrow('oidc.issuer'), clientId: configService.getOrThrow('oidc.clientId'), audience: configService.getOrThrow('oidc.audience'), } this.linkedAccountsEnabled = configService.get('oidc.managementApi.enabled') this.proxyTemplateUrl = configService.getOrThrow('proxy.templateUrl') this.proxyToolboxUrl = configService.getOrThrow('proxy.toolboxUrl') this.defaultSnapshot = configService.getOrThrow('defaultSnapshot') this.dashboardUrl = configService.getOrThrow('dashboardUrl') this.maxAutoArchiveInterval = configService.getOrThrow('maxAutoArchiveInterval') this.maintananceMode = configService.getOrThrow('maintananceMode') this.environment = configService.getOrThrow('environment') this.sshGatewayCommand = configService.get('sshGateway.command') this.sshGatewayPublicKey = configService.get('sshGateway.publicKey') if (configService.get('billingApiUrl')) { this.billingApiUrl = configService.get('billingApiUrl') } if (configService.get('analyticsApiUrl')) { this.analyticsApiUrl = 
configService.get('analyticsApiUrl') } if (configService.get('posthog.apiKey')) { this.posthog = { apiKey: configService.get('posthog.apiKey'), host: configService.get('posthog.host'), } } if (configService.get('pylonAppId')) { this.pylonAppId = configService.get('pylonAppId') } // TODO: announcements this.announcements = {} this.rateLimit = { authenticated: { ttl: configService.get('rateLimit.authenticated.ttl'), limit: configService.get('rateLimit.authenticated.limit'), }, sandboxCreate: { ttl: configService.get('rateLimit.sandboxCreate.ttl'), limit: configService.get('rateLimit.sandboxCreate.limit'), }, sandboxLifecycle: { ttl: configService.get('rateLimit.sandboxLifecycle.ttl'), limit: configService.get('rateLimit.sandboxLifecycle.limit'), }, } } } ================================================ FILE: apps/api/src/config/typed-config.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Global, Module, DynamicModule } from '@nestjs/common' import { ConfigModule as NestConfigModule, ConfigModuleOptions } from '@nestjs/config' import { TypedConfigService } from './typed-config.service' import { configuration } from './configuration' import { ConfigController } from './config.controller' @Global() @Module({ imports: [ NestConfigModule.forRoot({ isGlobal: true, load: [() => configuration], }), ], controllers: [ConfigController], providers: [TypedConfigService], exports: [TypedConfigService], }) export class TypedConfigModule { static forRoot(options: Partial = {}): DynamicModule { return { module: TypedConfigModule, imports: [ NestConfigModule.forRoot({ ...options, }), ], providers: [TypedConfigService], exports: [TypedConfigService], } } } ================================================ FILE: apps/api/src/config/typed-config.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ConfigService } from '@nestjs/config' import { Injectable } from '@nestjs/common' import { configuration } from './configuration' import { KafkaConfig, Mechanism, SASLOptions } from 'kafkajs' import { AwsSigv4Signer, AwsSigv4SignerResponse } from '@opensearch-project/opensearch/aws' import { defaultProvider } from '@aws-sdk/credential-provider-node' import { fromTemporaryCredentials } from '@aws-sdk/credential-providers' import { ClientOptions } from '@opensearch-project/opensearch' import { RedisOptions } from 'ioredis' type Configuration = typeof configuration // Helper type to get nested property paths type Paths = T extends object ? { [K in keyof T]: K extends string ? (T[K] extends object ? `${K}` | `${K}.${Paths}` : `${K}`) : never }[keyof T] : never // Helper type to get the type of a property at a given path type PathValue = P extends `${infer K}.${infer Rest}` ? K extends keyof T ? T[K] extends object ? PathValue : never : never : P extends keyof T ? 
T[P] : never @Injectable() export class TypedConfigService { constructor(private configService: ConfigService) {} /** * Get a configuration value with type safety * @param key The configuration key (can be nested using dot notation) * @returns The configuration value with proper typing */ get>(key: K): PathValue { return this.configService.get(key) } /** * Get a configuration value with type safety, throwing an error if undefined * @param key The configuration key (can be nested using dot notation) * @returns The configuration value with proper typing * @throws Error if the configuration value is undefined */ getOrThrow>(key: K): NonNullable> { const value = this.get(key) if (value === undefined) { throw new Error(`Configuration key "${key}" is undefined`) } return value as NonNullable> } /** * Get the Kafka configuration * @returns The Kafka configuration */ getKafkaClientConfig(): KafkaConfig { const mechanism = this.get('kafka.sasl.mechanism') || 'plain' const username = this.get('kafka.sasl.username') const password = this.get('kafka.sasl.password') if (mechanism !== 'plain' && mechanism !== 'scram-sha-256' && mechanism !== 'scram-sha-512') { throw new Error(`Invalid Kafka SASL mechanism: ${mechanism}`) } const sasl: SASLOptions | Mechanism | undefined = username && password ? ({ mechanism, username, password, } as SASLOptions) : undefined return { brokers: this.get('kafka.brokers') .split(',') .map((broker) => broker.trim()), ssl: this.get('kafka.tls.enabled') ? 
{ rejectUnauthorized: this.get('kafka.tls.rejectUnauthorized'), } : undefined, sasl, } } /** * Get the OpenSearch configuration * @returns The OpenSearch configuration */ getOpenSearchConfig(): ClientOptions { const nodes = this.get('opensearch.nodes') .split(',') .map((node) => node.trim()) const username = this.get('opensearch.username') const password = this.get('opensearch.password') // Basic auth if (username && password) { return { nodes, auth: { username, password, }, ssl: { rejectUnauthorized: this.get('opensearch.tls.rejectUnauthorized'), }, } } // AWS Sigv4 auth try { let signer: AwsSigv4SignerResponse if (this.get('opensearch.aws.roleArn')) { signer = AwsSigv4Signer({ getCredentials: fromTemporaryCredentials({ params: { RoleArn: this.get('opensearch.aws.roleArn'), RoleSessionName: 'daytona-opensearch', }, }), service: 'es', region: this.get('opensearch.aws.region'), }) } else { signer = AwsSigv4Signer({ getCredentials() { const credentialsProvider = defaultProvider() return credentialsProvider() }, service: 'es', region: this.get('opensearch.aws.region'), }) } return { nodes, ssl: { rejectUnauthorized: this.get('opensearch.tls.rejectUnauthorized'), }, ...signer, } // Try without auth if AWS credentials are not available } catch { return { nodes, ssl: { rejectUnauthorized: this.get('opensearch.tls.rejectUnauthorized'), }, } } } /** * Get the Redis configuration * @param overrides Optional overrides for the Redis configuration * @returns The Redis configuration */ getRedisConfig(overrides?: Partial): RedisOptions { return { host: this.getOrThrow('redis.host'), port: this.getOrThrow('redis.port'), username: this.get('redis.username'), password: this.get('redis.password'), tls: this.get('redis.tls'), lazyConnect: this.get('skipConnections'), ...overrides, } } /** * Get the ClickHouse configuration * @returns The ClickHouse configuration */ getClickHouseConfig() { const host = this.get('clickhouse.host') if (!host) { return null } return { url: 
`${this.get('clickhouse.protocol')}://${host}:${this.get('clickhouse.port')}`, username: this.get('clickhouse.username'), password: this.get('clickhouse.password'), database: this.get('clickhouse.database'), } } } ================================================ FILE: apps/api/src/docker-registry/controllers/docker-registry.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get, Post, Body, Patch, Param, Delete, UseGuards, HttpCode, ForbiddenException, Query, } from '@nestjs/common' import { ApiTags, ApiOperation, ApiResponse, ApiOAuth2, ApiHeader, ApiParam, ApiBearerAuth, ApiQuery, } from '@nestjs/swagger' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { DockerRegistryService } from '../services/docker-registry.service' import { CreateDockerRegistryDto } from '../dto/create-docker-registry.dto' import { UpdateDockerRegistryDto } from '../dto/update-docker-registry.dto' import { DockerRegistryDto } from '../dto/docker-registry.dto' import { RegistryPushAccessDto } from '../../sandbox/dto/registry-push-access-dto' import { DockerRegistryAccessGuard } from '../guards/docker-registry-access.guard' import { DockerRegistry } from '../decorators/docker-registry.decorator' import { DockerRegistry as DockerRegistryEntity } from '../entities/docker-registry.entity' import { CustomHeaders } from '../../common/constants/header.constants' import { AuthContext } from '../../common/decorators/auth-context.decorator' import { OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { RequiredOrganizationResourcePermissions } from '../../organization/decorators/required-organization-resource-permissions.decorator' import { OrganizationResourcePermission } from '../../organization/enums/organization-resource-permission.enum' import { OrganizationResourceActionGuard } from 
'../../organization/guards/organization-resource-action.guard' import { SystemActionGuard } from '../../auth/system-action.guard' import { RequiredSystemRole } from '../../common/decorators/required-role.decorator' import { SystemRole } from '../../user/enums/system-role.enum' import { Audit, MASKED_AUDIT_VALUE, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } from '../../audit/enums/audit-target.enum' import { RegistryType } from '../enums/registry-type.enum' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' @ApiTags('docker-registry') @Controller('docker-registry') @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(CombinedAuthGuard, SystemActionGuard, OrganizationResourceActionGuard, AuthenticatedRateLimitGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class DockerRegistryController { constructor(private readonly dockerRegistryService: DockerRegistryService) {} @Post() @ApiOperation({ summary: 'Create registry', operationId: 'createRegistry', }) @ApiResponse({ status: 201, description: 'The docker registry has been successfully created.', type: DockerRegistryDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_REGISTRIES]) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.DOCKER_REGISTRY, targetIdFromResult: (result: DockerRegistryDto) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ name: req.body?.name, username: req.body?.username, password: req.body?.password ? 
MASKED_AUDIT_VALUE : undefined, url: req.body?.url, project: req.body?.project, registryType: req.body?.registryType, isDefault: req.body?.isDefault, }), }, }) async create( @AuthContext() authContext: OrganizationAuthContext, @Body() createDockerRegistryDto: CreateDockerRegistryDto, ): Promise { if (createDockerRegistryDto.registryType !== RegistryType.ORGANIZATION && authContext.role !== SystemRole.ADMIN) { throw new ForbiddenException( `Insufficient permissions for creating ${createDockerRegistryDto.registryType} registries`, ) } if (createDockerRegistryDto.isDefault && authContext.role !== SystemRole.ADMIN) { throw new ForbiddenException('Insufficient permissions for setting a default registry') } const dockerRegistry = await this.dockerRegistryService.create(createDockerRegistryDto, authContext.organizationId) return DockerRegistryDto.fromDockerRegistry(dockerRegistry) } @Get() @ApiOperation({ summary: 'List registries', operationId: 'listRegistries', }) @ApiResponse({ status: 200, description: 'List of all docker registries', type: [DockerRegistryDto], }) async findAll(@AuthContext() authContext: OrganizationAuthContext): Promise { const dockerRegistries = await this.dockerRegistryService.findAll( authContext.organizationId, // only include registries manually created by the organization RegistryType.ORGANIZATION, ) return dockerRegistries.map(DockerRegistryDto.fromDockerRegistry) } @Get('registry-push-access') @HttpCode(200) @ApiOperation({ summary: 'Get temporary registry access for pushing snapshots', operationId: 'getTransientPushAccess', }) @ApiQuery({ name: 'regionId', required: false, description: 'ID of the region where the snapshot will be available (defaults to organization default region)', type: 'string', }) @ApiResponse({ status: 200, description: 'Temporary registry access has been generated', type: RegistryPushAccessDto, }) async getTransientPushAccess( @AuthContext() authContext: OrganizationAuthContext, @Query('regionId') regionId?: string, 
): Promise { return this.dockerRegistryService.getRegistryPushAccess(authContext.organizationId, authContext.userId, regionId) } @Get(':id') @ApiOperation({ summary: 'Get registry', operationId: 'getRegistry', }) @ApiParam({ name: 'id', description: 'ID of the docker registry', type: 'string', }) @ApiResponse({ status: 200, description: 'The docker registry', type: DockerRegistryDto, }) @UseGuards(DockerRegistryAccessGuard) async findOne(@DockerRegistry() registry: DockerRegistryEntity): Promise { return DockerRegistryDto.fromDockerRegistry(registry) } @Patch(':id') @ApiOperation({ summary: 'Update registry', operationId: 'updateRegistry', }) @ApiParam({ name: 'id', description: 'ID of the docker registry', type: 'string', }) @ApiResponse({ status: 200, description: 'The docker registry has been successfully updated.', type: DockerRegistryDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_REGISTRIES]) @UseGuards(DockerRegistryAccessGuard) @Audit({ action: AuditAction.UPDATE, targetType: AuditTarget.DOCKER_REGISTRY, targetIdFromRequest: (req) => req.params.id, requestMetadata: { body: (req: TypedRequest) => ({ name: req.body?.name, url: req.body?.url, username: req.body?.username, password: req.body?.password ? 
MASKED_AUDIT_VALUE : undefined, project: req.body?.project, }), }, }) async update( @Param('id') registryId: string, @Body() updateDockerRegistryDto: UpdateDockerRegistryDto, ): Promise { const dockerRegistry = await this.dockerRegistryService.update(registryId, updateDockerRegistryDto) return DockerRegistryDto.fromDockerRegistry(dockerRegistry) } @Delete(':id') @ApiOperation({ summary: 'Delete registry', operationId: 'deleteRegistry', }) @ApiParam({ name: 'id', description: 'ID of the docker registry', type: 'string', }) @ApiResponse({ status: 204, description: 'The docker registry has been successfully deleted.', }) @HttpCode(204) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.DELETE_REGISTRIES]) @UseGuards(DockerRegistryAccessGuard) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.DOCKER_REGISTRY, targetIdFromRequest: (req) => req.params.id, }) async remove(@Param('id') registryId: string): Promise { return this.dockerRegistryService.remove(registryId) } @Post(':id/set-default') @ApiOperation({ summary: 'Set default registry', operationId: 'setDefaultRegistry', }) @ApiParam({ name: 'id', description: 'ID of the docker registry', type: 'string', }) @ApiResponse({ status: 200, description: 'The docker registry has been set as default.', type: DockerRegistryDto, }) @RequiredSystemRole(SystemRole.ADMIN) @UseGuards(DockerRegistryAccessGuard) @Audit({ action: AuditAction.SET_DEFAULT, targetType: AuditTarget.DOCKER_REGISTRY, targetIdFromRequest: (req) => req.params.id, }) async setDefault(@Param('id') registryId: string): Promise { const dockerRegistry = await this.dockerRegistryService.setDefault(registryId) return DockerRegistryDto.fromDockerRegistry(dockerRegistry) } } ================================================ FILE: apps/api/src/docker-registry/decorators/docker-registry.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { createParamDecorator, ExecutionContext } from '@nestjs/common' export const DockerRegistry = createParamDecorator((data: unknown, ctx: ExecutionContext) => { const request = ctx.switchToHttp().getRequest() return request.dockerRegistry }) ================================================ FILE: apps/api/src/docker-registry/docker-registry.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { TypeOrmModule } from '@nestjs/typeorm' import { DockerRegistry } from './entities/docker-registry.entity' import { DockerRegistryService } from './services/docker-registry.service' import { DockerRegistryController } from './controllers/docker-registry.controller' import { HttpModule } from '@nestjs/axios' import { DockerRegistryProvider } from './providers/docker-registry.provider' import { DOCKER_REGISTRY_PROVIDER } from './providers/docker-registry.provider.interface' import { OrganizationModule } from '../organization/organization.module' import { RegionModule } from '../region/region.module' @Module({ imports: [OrganizationModule, RegionModule, TypeOrmModule.forFeature([DockerRegistry]), HttpModule], controllers: [DockerRegistryController], providers: [ { provide: DOCKER_REGISTRY_PROVIDER, useClass: DockerRegistryProvider, }, DockerRegistryService, ], exports: [DockerRegistryService], }) export class DockerRegistryModule {} ================================================ FILE: apps/api/src/docker-registry/dto/create-docker-registry-internal.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
/**
 * Request payload for creating a docker registry (POST /docker-registry).
 * Validated with class-validator; shapes the Swagger schema "CreateDockerRegistry".
 */
@ApiSchema({ name: 'CreateDockerRegistry' })
export class CreateDockerRegistryDto {
  @ApiProperty({ description: 'Registry name' })
  @IsString()
  name: string

  @ApiProperty({ description: 'Registry URL' })
  @IsUrl()
  url: string

  @ApiProperty({ description: 'Registry username' })
  @IsString()
  username: string

  // Write-only credential: the response DTO (DockerRegistryDto) has no password field.
  @ApiProperty({ description: 'Registry password' })
  @IsString()
  password: string

  @ApiPropertyOptional({ description: 'Registry project' })
  @IsString()
  @IsOptional()
  project?: string

  // NOTE(review): Swagger advertises a default of ORGANIZATION, but validation
  // requires the field (no @IsOptional) — confirm whether omission should be allowed.
  @ApiProperty({
    description: 'Registry type',
    enum: RegistryType,
    default: RegistryType.ORGANIZATION,
  })
  @IsEnum(RegistryType)
  registryType: RegistryType

  // Only honored for admins; the controller rejects isDefault from non-admin callers.
  @ApiPropertyOptional({ description: 'Set as default registry' })
  @IsBoolean()
  @IsOptional()
  isDefault?: boolean
}
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { RegistryType } from './../../docker-registry/enums/registry-type.enum' import { DockerRegistry } from '../entities/docker-registry.entity' @ApiSchema({ name: 'DockerRegistry' }) export class DockerRegistryDto { @ApiProperty({ description: 'Registry ID', example: '123e4567-e89b-12d3-a456-426614174000', }) id: string @ApiProperty({ description: 'Registry name', example: 'My Docker Hub', }) name: string @ApiProperty({ description: 'Registry URL', example: 'https://registry.hub.docker.com', }) url: string @ApiProperty({ description: 'Registry username', example: 'username', }) username: string @ApiProperty({ description: 'Registry project', example: 'my-project', }) project: string @ApiProperty({ description: 'Registry type', enum: RegistryType, example: RegistryType.INTERNAL, }) registryType: RegistryType @ApiProperty({ description: 'Creation timestamp', example: '2024-01-31T12:00:00Z', }) createdAt: Date @ApiProperty({ description: 'Last update timestamp', example: '2024-01-31T12:00:00Z', }) updatedAt: Date static fromDockerRegistry(dockerRegistry: DockerRegistry): DockerRegistryDto { const dto: DockerRegistryDto = { id: dockerRegistry.id, name: dockerRegistry.name, url: dockerRegistry.url, username: dockerRegistry.username, project: dockerRegistry.project, registryType: dockerRegistry.registryType, createdAt: dockerRegistry.createdAt, updatedAt: dockerRegistry.updatedAt, } return dto } } ================================================ FILE: apps/api/src/docker-registry/dto/update-docker-registry.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
/**
 * Persisted docker registry configuration and credentials.
 * Composite indexes back the lookup patterns in DockerRegistryService
 * (by organization, by region, and by default-per-type).
 */
@Entity()
@Index(['organizationId', 'registryType'])
@Index(['region', 'registryType'])
@Index(['registryType', 'isDefault'])
export class DockerRegistry {
  @PrimaryGeneratedColumn('uuid')
  id: string

  @Column()
  name: string

  @Column()
  url: string

  @Column()
  username: string

  // Stored as-is at this layer — NOTE(review): confirm encryption-at-rest is handled elsewhere.
  @Column()
  password: string

  // Default registry flag; toggled by DockerRegistryService.setDefault().
  @Column({ default: false })
  isDefault: boolean

  // Fallback candidates used when no backup registry exists in the preferred region
  // (see getAvailableBackupRegistry).
  @Column({ default: false })
  isFallback: boolean

  @Column({ default: '' })
  project: string

  // Owning organization; null for shared/internal registries.
  @Column({ nullable: true, type: 'uuid' })
  organizationId?: string

  // Region the registry is pinned to; null when not region-scoped.
  @Column({ nullable: true })
  region: string | null

  @Column({
    type: 'enum',
    enum: RegistryType,
    default: RegistryType.INTERNAL,
  })
  registryType: RegistryType

  @CreateDateColumn({
    type: 'timestamp with time zone',
  })
  createdAt: Date

  @UpdateDateColumn({
    type: 'timestamp with time zone',
  })
  updatedAt: Date
}
/**
 * Guard for per-registry routes: resolves the registry named in the path,
 * authorizes the caller against it, and attaches the entity to the request
 * so the @DockerRegistry() param decorator can hand it to the handler.
 */
@Injectable()
export class DockerRegistryAccessGuard implements CanActivate {
  constructor(private readonly dockerRegistryService: DockerRegistryService) {}

  async canActivate(context: ExecutionContext): Promise {
    const request = context.switchToHttp().getRequest()
    // Accept any of the param names used across the registry routes.
    const dockerRegistryId: string = request.params.dockerRegistryId || request.params.registryId || request.params.id

    // TODO: initialize authContext safely
    const authContext: OrganizationAuthContext = request.user

    try {
      const dockerRegistry = await this.dockerRegistryService.findOneOrFail(dockerRegistryId)
      // Admins may access any registry; everyone else only their own organization's.
      if (authContext.role !== SystemRole.ADMIN && dockerRegistry.organizationId !== authContext.organizationId) {
        throw new ForbiddenException('Request organization ID does not match resource organization ID')
      }
      if (authContext.role !== SystemRole.ADMIN && dockerRegistry.registryType !== RegistryType.ORGANIZATION) {
        // only allow access to registries manually created by the organization
        throw new ForbiddenException(`Requested registry is not type "${RegistryType.ORGANIZATION}"`)
      }
      // Expose the loaded entity to downstream decorators/handlers.
      request.dockerRegistry = dockerRegistry
      return true
    } catch (error) {
      // NOTE(review): every failure — the ForbiddenExceptions thrown above, a missing
      // row, and any unexpected repository error — is surfaced as 404. Hiding existence
      // may be intentional anti-enumeration, but it also masks genuine 403s and
      // infrastructure failures; confirm this is deliberate.
      throw new NotFoundException(`Docker registry with ID ${dockerRegistryId} not found`)
    }
  }
}
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable } from '@nestjs/common' import { HttpService } from '@nestjs/axios' import { firstValueFrom } from 'rxjs' import { IDockerRegistryProvider } from './docker-registry.provider.interface' @Injectable() export class DockerRegistryProvider implements IDockerRegistryProvider { constructor(private readonly httpService: HttpService) {} async createRobotAccount( url: string, auth: { username: string; password: string }, robotConfig: { name: string description: string duration: number level: string permissions: Array<{ kind: string namespace: string access: Array<{ resource: string; action: string }> }> }, ): Promise<{ name: string; secret: string }> { const response = await firstValueFrom(this.httpService.post(url, robotConfig, { auth })) return { name: response.data.name, secret: response.data.secret, } } async deleteArtifact( baseUrl: string, auth: { username: string; password: string }, params: { project: string; repository: string; tag: string }, ): Promise { const url = `${baseUrl}/api/v2.0/projects/${params.project}/repositories/${params.repository}/artifacts/${params.tag}` try { await firstValueFrom(this.httpService.delete(url, { auth })) } catch (error) { if (error.response?.status === 404) { return // Artifact not found, consider it a success } throw error } } } ================================================ FILE: apps/api/src/docker-registry/providers/mock-docker-registry.provider.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { IDockerRegistryProvider } from './docker-registry.provider.interface' export class MockDockerRegistryProvider implements IDockerRegistryProvider { async createRobotAccount(): Promise<{ name: string; secret: string }> { return { name: 'mock-robot', secret: 'mock-secret', } } async deleteArtifact(): Promise { return Promise.resolve() } } ================================================ FILE: apps/api/src/docker-registry/services/docker-registry.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ForbiddenException, Inject, Injectable, Logger, NotFoundException } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { EntityManager, FindOptionsWhere, IsNull, Repository } from 'typeorm' import { DockerRegistry } from '../entities/docker-registry.entity' import { CreateDockerRegistryInternalDto } from '../dto/create-docker-registry-internal.dto' import { UpdateDockerRegistryDto } from '../dto/update-docker-registry.dto' import { ApiOAuth2 } from '@nestjs/swagger' import { RegistryPushAccessDto } from '../../sandbox/dto/registry-push-access-dto' import { DOCKER_REGISTRY_PROVIDER, IDockerRegistryProvider, } from './../../docker-registry/providers/docker-registry.provider.interface' import { RegistryType } from './../../docker-registry/enums/registry-type.enum' import { parseDockerImage, checkDockerfileHasRegistryPrefix } from '../../common/utils/docker-image.util' import axios from 'axios' import { OnAsyncEvent } from '../../common/decorators/on-async-event.decorator' import { RegionEvents } from '../../region/constants/region-events.constant' import { RegionCreatedEvent } from '../../region/events/region-created.event' import { RegionDeletedEvent } from '../../region/events/region-deleted.event' import { RegionService } from '../../region/services/region.service' import { 
RegionSnapshotManagerCredsRegeneratedEvent, RegionSnapshotManagerUpdatedEvent, } from '../../region/events/region-snapshot-manager-creds.event' const AXIOS_TIMEOUT_MS = 3000 const DOCKER_HUB_REGISTRY = 'registry-1.docker.io' const DOCKER_HUB_URL = 'docker.io' /** * Normalizes Docker Hub URLs to 'docker.io' for storage. * Empty URLs are assumed to be Docker Hub. */ function normalizeRegistryUrl(url: string): string { if (!url || url.trim() === '' || url.toLowerCase().includes('docker.io')) { return DOCKER_HUB_URL } // Strip trailing slashes for consistent matching return url.trim().replace(/\/+$/, '') } export interface ImageDetails { digest: string sizeGB: number entrypoint: string[] cmd: string[] env: string[] workingDir?: string user?: string } @Injectable() @ApiOAuth2(['openid', 'profile', 'email']) export class DockerRegistryService { private readonly logger = new Logger(DockerRegistryService.name) constructor( @InjectRepository(DockerRegistry) private readonly dockerRegistryRepository: Repository, @Inject(DOCKER_REGISTRY_PROVIDER) private readonly dockerRegistryProvider: IDockerRegistryProvider, private readonly regionService: RegionService, ) {} async create( createDto: CreateDockerRegistryInternalDto, organizationId?: string, isFallback = false, entityManager?: EntityManager, ): Promise { const repository = entityManager ? 
entityManager.getRepository(DockerRegistry) : this.dockerRegistryRepository // set some limit to the number of registries if (organizationId) { const registries = await repository.find({ where: { organizationId }, }) if (registries.length >= 100) { throw new ForbiddenException('You have reached the maximum number of registries') } } const registry = repository.create({ ...createDto, url: normalizeRegistryUrl(createDto.url), region: createDto.regionId, organizationId, isFallback, }) return repository.save(registry) } async findAll(organizationId: string, registryType: RegistryType): Promise { return this.dockerRegistryRepository.find({ where: { organizationId, registryType }, order: { createdAt: 'DESC', }, }) } async findOne(registryId: string): Promise { return this.dockerRegistryRepository.findOne({ where: { id: registryId }, }) } async findOneOrFail(registryId: string): Promise { return this.dockerRegistryRepository.findOneOrFail({ where: { id: registryId }, }) } async update(registryId: string, updateDto: UpdateDockerRegistryDto): Promise { const registry = await this.dockerRegistryRepository.findOne({ where: { id: registryId }, }) if (!registry) { throw new NotFoundException(`Docker registry with ID ${registryId} not found`) } registry.name = updateDto.name registry.url = normalizeRegistryUrl(updateDto.url) registry.username = updateDto.username if (updateDto.password) { registry.password = updateDto.password } registry.project = updateDto.project return this.dockerRegistryRepository.save(registry) } async remove(registryId: string): Promise { const registry = await this.dockerRegistryRepository.findOne({ where: { id: registryId }, }) if (!registry) { throw new NotFoundException(`Docker registry with ID ${registryId} not found`) } await this.dockerRegistryRepository.remove(registry) } async setDefault(registryId: string): Promise { const registry = await this.dockerRegistryRepository.findOne({ where: { id: registryId }, }) if (!registry) { throw new 
NotFoundException(`Docker registry with ID ${registryId} not found`) } await this.unsetDefaultRegistry() registry.isDefault = true return this.dockerRegistryRepository.save(registry) } private async unsetDefaultRegistry(): Promise { await this.dockerRegistryRepository.update({ isDefault: true }, { isDefault: false }) } /** * Returns an available internal registry for storing snapshots. * * If a snapshot manager _is_ configured for the region identified by the provided _regionId_, only an internal registry that matches the region snapshot manager can be returned. * If no matching internal registry is found, _null_ will be returned. * * If a snapshot manager _is not_ configured for the provided region, the default internal registry will be returned (if available). * If no default internal registry is found, _null_ will be returned. * * @param regionId - The ID of the region. */ async getAvailableInternalRegistry(regionId: string): Promise { const region = await this.regionService.findOne(regionId) if (!region) { return null } if (region.snapshotManagerUrl) { return this.dockerRegistryRepository.findOne({ where: { region: regionId, registryType: RegistryType.INTERNAL }, }) } return this.dockerRegistryRepository.findOne({ where: { isDefault: true, registryType: RegistryType.INTERNAL }, }) } /** * Returns an available transient registry for pushing snapshots. * * If a snapshot manager _is_ configured for the region identified by the provided _regionId_, only a transient registry that matches the region snapshot manager can be returned. * If no matching transient registry is found, _null_ will be returned. * * If a snapshot manager _is not_ configured for the provided region or no region is provided, the default transient registry will be returned (if available). * If no default transient registry is found, _null_ will be returned. * * @param regionId - (Optional) The ID of the region. 
*/ async getAvailableTransientRegistry(regionId?: string): Promise { if (regionId) { const region = await this.regionService.findOne(regionId) if (!region) { return null } if (region.snapshotManagerUrl) { return this.dockerRegistryRepository.findOne({ where: { region: regionId, registryType: RegistryType.TRANSIENT }, }) } } return this.dockerRegistryRepository.findOne({ where: { isDefault: true, registryType: RegistryType.TRANSIENT }, }) } async getDefaultDockerHubRegistry(): Promise { return this.dockerRegistryRepository.findOne({ where: { organizationId: IsNull(), registryType: RegistryType.INTERNAL, url: DOCKER_HUB_URL, project: '', }, }) } /** * Returns an available backup registry for storing snapshots. * * If a snapshot manager _is_ configured for the region identified by the provided _preferredRegionId_, only a backup registry that matches the region snapshot manager can be returned. * If no matching backup registry is found, _null_ will be returned. * * If a snapshot manager _is not_ configured for the provided region, a backup registry in the preferred region will be returned (if available). * If no backup registry is found in the preferred region, a fallback backup registry will be returned (if available). * If no fallback backup registry is found, _null_ will be returned. * * @param preferredRegionId - The ID of the preferred region. 
*/ async getAvailableBackupRegistry(preferredRegionId: string): Promise { const region = await this.regionService.findOne(preferredRegionId) if (!region) { return null } if (region.snapshotManagerUrl) { return this.dockerRegistryRepository.findOne({ where: { region: preferredRegionId, registryType: RegistryType.BACKUP }, }) } const registries = await this.dockerRegistryRepository.find({ where: { registryType: RegistryType.BACKUP, isDefault: true }, }) if (registries.length === 0) { return null } // Filter registries by preferred region const preferredRegionRegistries = registries.filter((registry) => registry.region === preferredRegionId) // If we have registries in the preferred region, randomly select one if (preferredRegionRegistries.length > 0) { const randomIndex = Math.floor(Math.random() * preferredRegionRegistries.length) return preferredRegionRegistries[randomIndex] } // If no registry found in preferred region, try to find a fallback registry const fallbackRegistries = registries.filter((registry) => registry.isFallback === true) if (fallbackRegistries.length > 0) { const randomIndex = Math.floor(Math.random() * fallbackRegistries.length) return fallbackRegistries[randomIndex] } // If no fallback registry found either, throw an error throw new Error('No backup registry available') } /** * Returns an internal registry that matches the snapshot ref. * * If no matching internal registry is found, _null_ will be returned. * * @param ref - The snapshot ref. * @param regionId - The ID of the region which needs access to the internal registry. 
*/ async findInternalRegistryBySnapshotRef(ref: string, regionId: string): Promise { const region = await this.regionService.findOne(regionId) if (!region) { return null } let registries: DockerRegistry[] if (region.snapshotManagerUrl) { registries = await this.dockerRegistryRepository.find({ where: { region: regionId, registryType: RegistryType.INTERNAL, }, }) } else { registries = await this.dockerRegistryRepository.find({ where: { organizationId: IsNull(), registryType: RegistryType.INTERNAL, }, }) } return this.findRegistryByUrlMatch(registries, ref) } /** * Returns a source registry that matches the snapshot image name and can be used to pull the image. * * If no matching source registry is found, _null_ will be returned. * * @param imageName - The user-provided image. * @param regionId - The ID of the region which needs access to the source registry. */ async findSourceRegistryBySnapshotImageName( imageName: string, regionId: string, organizationId?: string, ): Promise { const region = await this.regionService.findOne(regionId) if (!region) { return null } const whereCondition: FindOptionsWhere[] = [] if (region.organizationId) { // registries manually added by the organization whereCondition.push({ organizationId: region.organizationId, registryType: RegistryType.ORGANIZATION, }) } if (organizationId) { whereCondition.push({ organizationId: organizationId, registryType: RegistryType.ORGANIZATION, }) } if (region.snapshotManagerUrl) { // internal registry associated with region snapshot manager whereCondition.push({ region: regionId, registryType: RegistryType.INTERNAL, }) } else { // shared internal registries whereCondition.push({ organizationId: IsNull(), registryType: RegistryType.INTERNAL, }) } const registries = await this.dockerRegistryRepository.find({ where: whereCondition, }) // Prioritize ORGANIZATION registries over others // This ensures user-configured credentials take precedence over shared internal ones const priority: Partial> = { 
[RegistryType.ORGANIZATION]: 0, } const sortedRegistries = [...registries].sort( (a, b) => (priority[a.registryType] ?? 1) - (priority[b.registryType] ?? 1), ) return this.findRegistryByUrlMatch(sortedRegistries, imageName) } /** * Returns a transient registry that matches the snapshot image name. * * If no matching transient registry is found, _null_ will be returned. * * @param imageName - The user-provided image. * @param regionId - The ID of the region which needs access to the transient registry. */ async findTransientRegistryBySnapshotImageName(imageName: string, regionId: string): Promise { const region = await this.regionService.findOne(regionId) if (!region) { return null } let registries: DockerRegistry[] if (region.snapshotManagerUrl) { registries = await this.dockerRegistryRepository.find({ where: { region: regionId, registryType: RegistryType.TRANSIENT, }, }) } else { registries = await this.dockerRegistryRepository.find({ where: { organizationId: IsNull(), registryType: RegistryType.TRANSIENT, }, }) } return this.findRegistryByUrlMatch(registries, imageName) } async getRegistryPushAccess( organizationId: string, userId: string, regionId?: string, ): Promise { const transientRegistry = await this.getAvailableTransientRegistry(regionId) if (!transientRegistry) { throw new Error('No default transient registry configured') } const uniqueId = crypto.randomUUID().replace(/-/g, '').slice(0, 12) const robotName = `temp-push-robot-${uniqueId}` const expiresAt = new Date() expiresAt.setHours(expiresAt.getHours() + 1) // Token valid for 1 hour const url = this.getRegistryUrl(transientRegistry) + '/api/v2.0/robots' try { const response = await this.dockerRegistryProvider.createRobotAccount( url, { username: transientRegistry.username, password: transientRegistry.password, }, { name: robotName, description: `Temporary push access for user ${userId} in organization ${organizationId}`, duration: 3600, level: 'project', permissions: [ { kind: 'project', namespace: 
transientRegistry.project, access: [{ resource: 'repository', action: 'push' }], }, ], }, ) return { username: response.name, secret: response.secret, registryId: transientRegistry.id, registryUrl: new URL(url).host, project: transientRegistry.project, expiresAt: expiresAt.toISOString(), } } catch (error) { let errorMessage = `Failed to generate push token: ${error.message}` if (error.response) { errorMessage += ` - ${error.response.data.message || error.response.statusText}` } throw new Error(errorMessage) } } async removeImage(imageName: string, registryId: string): Promise { const registry = await this.findOne(registryId) if (!registry) { throw new Error('Registry not found') } const parsedImage = parseDockerImage(imageName) if (!parsedImage.project) { throw new Error('Invalid image name format. Expected: [registry]/project/repository[:tag]') } try { await this.dockerRegistryProvider.deleteArtifact( this.getRegistryUrl(registry), { username: registry.username, password: registry.password, }, { project: parsedImage.project, repository: parsedImage.repository, tag: parsedImage.tag, }, ) } catch (error) { const message = error.response?.data?.message || error.message throw new Error(`Failed to remove image ${imageName}: ${message}`) } } getRegistryUrl(registry: DockerRegistry): string { // Dev mode if (registry.url.startsWith('localhost:') || registry.url.startsWith('registry:')) { return `http://${registry.url}` } if (registry.url.startsWith('localhost') || registry.url.startsWith('127.0.0.1')) { return `http://${registry.url}` } return registry.url.startsWith('http') ? 
registry.url : `https://${registry.url}` } public async findRegistryByImageName( imageName: string, regionId: string, organizationId?: string, ): Promise { // Parse the image to extract potential registry hostname const parsedImage = parseDockerImage(imageName) if (parsedImage.registry) { // Image has registry prefix, try to find matching registry in database first const registry = await this.findSourceRegistryBySnapshotImageName(imageName, regionId, organizationId) if (registry) { return registry } // Not found in database, create temporary registry config for public access return this.createTemporaryRegistryConfig(parsedImage.registry) } else { // Image has no registry prefix (e.g., "alpine:3.21") // Create temporary Docker Hub config return this.createTemporaryRegistryConfig('docker.io') } } /** * Finds a registry with a URL that matches the start of the target string. * * @param registries - The list of registries to search. * @param targetString - The string to match against registry URLs. * @returns The matching registry, or null if no match is found. */ private findRegistryByUrlMatch(registries: DockerRegistry[], targetString: string): DockerRegistry | null { for (const registry of registries) { const strippedUrl = registry.url.replace(/^(https?:\/\/)/, '').replace(/\/+$/, '') if (targetString.startsWith(strippedUrl)) { // Ensure match is at a proper boundary (followed by '/', ':', or end-of-string) // to prevent "registry.depot.dev" from matching "registry.depot.dev-evil.com/..." 
const nextChar = targetString[strippedUrl.length]
        if (nextChar === undefined || nextChar === '/' || nextChar === ':') {
          return registry
        }
      }
    }
    return null
  }

  /**
   * Builds an in-memory, anonymous registry config (not persisted) pointing at
   * the given origin over HTTPS. Used as a fallback for public image pulls.
   */
  private createTemporaryRegistryConfig(registryOrigin: string): DockerRegistry {
    const registry = new DockerRegistry()
    registry.id = `temp-${registryOrigin}`
    registry.name = `Temporary ${registryOrigin}`
    // Strip any scheme before forcing https.
    registryOrigin = registryOrigin.replace(/^(https?:\/\/)/, '')
    registry.url = `https://${registryOrigin}`
    registry.username = ''
    registry.password = ''
    registry.project = ''
    registry.isDefault = false
    registry.registryType = RegistryType.INTERNAL
    return registry
  }

  /**
   * Requests an anonymous pull token from Docker Hub's auth service for the
   * given repository. Returns null (after a warning) on any failure.
   */
  private async getDockerHubToken(repository: string): Promise {
    try {
      const tokenUrl = `https://auth.docker.io/token?service=${DOCKER_HUB_REGISTRY}&scope=repository:${repository}:pull`
      const response = await axios.get(tokenUrl, { timeout: 10000 })
      return response.data.token
    } catch (error) {
      this.logger.warn(`Failed to get Docker Hub token: ${error.message}`)
      return null
    }
  }

  /**
   * Deletes every tag of `<project>/<prefix><repository>` via the registry's
   * v2 HTTP API: list tags, resolve each tag's digest with a HEAD request,
   * then DELETE the manifest by digest. Individual tag failures are logged and
   * skipped; listing failures (other than 404) are fatal. Repositories with
   * more than 500 tags are deliberately left untouched.
   */
  private async deleteRepositoryWithPrefix(
    repository: string,
    prefix: string,
    registry: DockerRegistry,
  ): Promise {
    const registryUrl = this.getRegistryUrl(registry)
    const encodedCredentials = Buffer.from(`${registry.username}:${registry.password}`).toString('base64')
    const repoPath = `${registry.project}/${prefix}${repository}`
    try {
      // Step 1: List all tags in the repository
      const tagsUrl = `${registryUrl}/v2/${repoPath}/tags/list`
      const tagsResponse = await axios({
        method: 'get',
        url: tagsUrl,
        headers: {
          Authorization: `Basic ${encodedCredentials}`,
        },
        // Treat 4xx as data (404 = repo already gone), only 5xx throws.
        validateStatus: (status) => status < 500,
        timeout: AXIOS_TIMEOUT_MS,
      })
      if (tagsResponse.status === 404) {
        // Repository does not exist — nothing to clean up.
        return
      }
      if (tagsResponse.status >= 300) {
        this.logger.error(`Error listing tags in repository ${repoPath}: ${tagsResponse.statusText}`)
        throw new Error(`Failed to list tags in repository ${repoPath}: ${tagsResponse.statusText}`)
      }
      const tags = tagsResponse.data.tags || []
      if (tags.length === 0) {
        this.logger.debug(`Repository ${repoPath} has no tags to delete`)
        return
      }
      if (tags.length > 500) {
        this.logger.warn(`Repository ${repoPath} has more than 500 tags, skipping cleanup`)
        return
      }
      // Step 2: Delete each tag
      for (const tag of tags) {
        try {
          // Get the digest for this tag
          const manifestUrl = `${registryUrl}/v2/${repoPath}/manifests/${tag}`
          const manifestResponse = await axios({
            method: 'head',
            url: manifestUrl,
            headers: {
              Authorization: `Basic ${encodedCredentials}`,
              Accept: 'application/vnd.docker.distribution.manifest.v2+json',
            },
            validateStatus: (status) => status < 500,
            timeout: AXIOS_TIMEOUT_MS,
          })
          if (manifestResponse.status >= 300) {
            this.logger.warn(`Couldn't get manifest for tag ${tag}: ${manifestResponse.statusText}`)
            continue
          }
          const digest = manifestResponse.headers['docker-content-digest']
          if (!digest) {
            this.logger.warn(`Docker content digest not found for tag ${tag}`)
            continue
          }
          // Delete the manifest
          const deleteUrl = `${registryUrl}/v2/${repoPath}/manifests/${digest}`
          const deleteResponse = await axios({
            method: 'delete',
            url: deleteUrl,
            headers: {
              Authorization: `Basic ${encodedCredentials}`,
            },
            validateStatus: (status) => status < 500,
            timeout: AXIOS_TIMEOUT_MS,
          })
          if (deleteResponse.status < 300) {
            this.logger.debug(`Deleted tag ${tag} from repository ${repoPath}`)
          } else {
            this.logger.warn(`Failed to delete tag ${tag}: ${deleteResponse.statusText}`)
          }
        } catch (error) {
          this.logger.warn(`Exception when deleting tag ${tag}: ${error.message}`)
          // Continue with other tags
        }
      }
      this.logger.debug(`Repository ${repoPath} cleanup completed`)
    } catch (error) {
      this.logger.error(`Exception when deleting repository ${repoPath}: ${error.message}`)
      throw error
    }
  }

  /**
   * Removes all registry repositories belonging to a sandbox. Both the
   * `backup-` and `snapshot-` prefixed repositories are cleaned because of a
   * historical rename.
   */
  async deleteSandboxRepository(repository: string, registry: DockerRegistry): Promise {
    try {
      // Delete both backup and snapshot repositories - necessary due to renaming
      await this.deleteRepositoryWithPrefix(repository, 'backup-', registry)
      await this.deleteRepositoryWithPrefix(repository, 'snapshot-', registry)
    }
catch (error) {
      this.logger.error(`Failed to delete repositories for ${repository}: ${error.message}`)
      throw error
    }
  }

  /**
   * Deletes a single tagged image from the registry: resolves the tag to its
   * content digest via a HEAD on the manifests endpoint, then deletes the
   * manifest by digest (the only deletion form the v2 API accepts).
   *
   * @throws Error when the image name lacks project/tag, the manifest lookup
   *         fails, the digest header is absent, or the delete is rejected.
   */
  async deleteBackupImageFromRegistry(imageName: string, registry: DockerRegistry): Promise {
    const parsedImage = parseDockerImage(imageName)
    if (!parsedImage.project || !parsedImage.tag) {
      throw new Error('Invalid image name format. Expected: [registry]/project/repository:tag')
    }
    const registryUrl = this.getRegistryUrl(registry)
    const repoPath = `${parsedImage.project}/${parsedImage.repository}`
    // First, get the digest for the tag using the manifests endpoint
    const manifestUrl = `${registryUrl}/v2/${repoPath}/manifests/${parsedImage.tag}`
    const encodedCredentials = Buffer.from(`${registry.username}:${registry.password}`).toString('base64')
    try {
      // Get the digest from the headers
      const manifestResponse = await axios({
        method: 'head', // Using HEAD request to only fetch headers
        url: manifestUrl,
        headers: {
          Authorization: `Basic ${encodedCredentials}`,
          Accept: 'application/vnd.docker.distribution.manifest.v2+json',
        },
        validateStatus: (status) => status < 500,
        timeout: AXIOS_TIMEOUT_MS,
      })
      if (manifestResponse.status >= 300) {
        this.logger.error(`Error getting manifest for image ${imageName}: ${manifestResponse.statusText}`)
        throw new Error(`Failed to get manifest for image ${imageName}: ${manifestResponse.statusText}`)
      }
      // Extract the digest from headers
      const digest = manifestResponse.headers['docker-content-digest']
      if (!digest) {
        throw new Error(`Docker content digest not found for image ${imageName}`)
      }
      // Now delete the image using the digest
      const deleteUrl = `${registryUrl}/v2/${repoPath}/manifests/${digest}`
      const deleteResponse = await axios({
        method: 'delete',
        url: deleteUrl,
        headers: {
          Authorization: `Basic ${encodedCredentials}`,
        },
        validateStatus: (status) => status < 500,
        timeout: AXIOS_TIMEOUT_MS,
      })
      if (deleteResponse.status < 300) {
        this.logger.debug(`Image ${imageName} removed from the registry`)
        return
      }
      this.logger.error(`Error removing image ${imageName} from registry: ${deleteResponse.statusText}`)
      throw new Error(`Failed to remove image ${imageName} from registry: ${deleteResponse.statusText}`)
    } catch (error) {
      this.logger.error(`Exception when deleting image ${imageName}: ${error.message}`)
      throw error
    }
  }

  /**
   * Gets source registries for building a Docker image from a Dockerfile
   * If the Dockerfile has images with registry prefixes, returns all user registries
   *
   * @param dockerfileContent - The Dockerfile content
   * @param organizationId - The organization ID
   * @returns Array of source registries (private registries + default Docker Hub)
   */
  async getSourceRegistriesForDockerfile(dockerfileContent: string, organizationId: string): Promise {
    const sourceRegistries: DockerRegistry[] = []
    // Check if Dockerfile has any images with a registry prefix
    // If so, include all user's registries (we can't reliably match specific registries)
    if (checkDockerfileHasRegistryPrefix(dockerfileContent)) {
      const userRegistries = await this.findAll(organizationId, RegistryType.ORGANIZATION)
      sourceRegistries.push(...userRegistries)
    }
    // Add default Docker Hub registry only if user doesn't have their own Docker Hub credentials
    // The auth configs map is keyed by URL, so adding the default last would override user credentials
    // NOTE(review): substring match — a URL merely containing "docker.io" counts
    // as Docker Hub credentials here; confirm that is the intended semantics.
    const userHasDockerHubCreds = sourceRegistries.some((registry) => registry.url.includes('docker.io'))
    if (!userHasDockerHubCreds) {
      const defaultDockerHubRegistry = await this.getDefaultDockerHubRegistry()
      if (defaultDockerHubRegistry) {
        sourceRegistries.push(defaultDockerHubRegistry)
      }
    }
    return sourceRegistries
  }

  // Rotates stored credentials for every registry in a region that points at
  // the region's snapshot manager URL.
  @OnAsyncEvent({
    event: RegionEvents.SNAPSHOT_MANAGER_CREDENTIALS_REGENERATED,
  })
  private async _handleRegionSnapshotManagerCredsRegenerated(
    payload: RegionSnapshotManagerCredsRegeneratedEvent,
  ): Promise {
    const { regionId, snapshotManagerUrl, username, password, entityManager } = payload
    // Prefer the transactional entity manager supplied with the event.
    const em = entityManager ??
this.dockerRegistryRepository.manager const registries = await em.count(DockerRegistry, { where: { region: regionId, url: snapshotManagerUrl }, }) if (registries === 0) { throw new NotFoundException(`No registries found for region ${regionId} with URL ${snapshotManagerUrl}`) } await em.update(DockerRegistry, { region: regionId, url: snapshotManagerUrl }, { username, password }) } @OnAsyncEvent({ event: RegionEvents.SNAPSHOT_MANAGER_UPDATED, }) private async _handleRegionSnapshotManagerUpdated(payload: RegionSnapshotManagerUpdatedEvent): Promise { const { region, organizationId, snapshotManagerUrl, prevSnapshotManagerUrl, entityManager, newUsername, newPassword, } = payload const em = entityManager ?? this.dockerRegistryRepository.manager if (prevSnapshotManagerUrl) { // Update old registries associated with previous snapshot manager URL if (snapshotManagerUrl) { await em.update( DockerRegistry, { region: region.id, url: prevSnapshotManagerUrl, }, { url: snapshotManagerUrl, username: newUsername, password: newPassword, }, ) } else { // If snapshot manager URL is removed, delete associated registries await em.delete(DockerRegistry, { region: region.id, url: prevSnapshotManagerUrl, }) } return } const registries = await em.count(DockerRegistry, { where: { region: region.id, url: snapshotManagerUrl }, }) if (registries === 0) { await this._handleRegionCreatedEvent( new RegionCreatedEvent(entityManager, region, organizationId, newUsername, newPassword), ) } } @OnAsyncEvent({ event: RegionEvents.CREATED, }) private async _handleRegionCreatedEvent(payload: RegionCreatedEvent): Promise { const { entityManager, region, organizationId, snapshotManagerUsername, snapshotManagerPassword } = payload if (!region.snapshotManagerUrl || !snapshotManagerUsername || !snapshotManagerPassword) { return } await this.create( { name: `${region.name}-backup`, url: region.snapshotManagerUrl, username: snapshotManagerUsername, password: snapshotManagerPassword, registryType: 
RegistryType.BACKUP, regionId: region.id, }, organizationId ?? undefined, false, entityManager, ) await this.create( { name: `${region.name}-internal`, url: region.snapshotManagerUrl, username: snapshotManagerUsername, password: snapshotManagerPassword, registryType: RegistryType.INTERNAL, regionId: region.id, }, organizationId ?? undefined, false, entityManager, ) await this.create( { name: `${region.name}-transient`, url: region.snapshotManagerUrl, username: snapshotManagerUsername, password: snapshotManagerPassword, registryType: RegistryType.TRANSIENT, regionId: region.id, }, organizationId ?? undefined, false, entityManager, ) } @OnAsyncEvent({ event: RegionEvents.DELETED, }) async handleRegionDeletedEvent(payload: RegionDeletedEvent): Promise { const { entityManager, region } = payload if (!region.snapshotManagerUrl) { return } const repository = entityManager.getRepository(DockerRegistry) await repository.delete({ region: region.id }) } } ================================================ FILE: apps/api/src/email/constants.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const EMAIL_MODULE_OPTIONS = 'EMAIL_MODULE_OPTIONS' ================================================ FILE: apps/api/src/email/email.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { DynamicModule, Module } from '@nestjs/common' import { EmailService } from './services/email.service' import { EMAIL_MODULE_OPTIONS } from './constants' export interface EmailModuleOptions { host: string port: number user?: string password?: string secure?: boolean from: string dashboardUrl: string } @Module({}) export class EmailModule { static forRoot(options: EmailModuleOptions): DynamicModule { return { module: EmailModule, providers: [ { provide: EMAIL_MODULE_OPTIONS, useValue: options, }, EmailService, ], exports: [EmailService], } } static forRootAsync(options: { useFactory: (...args: any[]) => Promise | EmailModuleOptions inject?: any[] }): DynamicModule { return { module: EmailModule, providers: [ { provide: EMAIL_MODULE_OPTIONS, useFactory: options.useFactory, inject: options.inject || [], }, EmailService, ], exports: [EmailService], } } } ================================================ FILE: apps/api/src/email/services/email.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Inject, Injectable, Logger } from '@nestjs/common' import { renderFile } from 'ejs' import { createTransport, Transporter } from 'nodemailer' import path from 'path' import { OnAsyncEvent } from '../../common/decorators/on-async-event.decorator' import { OrganizationEvents } from '../../organization/constants/organization-events.constant' import { OrganizationInvitationCreatedEvent } from '../../organization/events/organization-invitation-created.event' import { EmailModuleOptions } from '../email.module' @Injectable() export class EmailService { private readonly transporter: Transporter | null private readonly logger = new Logger(EmailService.name) constructor(@Inject('EMAIL_MODULE_OPTIONS') private readonly options: EmailModuleOptions) { const { host, port, user, password, secure, from, dashboardUrl } = this.options if (!host || !port || !from) { this.logger.warn('Email configuration not found, email functionality will be disabled') this.transporter = null return } this.transporter = createTransport({ host, port, auth: user && password ? 
{ user, pass: password } : undefined, secure, }) } @OnAsyncEvent({ event: OrganizationEvents.INVITATION_CREATED, }) async handleOrganizationInvitationCreated(payload: OrganizationInvitationCreatedEvent): Promise { if (!this.transporter) { this.logger.warn('Failed to send organization invitation email, email configuration not found') return } try { await this.transporter.sendMail({ from: this.options.from, to: payload.inviteeEmail, subject: 'Invitation to join a Daytona organization', html: await renderFile(path.join(__dirname, 'assets/templates/organization-invitation.template.ejs'), { organizationName: payload.organizationName, invitedBy: payload.invitedBy, invitationLink: `${this.options.dashboardUrl}/user/invitations?id=${payload.invitationId}`, expiresAt: new Date(payload.expiresAt).toLocaleDateString('en-US', { year: 'numeric', month: 'long', day: 'numeric', }), }), }) } catch (error) { // TODO: resilient email sending this.logger.error(`Failed to send organization invitation email to ${payload.inviteeEmail}`, error) } } } ================================================ FILE: apps/api/src/encryption/encryption.module.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { EncryptionService } from './encryption.service' @Module({ providers: [EncryptionService], exports: [EncryptionService], }) export class EncryptionModule {} ================================================ FILE: apps/api/src/encryption/encryption.service.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger } from '@nestjs/common' import { createCipheriv, createDecipheriv, randomBytes, scrypt } from 'crypto' import { promisify } from 'node:util' import { TypedConfigService } from '../config/typed-config.service' export interface EncryptedData { data: string iv: string } @Injectable() export class EncryptionService { private readonly algorithm = 'aes-256-ctr' private readonly encoding = 'base64' private readonly secret: string private readonly salt: string private readonly logger = new Logger(EncryptionService.name) constructor(configService: TypedConfigService) { this.logger.debug('Initializing encryption service') this.secret = configService.getOrThrow('encryption.key') this.salt = configService.getOrThrow('encryption.salt') } public async encrypt(input: string): Promise { const key = (await promisify(scrypt)(this.secret, this.salt, 32)) as Buffer const iv = randomBytes(16) const cipher = createCipheriv(this.algorithm, key, iv) return this.serialize({ data: Buffer.concat([cipher.update(input), cipher.final()]).toString(this.encoding), iv: iv.toString(this.encoding), }) } /** * Decrypts the input string. If backwardsCompatible is true, it will return the input string * as is if decryption fails (for handling unencrypted data). 
*/ public async decrypt(input: string, backwardsCompatible = false): Promise { if (backwardsCompatible) { try { return await this._decrypt(input) } catch { return input } } return this._decrypt(input) } private async _decrypt(input: string): Promise { const encryptedData = this.deserialize(input) const key = (await promisify(scrypt)(this.secret, this.salt, 32)) as Buffer const encrypted = Buffer.from(encryptedData.data, this.encoding) const iv = Buffer.from(encryptedData.iv, this.encoding) const decipher = createDecipheriv(this.algorithm, key, iv) const decrypted = Buffer.concat([decipher.update(encrypted), decipher.final()]) return decrypted.toString() } private serialize(data: EncryptedData): string { return JSON.stringify(data) } private deserialize(data: string): EncryptedData { return JSON.parse(data) } } ================================================ FILE: apps/api/src/exceptions/bad-request.exception.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { HttpException, HttpStatus } from '@nestjs/common' export class BadRequestError extends HttpException { constructor(message: string) { super(message, HttpStatus.BAD_REQUEST) } } ================================================ FILE: apps/api/src/exceptions/forbidden-operation.exception.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { HttpException, HttpStatus } from '@nestjs/common' export class ForbiddenOperationError extends HttpException { constructor(message: string) { super(message, HttpStatus.FORBIDDEN) } } ================================================ FILE: apps/api/src/exceptions/not-found.exception.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { HttpException, HttpStatus } from '@nestjs/common' export class ResourceNotFoundError extends HttpException { constructor(message: string) { super(message, HttpStatus.NOT_FOUND) } } ================================================ FILE: apps/api/src/exceptions/sandbox-error.exception.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { HttpException, HttpStatus } from '@nestjs/common' export class SandboxError extends HttpException { constructor(message: string) { super(message, HttpStatus.BAD_REQUEST) } } ================================================ FILE: apps/api/src/filters/all-exceptions.filter.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { join } from 'node:path' import { STATUS_CODES } from 'node:http' import { Request, Response } from 'express' import { ExceptionFilter, Catch, ArgumentsHost, HttpException, Logger, HttpStatus, NotFoundException, UnauthorizedException, } from '@nestjs/common' import { FailedAuthTrackerService } from '../auth/failed-auth-tracker.service' @Catch() export class AllExceptionsFilter implements ExceptionFilter { private readonly logger = new Logger(AllExceptionsFilter.name) constructor(private readonly failedAuthTracker: FailedAuthTrackerService) {} async catch(exception: unknown, host: ArgumentsHost): Promise { const ctx = host.switchToHttp() const response = ctx.getResponse() const request = ctx.getRequest() let statusCode: number let error: string let message: string // If the exception is a NotFoundException and the request path is not an API request, serve the dashboard index.html file if (exception instanceof NotFoundException && !request.path.startsWith('/api/')) { const response = ctx.getResponse() response.sendFile(join(__dirname, '..', 'dashboard', 'index.html')) return } // Track failed 
authentication attempts if (exception instanceof UnauthorizedException) { try { await this.failedAuthTracker.incrementFailedAuth(request, response) } catch (error) { this.logger.error('Failed to track authentication failure:', error) } } if (exception instanceof HttpException) { statusCode = exception.getStatus() error = STATUS_CODES[statusCode] const exceptionResponse = exception.getResponse() if (typeof exceptionResponse === 'string') { message = exceptionResponse } else { const responseMessage = (exceptionResponse as Record).message message = Array.isArray(responseMessage) ? responseMessage.join(', ') : (responseMessage as string) || exception.message } } else { this.logger.error(exception) error = STATUS_CODES[HttpStatus.INTERNAL_SERVER_ERROR] message = 'An unexpected error occurred.' statusCode = HttpStatus.INTERNAL_SERVER_ERROR } response.status(statusCode).json({ path: request.url, timestamp: new Date().toISOString(), statusCode, error, message, }) } } ================================================ FILE: apps/api/src/filters/kafka-exception.filter.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Catch, ArgumentsHost, Logger } from '@nestjs/common' import { KafkaContext } from '@nestjs/microservices' import NodeCache from 'node-cache' interface KafkaMaxRetryOptions { retries?: number sendToDlq?: boolean commitOffset?: boolean } @Catch() export class KafkaMaxRetryExceptionFilter { private readonly logger = new Logger(KafkaMaxRetryExceptionFilter.name) private readonly maxRetries: number private readonly sendToDlq: boolean private readonly dlqTopicSuffix = '.dlq' private readonly commitOffset: boolean private readonly retryTracker: NodeCache constructor(options: KafkaMaxRetryOptions = {}) { this.maxRetries = options.retries ?? 3 this.sendToDlq = options.sendToDlq ?? false this.commitOffset = options.commitOffset ?? 
true // Initialize retry tracker with 5 minutes TTL this.retryTracker = new NodeCache({ stdTTL: 300, checkperiod: 60, useClones: false, }) } async catch(exception: unknown, host: ArgumentsHost) { try { const kafkaContext = host.switchToRpc().getContext() const message = kafkaContext.getMessage() const messageKey = this.createMessageKey(kafkaContext) this.logger.debug('Processing message', { messageKey, offset: message.offset }) const currentRetryCount = (this.retryTracker.get(messageKey) as number) || 0 if (currentRetryCount >= this.maxRetries) { await this.handleMaxRetriesExceeded(kafkaContext, message, messageKey, currentRetryCount, exception) return } // Allow retry this.retryTracker.set(messageKey, currentRetryCount + 1, 300) // 5 minutes TTL this.logger.debug(`Allowing retry ${currentRetryCount + 1}/${this.maxRetries} for message ${messageKey}`) throw exception } catch (filterError) { this.logger.error('Error in filter:', filterError) throw exception } } private createMessageKey(context: KafkaContext): string { return `${context.getTopic()}-${context.getPartition()}-${context.getMessage().offset}` } private async handleMaxRetriesExceeded( context: KafkaContext, message: any, messageKey: string, retryCount: number, exception: unknown, ): Promise { this.logger.warn(`Max retries (${this.maxRetries}) exceeded for message ${messageKey}`) // Clean up retry tracker this.retryTracker.del(messageKey) if (this.sendToDlq) { await this.sendToDLQ(context, message, retryCount, exception) } if (this.commitOffset) { await this.commitMessageOffset(context, messageKey) } } private async sendToDLQ(context: KafkaContext, message: any, retryCount: number, exception: unknown): Promise { try { const producer = context.getProducer() if (!producer) { this.logger.warn('Producer not available, cannot send to DLQ') return } const dlqTopic = `${context.getTopic()}${this.dlqTopicSuffix}` const dlqMessage = this.createDLQMessage(message, retryCount, context, exception) await producer.send({ 
topic: dlqTopic, messages: [dlqMessage], }) this.logger.log(`Message sent to DLQ: ${dlqTopic}`) } catch (error) { this.logger.error('Failed to send message to DLQ:', error) } } private createDLQMessage(message: any, retryCount: number, context: KafkaContext, exception: unknown) { return { value: JSON.stringify(message.value), headers: { ...message.headers, 'retry-count': retryCount.toString(), 'original-topic': context.getTopic(), 'original-offset': String(message.offset), 'failed-at': new Date().toISOString(), 'error-type': exception instanceof Error ? exception.constructor.name : typeof exception, 'error-message': exception instanceof Error ? exception.message : String(exception), 'error-stack': exception instanceof Error ? exception.stack : undefined, }, } } private async commitMessageOffset(context: KafkaContext, messageKey: string): Promise { try { const consumer = context.getConsumer() if (!consumer) { this.logger.warn('Consumer not available, cannot commit offset') return } await consumer.commitOffsets([ { topic: context.getTopic(), partition: context.getPartition(), offset: String(Number(context.getMessage().offset) + 1), }, ]) this.logger.log(`Offset committed for message ${messageKey}`) } catch (error) { this.logger.error(`Failed to commit offset for message ${messageKey}:`, error) } } } ================================================ FILE: apps/api/src/generate-openapi.ts ================================================ #!/usr/bin/env ts-node import * as fs from 'fs' import * as path from 'path' import { NestFactory } from '@nestjs/core' import { AppModule } from './app.module' import { SwaggerModule } from '@nestjs/swagger' import { getOpenApiConfig } from './openapi.config' import { addWebhookDocumentation } from './openapi-webhooks' import { SandboxCreatedWebhookDto, SandboxStateUpdatedWebhookDto, SnapshotCreatedWebhookDto, SnapshotStateUpdatedWebhookDto, SnapshotRemovedWebhookDto, VolumeCreatedWebhookDto, VolumeStateUpdatedWebhookDto, } from 
'./webhook/dto/webhook-event-payloads.dto' async function generateOpenAPI() { try { const app = await NestFactory.create(AppModule, { logger: ['error'], // Reduce logging noise }) const config = getOpenApiConfig('http://localhost:3000') const document = { ...SwaggerModule.createDocument(app, config), } const openapiPath = './dist/apps/api/openapi.json' fs.mkdirSync(path.dirname(openapiPath), { recursive: true }) fs.writeFileSync(openapiPath, JSON.stringify(document, null, 2)) // Generate 3.1.0 version of the OpenAPI specification // Needed for the webhook documentation const document_3_1_0 = { ...SwaggerModule.createDocument(app, config, { extraModels: [ SandboxCreatedWebhookDto, SandboxStateUpdatedWebhookDto, SnapshotCreatedWebhookDto, SnapshotStateUpdatedWebhookDto, SnapshotRemovedWebhookDto, VolumeCreatedWebhookDto, VolumeStateUpdatedWebhookDto, ], }), openapi: '3.1.0', } const documentWithWebhooks = addWebhookDocumentation(document_3_1_0) const openapi310Path = './dist/apps/api/openapi.3.1.0.json' fs.mkdirSync(path.dirname(openapi310Path), { recursive: true }) fs.writeFileSync(openapi310Path, JSON.stringify(documentWithWebhooks, null, 2)) await app.close() console.log('OpenAPI specification generated successfully!') clearTimeout(timeout) process.exit(0) } catch (error) { console.error('Failed to generate OpenAPI specification:', error) clearTimeout(timeout) process.exit(1) } } // Add timeout to prevent hanging const timeout = setTimeout(() => { console.error('Generation timed out after 30 seconds') process.exit(1) }, 30000) // Clear timeout if process exits normally process.on('exit', () => { clearTimeout(timeout) }) generateOpenAPI() ================================================ FILE: apps/api/src/health/health.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get, Logger, ServiceUnavailableException, UseGuards } from '@nestjs/common' import { HealthCheckService, HealthCheck, TypeOrmHealthIndicator } from '@nestjs/terminus' import { RedisHealthIndicator } from './redis.health' import { AnonymousRateLimitGuard } from '../common/guards/anonymous-rate-limit.guard' import { AuthenticatedRateLimitGuard } from '../common/guards/authenticated-rate-limit.guard' import { CombinedAuthGuard } from '../auth/combined-auth.guard' import { HealthCheckGuard } from '../auth/health-check.guard' @Controller('health') export class HealthController { private readonly logger = new Logger(HealthController.name) constructor( private health: HealthCheckService, private db: TypeOrmHealthIndicator, private redis: RedisHealthIndicator, ) {} @Get() @UseGuards(AnonymousRateLimitGuard) live() { return { status: 'ok' } } @Get('ready') @UseGuards(CombinedAuthGuard, HealthCheckGuard, AuthenticatedRateLimitGuard) @HealthCheck() async check() { try { const result = await this.health.check([() => this.db.pingCheck('database'), () => this.redis.isHealthy('redis')]) return { status: result.status } } catch (error) { this.logger.error(error) throw new ServiceUnavailableException() } } } ================================================ FILE: apps/api/src/health/health.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { TerminusModule } from '@nestjs/terminus' import { HealthController } from './health.controller' import { RedisModule } from '@nestjs-modules/ioredis' import { RedisHealthIndicator } from './redis.health' @Module({ imports: [TerminusModule, RedisModule], controllers: [HealthController], providers: [RedisHealthIndicator], }) export class HealthModule {} ================================================ FILE: apps/api/src/health/redis.health.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable } from '@nestjs/common' import { HealthIndicatorService } from '@nestjs/terminus' import { InjectRedis } from '@nestjs-modules/ioredis' import Redis from 'ioredis' @Injectable() export class RedisHealthIndicator { private readonly redis: Redis constructor( @InjectRedis() redis: Redis, private readonly healthIndicatorService: HealthIndicatorService, ) { this.redis = redis.duplicate({ commandTimeout: 1000, }) } async isHealthy(key: string) { // Start the health indicator check for the given key const indicator = this.healthIndicatorService.check(key) try { await this.redis.ping() return indicator.up() } catch (error) { return indicator.down(error) } } } ================================================ FILE: apps/api/src/interceptors/metrics.interceptor.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, NestInterceptor, ExecutionContext, CallHandler, OnApplicationShutdown, Logger, } from '@nestjs/common' import { Observable } from 'rxjs' import { tap } from 'rxjs/operators' import { PostHog } from 'posthog-node' import { SandboxDto } from '../sandbox/dto/sandbox.dto' import { DockerRegistryDto } from '../docker-registry/dto/docker-registry.dto' import { CreateSandboxDto } from '../sandbox/dto/create-sandbox.dto' import { Request } from 'express' import { CreateSnapshotDto } from '../sandbox/dto/create-snapshot.dto' import { SnapshotDto } from '../sandbox/dto/snapshot.dto' import { CreateOrganizationDto } from '../organization/dto/create-organization.dto' import { UpdateOrganizationQuotaDto } from '../organization/dto/update-organization-quota.dto' import { OrganizationDto } from '../organization/dto/organization.dto' import { UpdateOrganizationMemberAccessDto } from '../organization/dto/update-organization-member-access.dto' import { CreateOrganizationRoleDto } from '../organization/dto/create-organization-role.dto' import { UpdateOrganizationRoleDto } from '../organization/dto/update-organization-role.dto' import { CreateOrganizationInvitationDto } from '../organization/dto/create-organization-invitation.dto' import { UpdateOrganizationInvitationDto } from '../organization/dto/update-organization-invitation.dto' import { CustomHeaders } from '../common/constants/header.constants' import { CreateVolumeDto } from '../sandbox/dto/create-volume.dto' import { VolumeDto } from '../sandbox/dto/volume.dto' import { CreateWorkspaceDto } from '../sandbox/dto/create-workspace.deprecated.dto' import { WorkspaceDto } from '../sandbox/dto/workspace.deprecated.dto' import { TypedConfigService } from '../config/typed-config.service' import { UpdateOrganizationRegionQuotaDto } from '../organization/dto/update-organization-region-quota.dto' import { UpdateOrganizationDefaultRegionDto } from 
'../organization/dto/update-organization-default-region.dto'

// Express request after auth middleware has (possibly) attached the user.
type RequestWithUser = Request & { user?: { userId: string; organizationId: string } }

// Properties attached to every PostHog event emitted by this interceptor.
type CommonCaptureProps = {
  organizationId?: string
  distinctId: string
  durationMs: number
  statusCode: number
  userAgent: string
  error?: string
  source: string
  isDeprecated?: boolean
  sdkVersion?: string
  environment?: string
}

/**
 * Global interceptor that records product analytics (PostHog) for selected
 * API routes: sandbox/workspace lifecycle, snapshots, organizations, volumes
 * and toolbox commands. If PostHog is not configured the interceptor is a
 * no-op pass-through.
 */
@Injectable()
export class MetricsInterceptor implements NestInterceptor, OnApplicationShutdown {
  // Undefined when POSTHOG_API_KEY / POSTHOG_HOST are missing — metrics disabled.
  private readonly posthog?: PostHog
  // Daytona version, attached to every event as daytona_version.
  private readonly version: string
  private readonly logger = new Logger(MetricsInterceptor.name)

  constructor(private readonly configService: TypedConfigService) {
    this.version = this.configService.getOrThrow('version')

    if (!this.configService.get('posthog.apiKey')) {
      this.logger.warn('POSTHOG_API_KEY is not set, metrics will not be recorded')
      return
    }

    if (!this.configService.get('posthog.host')) {
      this.logger.warn('POSTHOG_HOST is not set, metrics will not be recorded')
      return
    }

    // Initialize PostHog client
    // Make sure to set POSTHOG_API_KEY in your environment variables
    this.posthog = new PostHog(this.configService.getOrThrow('posthog.apiKey'), {
      host: this.configService.getOrThrow('posthog.host'),
    })
  }

  // NOTE(review): return type reads `Observable` with no type argument here —
  // likely `Observable<any>` lost in extraction; confirm against the repo.
  intercept(context: ExecutionContext, next: CallHandler): Observable {
    // PostHog not configured — do not pay for timing/recording.
    if (!this.posthog) {
      return next.handle()
    }

    const request = context.switchToHttp().getRequest()
    const startTime = Date.now()

    return next.handle().pipe(
      tap({
        next: (response) => {
          // For DELETE requests or empty responses, pass an empty object with statusCode
          const responseObj = response || { statusCode: 204 }
          // Fire-and-forget: metrics failures are logged, never propagated.
          this.recordMetrics(request, responseObj, startTime).catch((err) => this.logger.error(err))
        },
        error: (error) => {
          this.recordMetrics(
            request,
            { statusCode: error.status || 500 },
            startTime,
            error.message || JSON.stringify(error),
          ).catch((err) => this.logger.error(err))
        },
      }),
    )
  }

  /**
   * Routes a finished request to the matching capture* helper based on HTTP
   * method and the Express route pattern (request.route.path), then handles
   * toolbox sub-routes separately at the bottom.
   *
   * @param request   - request with optional authenticated user
   * @param response  - response body, or `{ statusCode }` stub on error/empty
   * @param startTime - Date.now() taken before the handler ran
   * @param error     - error message when the handler threw
   */
  private async recordMetrics(request: RequestWithUser, response: any, startTime: number, error?: string) {
    const durationMs = Date.now() - startTime
    const statusCode = error
      ? response.statusCode
      : response.statusCode || (request.method === 'DELETE' ? 204 : 200) // Default to 204 for DELETE requests
    const distinctId = request.user?.userId || 'anonymous'
    // NOTE(review): request.get() may return undefined; CommonCaptureProps
    // types userAgent/source as string — confirm strictNullChecks settings.
    const userAgent = request.get('user-agent')
    const source = request.get(CustomHeaders.SOURCE.name)
    const sdkVersion = request.get(CustomHeaders.SDK_VERSION.name)

    const props: CommonCaptureProps = {
      distinctId,
      organizationId: request.user?.organizationId,
      durationMs,
      statusCode,
      userAgent,
      error,
      source: Array.isArray(source) ? source[0] : source,
      // Legacy /workspace and /images routes are tracked as deprecated.
      isDeprecated: request.route.path.includes('/workspace') || request.route.path.includes('/images'),
      sdkVersion,
      environment: this.configService.get('posthog.environment'),
    }

    switch (request.method) {
      case 'POST':
        switch (request.route.path) {
          case '/api/api-keys':
            this.captureCreateApiKey(props)
            break
          case '/api/snapshots':
            this.captureCreateSnapshot(props, request.body, response)
            break
          case '/api/snapshots/:snapshotId/activate':
            this.captureActivateSnapshot(props, request.params.snapshotId)
            break
          case '/api/snapshots/:snapshotId/deactivate':
            this.captureDeactivateSnapshot(props, request.params.snapshotId)
            break
          case '/api/docker-registry':
            this.captureCreateDockerRegistry(props, response)
            break
          case '/api/sandbox':
            this.captureCreateSandbox(props, request.body, response)
            break
          case '/api/workspace':
            this.captureCreateWorkspace_deprecated(props, request.body, response)
            break
          // Paired cases below: new /sandbox route and deprecated /workspace
          // route funnel into the same event.
          case '/api/sandbox/:sandboxIdOrName/start':
          case '/api/workspace/:workspaceId/start':
            this.captureStartSandbox(props, request.params.sandboxIdOrName || request.params.workspaceId)
            break
          case '/api/sandbox/:sandboxIdOrName/stop':
          case '/api/workspace/:workspaceId/stop':
            this.captureStopSandbox(props, request.params.sandboxIdOrName || request.params.workspaceId)
            break
          case '/api/sandbox/:sandboxIdOrName/resize':
            this.captureResizeSandbox(props, request.params.sandboxIdOrName, request.body)
            break
          case '/api/sandbox/:sandboxIdOrName/archive':
          case '/api/workspace/:workspaceId/archive':
            this.captureArchiveSandbox(props, request.params.sandboxIdOrName || request.params.workspaceId)
            break
          case '/api/sandbox/:sandboxIdOrName/backup':
            this.captureCreateBackup(props, request.params.sandboxIdOrName)
            break
          case '/api/sandbox/:sandboxIdOrName/public/:isPublic':
          case '/api/workspace/:workspaceId/public/:isPublic':
            this.captureUpdatePublicStatus(
              props,
              request.params.sandboxIdOrName || request.params.workspaceId,
              request.params.isPublic === 'true',
            )
            break
          // NOTE(review): parseInt without an explicit radix below — route
          // params should always be base-10; consider parseInt(x, 10).
          case '/api/sandbox/:sandboxIdOrName/autostop/:interval':
          case '/api/workspace/:workspaceId/autostop/:interval':
            this.captureSetAutostopInterval(
              props,
              request.params.sandboxIdOrName || request.params.workspaceId,
              parseInt(request.params.interval),
            )
            break
          case '/api/sandbox/:sandboxIdOrName/autoarchive/:interval':
          case '/api/workspace/:workspaceId/autoarchive/:interval':
            this.captureSetAutoArchiveInterval(
              props,
              request.params.sandboxIdOrName || request.params.workspaceId,
              parseInt(request.params.interval),
            )
            break
          case '/api/sandbox/:sandboxIdOrName/autodelete/:interval':
            this.captureSetAutoDeleteInterval(props, request.params.sandboxIdOrName, parseInt(request.params.interval))
            break
          case '/api/organizations/invitations/:invitationId/accept':
            this.captureAcceptInvitation(props, request.params.invitationId)
            break
          case '/api/organizations/invitations/:invitationId/decline':
            this.captureDeclineInvitation(props, request.params.invitationId)
            break
          case '/api/organizations':
            this.captureCreateOrganization(props, request.body, response)
            break
          case '/api/organizations/:organizationId/leave':
            this.captureLeaveOrganization(props, request.params.organizationId)
            break
          case '/api/organizations/:organizationId/users/:userId/access':
            this.captureUpdateOrganizationUserAccess(
              props,
              request.params.organizationId,
              request.params.userId,
              request.body,
            )
            break
          case '/api/organizations/:organizationId/roles':
            this.captureCreateOrganizationRole(props, request.params.organizationId, request.body)
            break
          case '/api/organizations/:organizationId/invitations':
            this.captureCreateOrganizationInvitation(props, request.params.organizationId, request.body)
            break
          case '/api/organizations/:organizationId/invitations/:invitationId/cancel':
            this.captureCancelOrganizationInvitation(props, request.params.organizationId, request.params.invitationId)
            break
          case '/api/volumes':
            this.captureCreateVolume(props, request.body, response)
            break
        }
        break
      case 'DELETE':
        switch (request.route.path) {
          case '/api/sandbox/:sandboxIdOrName':
          case '/api/workspace/:workspaceId':
            this.captureDeleteSandbox(props, request.params.sandboxIdOrName || request.params.workspaceId)
            break
          case '/api/snapshots/:snapshotId':
            this.captureDeleteSnapshot(props, request.params.snapshotId)
            break
          case '/api/organizations/:organizationId':
            this.captureDeleteOrganization(props, request.params.organizationId)
            break
          case '/api/organizations/:organizationId/users/:userId':
            this.captureDeleteOrganizationUser(props, request.params.organizationId, request.params.userId)
            break
          case '/api/organizations/:organizationId/roles/:roleId':
            this.captureDeleteOrganizationRole(props, request.params.organizationId, request.params.roleId)
            break
          case '/api/volumes/:volumeId':
            this.captureDeleteVolume(props, request.params.volumeId)
            break
        }
        break
      case 'PUT':
        switch (request.route.path) {
          case '/api/sandbox/:sandboxIdOrName/labels':
          case '/api/workspace/:workspaceId/labels':
            this.captureUpdateSandboxLabels(props, request.params.sandboxIdOrName || request.params.workspaceId)
            break
          case '/api/organizations/:organizationId/roles/:roleId':
            this.captureUpdateOrganizationRole(
              props,
              request.params.organizationId,
              request.params.roleId,
              request.body,
            )
            break
          case '/api/organizations/:organizationId/invitations/:invitationId':
            this.captureUpdateOrganizationInvitation(
              props,
              request.params.organizationId,
              request.params.invitationId,
              request.body,
            )
            break
          case '/api/organizations/:organizationId/experimental-config':
            this.captureUpdateOrganizationExperimentalConfig(props, request.body)
            break
        }
        break
      case 'PATCH':
        switch (request.route.path) {
          case '/api/organizations/:organizationId/default-region':
            this.captureSetOrganizationDefaultRegion(props, request.params.organizationId, request.body)
            break
          case '/api/organizations/:organizationId/quota':
            this.captureUpdateOrganizationQuota(props, request.params.organizationId, request.body)
            break
          case '/api/organizations/:organizationId/quota/:regionId':
            this.captureUpdateOrganizationRegionQuota(
              props,
              request.params.organizationId,
              request.params.regionId,
              request.body,
            )
            break
        }
        break
    }

    // Everything below handles only toolbox proxy routes.
    if (!request.route.path.startsWith('/api/toolbox/:sandboxId/toolbox')) {
      return
    }

    // Strip the common prefix so cases below match on the toolbox sub-path.
    const path = request.route.path.replace('/api/toolbox/:sandboxId/toolbox', '')
    switch (path) {
      case '/project-dir':
        this.captureToolboxCommand(props, request.params.sandboxId, 'project-dir_get')
        break
      case '/files':
        switch (request.method) {
          case 'GET':
            this.captureToolboxCommand(props, request.params.sandboxId, 'files_list')
            break
          case 'DELETE':
            this.captureToolboxCommand(props, request.params.sandboxId, 'files_delete')
            break
        }
        break
      case '/files/download':
        this.captureToolboxCommand(props, request.params.sandboxId, 'files_download')
        break
      case '/files/find':
        this.captureToolboxCommand(props, request.params.sandboxId, 'files_find')
        break
      case '/files/folder':
        this.captureToolboxCommand(props, request.params.sandboxId, 'files_folder_create')
        break
      case '/files/info':
        this.captureToolboxCommand(props, request.params.sandboxId, 'files_info')
        break
      case '/files/move':
        this.captureToolboxCommand(props, request.params.sandboxId, 'files_move')
        break
      case '/files/permissions':
        this.captureToolboxCommand(props, request.params.sandboxId, 'files_permissions')
        break
      case '/files/replace':
        this.captureToolboxCommand(props, request.params.sandboxId, 'files_replace')
        break
      case '/files/search':
        this.captureToolboxCommand(props, request.params.sandboxId, 'files_search')
        break
      case '/files/upload':
        this.captureToolboxCommand(props, request.params.sandboxId, 'files_upload')
        break
      case '/git/add':
        this.captureToolboxCommand(props, request.params.sandboxId, 'git_add')
        break
      case '/git/branches':
        switch (request.method) {
          case 'GET':
            this.captureToolboxCommand(props, request.params.sandboxId, 'git_branches_list')
            break
          case 'POST':
            this.captureToolboxCommand(props, request.params.sandboxId, 'git_branches_create')
            break
        }
        break
      case '/git/clone':
        this.captureToolboxCommand(props, request.params.sandboxId, 'git_clone')
        break
      case '/git/commit':
        this.captureToolboxCommand(props, request.params.sandboxId, 'git_commit')
        break
      case '/git/history':
        this.captureToolboxCommand(props, request.params.sandboxId, 'git_history')
        break
      case '/git/pull':
        this.captureToolboxCommand(props, request.params.sandboxId, 'git_pull')
        break
      case '/git/push':
        this.captureToolboxCommand(props, request.params.sandboxId, 'git_push')
        break
      case '/git/status':
        this.captureToolboxCommand(props, request.params.sandboxId, 'git_status')
        break
      case '/process/execute':
        this.captureToolboxCommand(props, request.params.sandboxId, 'process_execute', {
          command: request.body.command,
          cwd: request.body.cwd,
          exit_code: response.exitCode,
          timeout_sec: request.body.timeout,
        })
        break
      case '/process/session':
        switch (request.method) {
          case 'GET':
            this.captureToolboxCommand(props, request.params.sandboxId, 'process_session_list')
            break
          case 'POST':
            this.captureToolboxCommand(props, request.params.sandboxId, 'process_session_create', {
              session_id: request.body.sessionId,
            })
            break
        }
        break
      case '/process/session/:sessionId':
        switch (request.method) {
          case 'GET':
            this.captureToolboxCommand(props, request.params.sandboxId, 'process_session_get', {
              session_id: request.params.sessionId,
            })
            break
          case 'DELETE':
            this.captureToolboxCommand(props, request.params.sandboxId, 'process_session_delete', {
              session_id: request.params.sessionId,
            })
            break
        }
        break
      case '/process/session/:sessionId/exec':
        this.captureToolboxCommand(props, request.params.sandboxId, 'process_session_execute', {
          session_id: request.params.sessionId,
          command: request.body.command,
        })
        break
      case '/process/session/:sessionId/command/:commandId':
        this.captureToolboxCommand(props, request.params.sandboxId, 'process_session_command_get', {
          session_id: request.params.sessionId,
          command_id: request.params.commandId,
        })
        break
      case '/process/session/:sessionId/command/:commandId/logs':
        this.captureToolboxCommand(props, request.params.sandboxId, 'process_session_command_logs', {
          session_id: request.params.sessionId,
          command_id: request.params.commandId,
        })
        break
      case '/lsp/completions':
        this.captureToolboxCommand(props, request.params.sandboxId, 'lsp_completions')
        break
      case '/lsp/did-close':
        this.captureToolboxCommand(props, request.params.sandboxId, 'lsp_did_close')
        break
      case '/lsp/did-open':
        this.captureToolboxCommand(props, request.params.sandboxId, 'lsp_did_open')
        break
      case '/lsp/document-symbols':
        this.captureToolboxCommand(props, request.params.sandboxId, 'lsp_document_symbols')
        break
      case '/lsp/start':
        this.captureToolboxCommand(props, request.params.sandboxId, 'lsp_start', {
          language_id: request.body.languageId,
        })
        break
      case '/lsp/stop':
        this.captureToolboxCommand(props, request.params.sandboxId, 'lsp_stop', {
          language_id: request.body.languageId,
        })
        break
      case '/lsp/sandbox-symbols':
        this.captureToolboxCommand(props, request.params.sandboxId, 'lsp_sandbox_symbols', {
          language_id: request.query.languageId,
          path_to_project: request.query.pathToProject,
          query: request.query.query,
        })
        break
    }
  }

  // --- capture* helpers: one per tracked route; each maps request/response
  // --- fields onto a flat PostHog property bag and delegates to capture().

  private captureCreateApiKey(props: CommonCaptureProps) {
    this.capture('api_api_key_created', props, 'api_api_key_creation_failed')
  }

  private captureCreateDockerRegistry(props: CommonCaptureProps, response: DockerRegistryDto) {
    this.capture('api_docker_registry_created', props, 'api_docker_registry_creation_failed', {
      registry_name: response.name,
      registry_url: response.url,
    })
  }

  private captureCreateSnapshot(props: CommonCaptureProps, request: CreateSnapshotDto, response: SnapshotDto) {
    this.capture('api_snapshot_created', props, 'api_snapshot_creation_failed', {
      snapshot_id: response.id,
      snapshot_name: request.name,
      snapshot_image_name: request.imageName,
      snapshot_entrypoint: request.entrypoint,
      snapshot_cpu: request.cpu,
      snapshot_gpu: request.gpu,
      snapshot_memory: request.memory,
      snapshot_disk: request.disk,
      snapshot_is_build: request.buildInfo ? true : false,
      snapshot_build_info_context_hashes_length: request.buildInfo?.contextHashes?.length,
    })
  }

  private captureActivateSnapshot(props: CommonCaptureProps, snapshotId: string) {
    this.capture('api_snapshot_activated', props, 'api_snapshot_activation_failed', {
      snapshot_id: snapshotId,
    })
  }

  private captureDeactivateSnapshot(props: CommonCaptureProps, snapshotId: string) {
    this.capture('api_snapshot_deactivated', props, 'api_snapshot_deactivation_failed', {
      snapshot_id: snapshotId,
    })
  }

  private captureDeleteSnapshot(props: CommonCaptureProps, snapshotId: string) {
    this.capture('api_snapshot_deleted', props, 'api_snapshot_deletion_failed', {
      snapshot_id: snapshotId,
    })
  }

  private captureCreateSandbox(props: CommonCaptureProps, request: CreateSandboxDto, response: SandboxDto) {
    const envVarsLength = request.env ? Object.keys(request.env).length : 0
    // *_request keys carry what the caller asked for; unsuffixed keys carry
    // what the API actually provisioned.
    const records = {
      sandbox_id: response.id,
      sandbox_name_request: request.name,
      sandbox_name: response.name,
      sandbox_snapshot_request: request.snapshot,
      sandbox_snapshot: response.snapshot,
      sandbox_user_request: request.user,
      sandbox_user: response.user,
      sandbox_cpu_request: request.cpu,
      sandbox_cpu: response.cpu,
      sandbox_gpu_request: request.gpu,
      sandbox_gpu: response.gpu,
      sandbox_memory_mb_request: request.memory * 1024,
      sandbox_memory_mb: response.memory * 1024,
      sandbox_disk_gb_request: request.disk,
      sandbox_disk_gb: response.disk,
      sandbox_target_request: request.target,
      sandbox_target: response.target,
      sandbox_auto_stop_interval_min_request: request.autoStopInterval,
      sandbox_auto_stop_interval_min: response.autoStopInterval,
      sandbox_auto_archive_interval_min_request: request.autoArchiveInterval,
      sandbox_auto_archive_interval_min: response.autoArchiveInterval,
      sandbox_auto_delete_interval_min_request: request.autoDeleteInterval,
      sandbox_auto_delete_interval_min: response.autoDeleteInterval,
      sandbox_public_request: request.public,
      sandbox_public: response.public,
      sandbox_labels_request: request.labels,
      sandbox_labels: response.labels,
      sandbox_env_vars_length_request: envVarsLength,
      sandbox_volumes_length_request: request.volumes?.length,
      sandbox_daemon_version: response.daemonVersion,
      sandbox_network_block_all_request: request.networkBlockAll,
      sandbox_network_block_all: response.networkBlockAll,
      sandbox_network_allow_list_set_request: !!request.networkAllowList,
      sandbox_network_allow_list_set: !!response.networkAllowList,
    }
    if (request.buildInfo) {
      records['sandbox_is_dynamic_build'] = true
      records['sandbox_build_info_context_hashes_length'] = request.buildInfo.contextHashes?.length
    }
    this.capture('api_sandbox_created', props, 'api_sandbox_creation_failed', records)
  }

  // Deprecated /workspace route; emits the same api_sandbox_created event so
  // dashboards see old and new clients uniformly (props.isDeprecated marks it).
  private captureCreateWorkspace_deprecated(
    props: CommonCaptureProps,
    request: CreateWorkspaceDto,
    response: WorkspaceDto,
  ) {
    const envVarsLength = request.env ? Object.keys(request.env).length : 0
    const records = {
      sandbox_id: response.id,
      sandbox_snapshot_request: request.image,
      sandbox_snapshot: response.snapshot,
      sandbox_user_request: request.user,
      sandbox_user: response.user,
      sandbox_cpu_request: request.cpu,
      sandbox_cpu: response.cpu,
      sandbox_gpu_request: request.gpu,
      sandbox_gpu: response.gpu,
      sandbox_memory_mb_request: request.memory * 1024,
      sandbox_memory_mb: response.memory * 1024,
      sandbox_disk_gb_request: request.disk,
      sandbox_disk_gb: response.disk,
      sandbox_target_request: request.target,
      sandbox_target: response.target,
      sandbox_auto_stop_interval_min_request: request.autoStopInterval,
      sandbox_auto_stop_interval_min: response.autoStopInterval,
      sandbox_auto_archive_interval_min_request: request.autoArchiveInterval,
      sandbox_auto_archive_interval_min: response.autoArchiveInterval,
      sandbox_public_request: request.public,
      sandbox_public: response.public,
      sandbox_labels_request: request.labels,
      sandbox_labels: response.labels,
      sandbox_env_vars_length_request: envVarsLength,
      sandbox_volumes_length_request: request.volumes?.length,
      sandbox_daemon_version: response.daemonVersion,
    }
    if (request.buildInfo) {
      records['sandbox_is_dynamic_build'] = true
      records['sandbox_build_info_context_hashes_length'] = request.buildInfo.contextHashes?.length
    }
    this.capture('api_sandbox_created', props, 'api_sandbox_creation_failed', records)
  }

  private captureDeleteSandbox(props: CommonCaptureProps, sandboxId: string) {
    this.capture('api_sandbox_deleted', props, 'api_sandbox_deletion_failed', {
      sandbox_id: sandboxId,
    })
  }

  private captureStartSandbox(props: CommonCaptureProps, sandboxId: string) {
    this.capture('api_sandbox_started', props, 'api_sandbox_start_failed', {
      sandbox_id: sandboxId,
    })
  }

  private captureStopSandbox(props: CommonCaptureProps, sandboxId: string) {
    this.capture('api_sandbox_stopped', props, 'api_sandbox_stop_failed', {
      sandbox_id: sandboxId,
    })
  }

  private captureResizeSandbox(
    props: CommonCaptureProps,
    sandboxId: string,
    body: { cpu?: number; memory?: number; disk?: number },
  ) {
    this.capture('api_sandbox_resized', props, 'api_sandbox_resize_failed', {
      sandbox_id: sandboxId,
      cpu: body?.cpu,
      memory: body?.memory,
      disk: body?.disk,
    })
  }

  private captureArchiveSandbox(props: CommonCaptureProps, sandboxId: string) {
    this.capture('api_sandbox_archived', props, 'api_sandbox_archive_failed', {
      sandbox_id: sandboxId,
    })
  }

  private captureCreateBackup(props: CommonCaptureProps, sandboxId: string) {
    this.capture('api_sandbox_backup_created', props, 'api_sandbox_backup_creation_failed', {
      sandbox_id: sandboxId,
    })
  }

  private captureUpdatePublicStatus(props: CommonCaptureProps, sandboxId: string, isPublic: boolean) {
    this.capture('api_sandbox_public_status_updated', props, 'api_sandbox_public_status_update_failed', {
      sandbox_id: sandboxId,
      sandbox_public: isPublic,
    })
  }

  private captureSetAutostopInterval(props: CommonCaptureProps, sandboxId: string, interval: number) {
    this.capture('api_sandbox_autostop_interval_updated', props, 'api_sandbox_autostop_interval_update_failed', {
      sandbox_id: sandboxId,
      sandbox_autostop_interval: interval,
    })
  }

  private captureSetAutoArchiveInterval(props: CommonCaptureProps, sandboxId: string, interval: number) {
    this.capture('api_sandbox_autoarchive_interval_updated', props, 'api_sandbox_autoarchive_interval_update_failed', {
      sandbox_id: sandboxId,
      sandbox_autoarchive_interval: interval,
    })
  }

  private captureSetAutoDeleteInterval(props: CommonCaptureProps, sandboxId: string, interval: number) {
    this.capture('api_sandbox_autodelete_interval_updated', props, 'api_sandbox_autodelete_interval_update_failed', {
      sandbox_id: sandboxId,
      sandbox_autodelete_interval: interval,
    })
  }

  private captureUpdateSandboxLabels(props: CommonCaptureProps, sandboxId: string) {
    this.capture('api_sandbox_labels_update', props, 'api_sandbox_labels_update_failed', {
      sandbox_id: sandboxId,
    })
  }

  private captureAcceptInvitation(props: CommonCaptureProps, invitationId: string) {
    this.capture('api_invitation_accepted', props, 'api_invitation_accept_failed', {
      invitation_id: invitationId,
    })
  }

  private captureDeclineInvitation(props: CommonCaptureProps, invitationId: string) {
    this.capture('api_invitation_declined', props, 'api_invitation_decline_failed', {
      invitation_id: invitationId,
    })
  }

  private captureCreateOrganization(
    props: CommonCaptureProps,
    request: CreateOrganizationDto,
    response: OrganizationDto,
  ) {
    if (!this.posthog) {
      return
    }
    // Register the new org as a PostHog group before attributing the event.
    this.posthog.groupIdentify({
      groupType: 'organization',
      groupKey: response.id,
      properties: {
        name: request.name,
        created_at: response.createdAt,
        created_by: response.createdBy,
        personal: response.personal,
        environment: this.configService.get('posthog.environment'),
      },
    })
    this.capture('api_organization_created', props, 'api_organization_creation_failed', {
      organization_id: response.id,
      organization_name: request.name,
      organization_default_region_id: request.defaultRegionId,
    })
  }

  private captureLeaveOrganization(props: CommonCaptureProps, organizationId: string) {
    this.capture('api_organization_left', props, 'api_organization_leave_failed', {
      organization_id: organizationId,
    })
  }

  private captureSetOrganizationDefaultRegion(
    props: CommonCaptureProps,
    organizationId: string,
    request: UpdateOrganizationDefaultRegionDto,
  ) {
    this.capture('api_organization_default_region_set', props, 'api_organization_default_region_set_failed', {
      organization_id: organizationId,
      organization_default_region_id: request.defaultRegionId,
    })
  }

  private captureUpdateOrganizationQuota(
    props: CommonCaptureProps,
    organizationId: string,
    request: UpdateOrganizationQuotaDto,
  ) {
    // Memory/snapshot sizes arrive in GB and are reported in MB (* 1024).
    this.capture('api_organization_quota_updated', props, 'api_organization_quota_update_failed', {
      organization_id: organizationId,
      organization_max_cpu_per_sandbox: request.maxCpuPerSandbox,
      organization_max_memory_per_sandbox_mb: request.maxMemoryPerSandbox ? request.maxMemoryPerSandbox * 1024 : null,
      organization_max_disk_per_sandbox_gb: request.maxDiskPerSandbox,
      organization_snapshot_quota: request.snapshotQuota,
      organization_max_snapshot_size_mb: request.maxSnapshotSize ? request.maxSnapshotSize * 1024 : null,
      organization_volume_quota: request.volumeQuota,
    })
  }

  private captureUpdateOrganizationRegionQuota(
    props: CommonCaptureProps,
    organizationId: string,
    regionId: string,
    request: UpdateOrganizationRegionQuotaDto,
  ) {
    this.capture('api_organization_region_quota_updated', props, 'api_organization_region_quota_update_failed', {
      organization_id: organizationId,
      organization_region_id: regionId,
      organization_region_total_cpu_quota: request.totalCpuQuota,
      organization_region_total_memory_quota_mb: request.totalMemoryQuota ? request.totalMemoryQuota * 1024 : null,
      organization_region_total_disk_quota_gb: request.totalDiskQuota,
    })
  }

  private captureDeleteOrganization(props: CommonCaptureProps, organizationId: string) {
    this.capture('api_organization_deleted', props, 'api_organization_deletion_failed', {
      organization_id: organizationId,
    })
  }

  private captureUpdateOrganizationUserAccess(
    props: CommonCaptureProps,
    organizationId: string,
    userId: string,
    request: UpdateOrganizationMemberAccessDto,
  ) {
    this.capture('api_organization_user_access_updated', props, 'api_organization_user_access_update_failed', {
      organization_id: organizationId,
      organization_user_id: userId,
      organization_user_role: request.role,
      organization_user_assigned_role_ids: request.assignedRoleIds,
    })
  }

  private captureDeleteOrganizationUser(props: CommonCaptureProps, organizationId: string, userId: string) {
    this.capture('api_organization_user_deleted', props, 'api_organization_user_deletion_failed', {
      organization_id: organizationId,
      organization_user_id: userId,
    })
  }

  private captureCreateOrganizationRole(
    props: CommonCaptureProps,
    organizationId: string,
    request: CreateOrganizationRoleDto,
  ) {
    this.capture('api_organization_role_created', props, 'api_organization_role_creation_failed', {
      organization_id: organizationId,
      organization_role_name: request.name,
      organization_role_description: request.description,
      organization_role_permissions: request.permissions,
    })
  }

  private captureDeleteOrganizationRole(props: CommonCaptureProps, organizationId: string, roleId: string) {
    this.capture('api_organization_role_deleted', props, 'api_organization_role_deletion_failed', {
      organization_id: organizationId,
      organization_role_id: roleId,
    })
  }

  private captureUpdateOrganizationRole(
    props: CommonCaptureProps,
    organizationId: string,
    roleId: string,
    request: UpdateOrganizationRoleDto,
  ) {
    this.capture('api_organization_role_updated', props, 'api_organization_role_update_failed', {
      organization_id: organizationId,
      organization_role_id: roleId,
      organization_role_name: request.name,
      organization_role_description: request.description,
      organization_role_permissions: request.permissions,
    })
  }

  private captureCreateOrganizationInvitation(
    props: CommonCaptureProps,
    organizationId: string,
    request: CreateOrganizationInvitationDto,
  ) {
    this.capture('api_organization_invitation_created', props, 'api_organization_invitation_creation_failed', {
      organization_id: organizationId,
      organization_invitation_email: request.email,
      organization_invitation_role: request.role,
      organization_invitation_assigned_role_ids: request.assignedRoleIds,
      organization_invitation_expires_at: request.expiresAt,
    })
  }

  private captureUpdateOrganizationInvitation(
    props: CommonCaptureProps,
    organizationId: string,
    invitationId: string,
    request: UpdateOrganizationInvitationDto,
  ) {
    this.capture('api_organization_invitation_updated', props, 'api_organization_invitation_update_failed', {
      organization_id: organizationId,
      organization_invitation_id: invitationId,
      organization_invitation_expires_at: request.expiresAt,
      organization_invitation_role: request.role,
      organization_invitation_assigned_role_ids: request.assignedRoleIds,
    })
  }

  private captureCancelOrganizationInvitation(props: CommonCaptureProps, organizationId: string, invitationId: string) {
    this.capture('api_organization_invitation_canceled', props, 'api_organization_invitation_cancel_failed', {
      organization_id: organizationId,
      organization_invitation_id: invitationId,
    })
  }

  private captureCreateVolume(props: CommonCaptureProps, request: CreateVolumeDto, response: VolumeDto) {
    this.capture('api_volume_created', props, 'api_volume_creation_failed', {
      volume_id: response.id,
      volume_name_request_set: !!request.name,
    })
  }

  private captureDeleteVolume(props: CommonCaptureProps, volumeId: string) {
    this.capture('api_volume_deleted', props, 'api_volume_deletion_failed', {
      volume_id: volumeId,
    })
  }

  // NOTE(review): parameter type reads `Record | null` — probably
  // `Record<string, any> | null` lost in extraction; confirm against the repo.
  private captureUpdateOrganizationExperimentalConfig(
    props: CommonCaptureProps,
    experimentalConfig: Record | null,
  ) {
    // Only emits presence flags, never the config contents.
    this.capture(
      'api_organization_experimental_config_updated',
      props,
      'api_organization_experimental_config_update_failed',
      {
        experimental_config_empty: !experimentalConfig,
        experimental_config_otel_set: !!experimentalConfig?.otel,
      },
    )
  }

  private captureToolboxCommand(
    props: CommonCaptureProps,
    sandboxId: string,
    command: string,
    extraProps?: Record,
  ) {
    this.capture('api_toolbox_command', props, 'api_toolbox_command_failed', {
      sandbox_id: sandboxId,
      toolbox_command: command,
      ...extraProps,
    })
  }

  /**
   * Central PostHog emit. When props.error is set the failure event name
   * (errorEvent) is used instead of the success name.
   */
  private capture(event: string, props: CommonCaptureProps, errorEvent?: string, extraProps?: Record) {
    if (!this.posthog) {
      return
    }
    this.posthog.capture({
      distinctId: props.distinctId,
      event: props.error ? errorEvent || event : event,
      groups: this.captureCommonGroups(props),
      properties: { ...this.captureCommonProperties(props), ...extraProps },
    })
  }

  // Flat snake_case property bag shared by every event.
  private captureCommonProperties(props: CommonCaptureProps) {
    return {
      duration_ms: props.durationMs,
      status_code: props.statusCode,
      user_agent: props.userAgent,
      error: props.error,
      source: props.source,
      is_deprecated: props.isDeprecated,
      sdk_version: props.sdkVersion,
      environment: props.environment,
      daytona_version: this.version,
    }
  }

  // Attribute events to the caller's organization group when known.
  private captureCommonGroups(props: CommonCaptureProps) {
    return props.organizationId ? { organization: props.organizationId } : undefined
  }

  // NOTE(review): posthog.shutdown() returns a Promise that flushes queued
  // events; consider returning it so Nest awaits the flush before exit.
  onApplicationShutdown(/*signal?: string*/) {
    if (this.posthog) {
      this.posthog.shutdown()
    }
  }
}

================================================ FILE: apps/api/src/main.ts ================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */
import { otelSdk } from './tracing'
import { readdirSync, readFileSync, statSync, writeFileSync } from 'node:fs'
import { NestFactory } from '@nestjs/core'
import { NestExpressApplication } from '@nestjs/platform-express'
import { AppModule } from './app.module'
import { SwaggerModule } from '@nestjs/swagger'
import { INestApplication, Logger, ValidationPipe } from '@nestjs/common'
import { AllExceptionsFilter } from './filters/all-exceptions.filter'
import { MetricsInterceptor } from './interceptors/metrics.interceptor'
import { HttpsOptions } from '@nestjs/common/interfaces/external/https-options.interface'
import { TypedConfigService } from './config/typed-config.service'
import { FailedAuthTrackerService } from './auth/failed-auth-tracker.service'
import { DataSource, MigrationExecutor } from 'typeorm'
import { getOpenApiConfig } from './openapi.config'
import { AuditInterceptor } from './audit/interceptors/audit.interceptor'
import { join } from 'node:path'
import { ApiKeyService } from './api-key/api-key.service'
import { DAYTONA_ADMIN_USER_ID } from
'./app.service'
import { OrganizationService } from './organization/services/organization.service'
import { MicroserviceOptions, Transport } from '@nestjs/microservices'
import { Partitioners } from 'kafkajs'
import { isApiEnabled, isWorkerEnabled } from './common/utils/app-mode'
import cluster from 'node:cluster'
import { Logger as PinoLogger, LoggerErrorInterceptor } from 'nestjs-pino'

// https options
// HTTPS is only enabled when BOTH cert and key paths are configured; the
// individual files are still read eagerly below if either env var is set.
const httpsEnabled = process.env.CERT_PATH && process.env.CERT_KEY_PATH
const httpsOptions: HttpsOptions = {
  cert: process.env.CERT_PATH ? readFileSync(process.env.CERT_PATH) : undefined,
  key: process.env.CERT_KEY_PATH ? readFileSync(process.env.CERT_KEY_PATH) : undefined,
}

/**
 * Application entry point: builds the Nest app, wires global filters,
 * interceptors and pipes, handles one-shot CLI flags (migrations / admin API
 * key creation), serves Swagger, then starts the HTTP listener and/or the
 * Kafka microservice depending on the app mode.
 */
async function bootstrap() {
  // OpenTelemetry must start before the Nest app so instrumentation hooks in.
  if (process.env.OTEL_ENABLED === 'true') {
    await otelSdk.start()
  }
  const app = await NestFactory.create<NestExpressApplication>(AppModule, {
    // Buffer logs until the pino logger is attached below, then flush.
    bufferLogs: true,
    httpsOptions: httpsEnabled ? httpsOptions : undefined,
  })
  app.useLogger(app.get(PinoLogger))
  app.flushLogs()
  app.enableCors({
    origin: true,
    methods: 'GET,HEAD,PUT,PATCH,POST,DELETE,OPTIONS',
    credentials: true,
  })
  const configService = app.get(TypedConfigService)
  const failedAuthTracker = app.get(FailedAuthTrackerService)
  // Respect X-Forwarded-* headers from the reverse proxy (Express setting).
  app.set('trust proxy', true)
  app.useGlobalFilters(new AllExceptionsFilter(failedAuthTracker))
  app.useGlobalInterceptors(new LoggerErrorInterceptor())
  app.useGlobalInterceptors(new MetricsInterceptor(configService))
  app.useGlobalInterceptors(app.get(AuditInterceptor))
  app.useGlobalPipes(
    new ValidationPipe({
      transform: true,
    }),
  )

  // Runtime flags for migrations for run and revert migrations
  // One-shot CLI mode: when a flag is passed, perform the action and exit
  // without ever starting the HTTP server.
  if (process.argv.length > 2) {
    if (process.argv[2].startsWith('--migration-')) {
      const dataSource = app.get(DataSource)
      dataSource.setOptions({ logging: true })
      const migrationExecutor = new MigrationExecutor(dataSource)
      switch (process.argv[2]) {
        case '--migration-run':
          await migrationExecutor.executePendingMigrations()
          break
        case '--migration-revert':
          await migrationExecutor.undoLastMigration()
          break
        default:
          Logger.error('Invalid migration flag')
          process.exit(1)
      }
    } else if (process.argv[2] === '--create-admin-api-key') {
      if (process.argv.length < 4) {
        Logger.error('Invalid flag. API key name is required.')
        process.exit(1)
      }
      await createAdminApiKey(app, process.argv[3])
    } else {
      Logger.error('Invalid flag')
      process.exit(1)
    }
    process.exit(0)
  }

  const globalPrefix = 'api'
  app.setGlobalPrefix(globalPrefix)

  const documentFactory = () => SwaggerModule.createDocument(app, getOpenApiConfig(configService.get('oidc.issuer')))
  SwaggerModule.setup('api', app, documentFactory, {
    swaggerOptions: {
      initOAuth: {
        clientId: configService.get('oidc.clientId'),
        appName: 'Daytona AI',
        scopes: ['openid', 'profile', 'email'],
        additionalQueryStringParams: {
          audience: configService.get('oidc.audience'),
        },
      },
    },
  })

  // Replace dashboard api url before serving
  // In production the prebuilt dashboard bundle contains the placeholder
  // %DAYTONA_BASE_API_URL%, which is rewritten in place on disk at startup.
  // Only the top level and the 'assets' subdirectory are visited.
  if (configService.get('production')) {
    const dashboardDir = join(__dirname, '..', 'dashboard')
    const replaceInDirectory = (dir: string) => {
      for (const file of readdirSync(dir)) {
        const filePath = join(dir, file)
        if (statSync(filePath).isDirectory()) {
          if (file === 'assets') {
            replaceInDirectory(filePath)
          }
          continue
        }
        Logger.log(`Replacing %DAYTONA_BASE_API_URL% in ${filePath}`)
        const fileContent = readFileSync(filePath, 'utf8')
        const newFileContent = fileContent.replaceAll(
          '%DAYTONA_BASE_API_URL%',
          configService.get('dashboardBaseApiUrl'),
        )
        writeFileSync(filePath, newFileContent)
      }
    }
    replaceInDirectory(dashboardDir)
  }

  // Starts listening for shutdown hooks
  app.enableShutdownHooks()

  const host = '0.0.0.0'
  const port = configService.get('port')

  if (isApiEnabled()) {
    await app.listen(port, host)
    Logger.log(`🚀 Daytona API is running on: http://${host}:${port}/${globalPrefix}`)
  } else {
    // Worker-only mode: initialize the app (DI, lifecycle hooks) without an
    // HTTP listener.
    await app.init()
    app.flushLogs()
  }

  // Kafka consumer/producer is attached only when worker mode and Kafka are
  // both enabled.
  if (isWorkerEnabled() && configService.get('kafka.enabled')) {
    app.connectMicroservice<MicroserviceOptions>({
      transport: Transport.KAFKA,
      options: {
        client: configService.getKafkaClientConfig(),
        producer: {
          allowAutoTopicCreation: true,
          createPartitioner: Partitioners.DefaultPartitioner,
          idempotent: true,
        },
        consumer: {
          allowAutoTopicCreation: true,
          groupId: 'daytona',
        },
        run: {
          // Offsets are committed manually by handlers — TODO confirm against
          // the event consumers before changing.
          autoCommit: false,
        },
        subscribe: {
          fromBeginning: true,
        },
      },
    })
    await app.startAllMicroservices()
  }

  // If app running in cluster mode, send ready signal
  if (cluster.isWorker) {
    process.send('ready')
  }
}

/**
 * CLI helper for `--create-admin-api-key`: creates an API key (with empty
 * permission list) in the admin user's personal organization and prints its
 * value to the log.
 */
async function createAdminApiKey(app: INestApplication, apiKeyName: string) {
  const apiKeyService = app.get(ApiKeyService)
  const organizationService = app.get(OrganizationService)

  const personalOrg = await organizationService.findPersonal(DAYTONA_ADMIN_USER_ID)
  const { value } = await apiKeyService.createApiKey(personalOrg.id, DAYTONA_ADMIN_USER_ID, apiKeyName, [])

  Logger.log(
    `
=========================================
=========================================

    Admin API key created: ${value}

=========================================
=========================================`,
  )
}

bootstrap()

================================================
FILE: apps/api/src/migrations/1741087887225-migration.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1741087887225 implements MigrationInterface { name = 'Migration1741087887225' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`CREATE SCHEMA IF NOT EXISTS "public"`) await queryRunner.query( `CREATE TABLE "user" ("id" character varying NOT NULL, "name" character varying NOT NULL, "keyPair" text, "publicKeys" text NOT NULL, "total_cpu_quota" integer NOT NULL DEFAULT '10', "total_memory_quota" integer NOT NULL DEFAULT '40', "total_disk_quota" integer NOT NULL DEFAULT '100', "max_cpu_per_workspace" integer NOT NULL DEFAULT '2', "max_memory_per_workspace" integer NOT NULL DEFAULT '4', "max_disk_per_workspace" integer NOT NULL DEFAULT '10', "max_concurrent_workspaces" integer NOT NULL DEFAULT '10', "workspace_quota" integer NOT NULL DEFAULT '0', "image_quota" integer NOT NULL DEFAULT '5', "max_image_size" integer NOT NULL DEFAULT '2', "total_image_size" integer NOT NULL DEFAULT '5', CONSTRAINT "PK_cace4a159ff9f2512dd42373760" PRIMARY KEY ("id"))`, ) await queryRunner.query( `CREATE TABLE "team" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "name" character varying NOT NULL, CONSTRAINT "PK_f57d8293406df4af348402e4b74" PRIMARY KEY ("id"))`, ) await queryRunner.query( `CREATE TABLE "workspace_usage_periods" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "workspaceId" character varying NOT NULL, "startAt" TIMESTAMP NOT NULL, "endAt" TIMESTAMP, "cpu" double precision NOT NULL, "gpu" double precision NOT NULL, "mem" double precision NOT NULL, "disk" double precision NOT NULL, "storage" double precision NOT NULL, "region" character varying NOT NULL, CONSTRAINT "PK_b8d71f79ee638064397f678e877" PRIMARY KEY ("id"))`, ) await queryRunner.query(`CREATE TYPE "node_class_enum" AS ENUM('small', 'medium', 'large')`) await queryRunner.query(`CREATE TYPE "node_region_enum" AS ENUM('eu', 'us', 'asia')`) await queryRunner.query( `CREATE TYPE 
"node_state_enum" AS ENUM('initializing', 'ready', 'disabled', 'decommissioned', 'unresponsive')`, ) await queryRunner.query( `CREATE TABLE "node" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "domain" character varying NOT NULL, "apiUrl" character varying NOT NULL, "apiKey" character varying NOT NULL, "cpu" integer NOT NULL, "memory" integer NOT NULL, "disk" integer NOT NULL, "gpu" integer NOT NULL, "gpuType" character varying NOT NULL, "class" "node_class_enum" NOT NULL DEFAULT 'small', "used" integer NOT NULL DEFAULT '0', "capacity" integer NOT NULL, "region" "node_region_enum" NOT NULL, "state" "node_state_enum" NOT NULL DEFAULT 'initializing', "lastChecked" TIMESTAMP, "unschedulable" boolean NOT NULL DEFAULT false, "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), CONSTRAINT "UQ_330d74ac3d0e349b4c73c62ad6d" UNIQUE ("domain"), CONSTRAINT "PK_8c8caf5f29d25264abe9eaf94dd" PRIMARY KEY ("id"))`, ) await queryRunner.query( `CREATE TYPE "image_node_state_enum" AS ENUM('pulling_image', 'ready', 'error', 'removing')`, ) await queryRunner.query( `CREATE TABLE "image_node" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "state" "image_node_state_enum" NOT NULL DEFAULT 'pulling_image', "errorReason" character varying, "image" character varying NOT NULL, "internalImageName" character varying NOT NULL DEFAULT '', "nodeId" character varying NOT NULL, "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), CONSTRAINT "PK_6c66fc8bd2b9fb41362a50fddd0" PRIMARY KEY ("id"))`, ) await queryRunner.query( `CREATE TYPE "image_state_enum" AS ENUM('pending', 'pulling_image', 'pending_validation', 'validating', 'active', 'error', 'removing')`, ) await queryRunner.query( `CREATE TABLE "image" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "userId" character varying NOT NULL, "general" boolean NOT NULL DEFAULT false, "name" character varying NOT NULL, "internalName" character varying, "enabled" boolean NOT 
NULL DEFAULT true, "state" "image_state_enum" NOT NULL DEFAULT 'pending', "errorReason" character varying, "size" double precision, "entrypoint" character varying, "internalRegistryId" character varying, "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), "lastUsedAt" TIMESTAMP, CONSTRAINT "UQ_9db6fbe71409d80375c32826db3" UNIQUE ("userId", "name"), CONSTRAINT "PK_d6db1ab4ee9ad9dbe86c64e4cc3" PRIMARY KEY ("id"))`, ) await queryRunner.query(`CREATE TYPE "workspace_region_enum" AS ENUM('eu', 'us', 'asia')`) await queryRunner.query(`CREATE TYPE "workspace_class_enum" AS ENUM('small', 'medium', 'large')`) await queryRunner.query( `CREATE TYPE "workspace_state_enum" AS ENUM('creating', 'restoring', 'destroyed', 'destroying', 'started', 'stopped', 'starting', 'stopping', 'resizing', 'error', 'unknown', 'pulling_image', 'archiving', 'archived')`, ) await queryRunner.query( `CREATE TYPE "workspace_desiredstate_enum" AS ENUM('destroyed', 'started', 'stopped', 'resized', 'archived')`, ) await queryRunner.query( `CREATE TYPE "workspace_snapshotstate_enum" AS ENUM('None', 'Pending', 'InProgress', 'Completed', 'Error')`, ) await queryRunner.query( `CREATE TABLE "workspace" ("id" character varying NOT NULL, "name" character varying NOT NULL, "userId" character varying NOT NULL, "region" "workspace_region_enum" NOT NULL DEFAULT 'eu', "nodeId" uuid, "prevNodeId" uuid, "class" "workspace_class_enum" NOT NULL DEFAULT 'small', "state" "workspace_state_enum" NOT NULL DEFAULT 'unknown', "desiredState" "workspace_desiredstate_enum" NOT NULL DEFAULT 'started', "image" character varying NOT NULL, "osUser" character varying NOT NULL, "errorReason" character varying, "env" text NOT NULL DEFAULT '{}', "public" boolean NOT NULL DEFAULT false, "labels" jsonb, "snapshotRegistryId" character varying, "snapshotImage" character varying, "lastSnapshotAt" TIMESTAMP, "snapshotState" "workspace_snapshotstate_enum" NOT NULL DEFAULT 'None', "existingSnapshotImages" 
jsonb NOT NULL DEFAULT '[]', "cpu" integer NOT NULL DEFAULT '2', "gpu" integer NOT NULL DEFAULT '0', "mem" integer NOT NULL DEFAULT '4', "disk" integer NOT NULL DEFAULT '10', "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), "lastActivityAt" TIMESTAMP, "autoStopInterval" integer NOT NULL DEFAULT '15', CONSTRAINT "PK_ca86b6f9b3be5fe26d307d09b49" PRIMARY KEY ("id"))`, ) await queryRunner.query( `CREATE TABLE "docker_registry" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "name" character varying NOT NULL, "url" character varying NOT NULL, "username" character varying NOT NULL, "password" character varying NOT NULL, "isDefault" boolean NOT NULL DEFAULT false, "project" character varying NOT NULL, "userId" character varying, "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), CONSTRAINT "PK_4ad72294240279415eb57799798" PRIMARY KEY ("id"))`, ) await queryRunner.query( `CREATE TABLE "api_key" ("userId" character varying NOT NULL, "name" character varying NOT NULL, "value" character varying NOT NULL, "createdAt" TIMESTAMP NOT NULL, CONSTRAINT "UQ_4b0873b633484d5de20b2d8f852" UNIQUE ("value"), CONSTRAINT "PK_1df0337a701df00e9b2a16c8a0b" PRIMARY KEY ("userId", "name"))`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`DROP TABLE "api_key"`) await queryRunner.query(`DROP TABLE "docker_registry"`) await queryRunner.query(`DROP TABLE "workspace"`) await queryRunner.query(`DROP TYPE "workspace_snapshotstate_enum"`) await queryRunner.query(`DROP TYPE "workspace_desiredstate_enum"`) await queryRunner.query(`DROP TYPE "workspace_state_enum"`) await queryRunner.query(`DROP TYPE "workspace_class_enum"`) await queryRunner.query(`DROP TYPE "workspace_region_enum"`) await queryRunner.query(`DROP TABLE "image"`) await queryRunner.query(`DROP TYPE "image_state_enum"`) await queryRunner.query(`DROP TABLE "image_node"`) await queryRunner.query(`DROP TYPE 
"image_node_state_enum"`) await queryRunner.query(`DROP TABLE "node"`) await queryRunner.query(`DROP TYPE "node_state_enum"`) await queryRunner.query(`DROP TYPE "node_region_enum"`) await queryRunner.query(`DROP TYPE "node_class_enum"`) await queryRunner.query(`DROP TABLE "workspace_usage_periods"`) await queryRunner.query(`DROP TABLE "team"`) await queryRunner.query(`DROP TABLE "user"`) } } ================================================ FILE: apps/api/src/migrations/1741088165704-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1741088165704 implements MigrationInterface { name = 'Migration1741088165704' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "image" DROP COLUMN "internalRegistryId"`) await queryRunner.query( `CREATE TYPE "public"."docker_registry_registrytype_enum" AS ENUM('internal', 'user', 'public', 'transient')`, ) await queryRunner.query( `ALTER TABLE "docker_registry" ADD "registryType" "public"."docker_registry_registrytype_enum" NOT NULL DEFAULT 'internal'`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "docker_registry" DROP COLUMN "registryType"`) await queryRunner.query(`DROP TYPE "public"."docker_registry_registrytype_enum"`) await queryRunner.query(`ALTER TABLE "image" ADD "internalRegistryId" character varying`) } } ================================================ FILE: apps/api/src/migrations/1741088883000-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1741088883000 implements MigrationInterface { name = 'Migration1741088883000' public async up(queryRunner: QueryRunner): Promise { // organizations await queryRunner.query( `CREATE TABLE "organization" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "name" character varying NOT NULL, "createdBy" character varying NOT NULL, "personal" boolean NOT NULL DEFAULT false, "telemetryEnabled" boolean NOT NULL DEFAULT true, "total_cpu_quota" integer NOT NULL DEFAULT '10', "total_memory_quota" integer NOT NULL DEFAULT '40', "total_disk_quota" integer NOT NULL DEFAULT '100', "max_cpu_per_workspace" integer NOT NULL DEFAULT '2', "max_memory_per_workspace" integer NOT NULL DEFAULT '4', "max_disk_per_workspace" integer NOT NULL DEFAULT '10', "max_concurrent_workspaces" integer NOT NULL DEFAULT '10', "workspace_quota" integer NOT NULL DEFAULT '0', "image_quota" integer NOT NULL DEFAULT '5', "max_image_size" integer NOT NULL DEFAULT '2', "total_image_size" integer NOT NULL DEFAULT '5', "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), CONSTRAINT "organization_id_pk" PRIMARY KEY ("id"))`, ) // organization users await queryRunner.query(`CREATE TYPE "public"."organization_user_role_enum" AS ENUM('owner', 'member')`) await queryRunner.query( `CREATE TABLE "organization_user" ("organizationId" uuid NOT NULL, "userId" character varying NOT NULL, "role" "public"."organization_user_role_enum" NOT NULL DEFAULT 'member', "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), CONSTRAINT "organization_user_organizationId_userId_pk" PRIMARY KEY ("organizationId", "userId"))`, ) await queryRunner.query( `ALTER TABLE "organization_user" ADD CONSTRAINT "organization_user_organizationId_fk" FOREIGN KEY ("organizationId") REFERENCES "organization"("id") ON DELETE CASCADE ON UPDATE NO ACTION`, ) // 
organization invitations await queryRunner.query(`CREATE TYPE "public"."organization_invitation_role_enum" AS ENUM('owner', 'member')`) await queryRunner.query( `CREATE TYPE "public"."organization_invitation_status_enum" AS ENUM('pending', 'accepted', 'declined', 'cancelled')`, ) await queryRunner.query( `CREATE TABLE "organization_invitation" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "organizationId" uuid NOT NULL, "email" character varying NOT NULL, "role" "public"."organization_invitation_role_enum" NOT NULL DEFAULT 'member', "expiresAt" TIMESTAMP NOT NULL, "status" "public"."organization_invitation_status_enum" NOT NULL DEFAULT 'pending', "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), CONSTRAINT "organization_invitation_id_pk" PRIMARY KEY ("id"))`, ) await queryRunner.query( `ALTER TABLE "organization_invitation" ADD CONSTRAINT "organization_invitation_organizationId_fk" FOREIGN KEY ("organizationId") REFERENCES "organization"("id") ON DELETE CASCADE ON UPDATE NO ACTION`, ) // organization roles await queryRunner.query( `CREATE TYPE "public"."organization_role_permissions_enum" AS ENUM('write:registries', 'delete:registries', 'write:images', 'delete:images', 'write:sandboxes', 'delete:sandboxes')`, ) await queryRunner.query( `CREATE TABLE "organization_role" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "name" character varying NOT NULL, "description" character varying NOT NULL, "permissions" "public"."organization_role_permissions_enum" array NOT NULL, "isGlobal" boolean NOT NULL DEFAULT false, "organizationId" uuid, "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), CONSTRAINT "organization_role_id_pk" PRIMARY KEY ("id"))`, ) await queryRunner.query( `ALTER TABLE "organization_role" ADD CONSTRAINT "organization_role_organizationId_fk" FOREIGN KEY ("organizationId") REFERENCES "organization"("id") ON DELETE CASCADE ON UPDATE NO ACTION`, ) // organization role 
assignments for members await queryRunner.query( `CREATE TABLE "organization_role_assignment" ("organizationId" uuid NOT NULL, "userId" character varying NOT NULL, "roleId" uuid NOT NULL, CONSTRAINT "organization_role_assignment_organizationId_userId_roleId_pk" PRIMARY KEY ("organizationId", "userId", "roleId"))`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment" ADD CONSTRAINT "organization_role_assignment_organizationId_userId_fk" FOREIGN KEY ("organizationId", "userId") REFERENCES "organization_user"("organizationId","userId") ON DELETE CASCADE ON UPDATE CASCADE`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment" ADD CONSTRAINT "organization_role_assignment_roleId_fk" FOREIGN KEY ("roleId") REFERENCES "organization_role"("id") ON DELETE CASCADE ON UPDATE CASCADE`, ) await queryRunner.query( `CREATE INDEX "organization_role_assignment_organizationId_userId_index" ON "organization_role_assignment" ("organizationId", "userId") `, ) await queryRunner.query( `CREATE INDEX "organization_role_assignment_roleId_index" ON "organization_role_assignment" ("roleId") `, ) // organization role assignments for invitations await queryRunner.query( `CREATE TABLE "organization_role_assignment_invitation" ("invitationId" uuid NOT NULL, "roleId" uuid NOT NULL, CONSTRAINT "organization_role_assignment_invitation_invitationId_roleId_pk" PRIMARY KEY ("invitationId", "roleId"))`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment_invitation" ADD CONSTRAINT "organization_role_assignment_invitation_invitationId_fk" FOREIGN KEY ("invitationId") REFERENCES "organization_invitation"("id") ON DELETE CASCADE ON UPDATE CASCADE`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment_invitation" ADD CONSTRAINT "organization_role_assignment_invitation_roleId_fk" FOREIGN KEY ("roleId") REFERENCES "organization_role"("id") ON DELETE CASCADE ON UPDATE CASCADE`, ) await queryRunner.query( `CREATE INDEX 
"organization_role_assignment_invitation_invitationId_index" ON "organization_role_assignment_invitation" ("invitationId") `, ) await queryRunner.query( `CREATE INDEX "organization_role_assignment_invitation_roleId_index" ON "organization_role_assignment_invitation" ("roleId") `, ) // create personal organizations await queryRunner.query(` INSERT INTO "organization" ( name, personal, "createdBy", total_cpu_quota, total_memory_quota, total_disk_quota, max_cpu_per_workspace, max_memory_per_workspace, max_disk_per_workspace, max_concurrent_workspaces, workspace_quota, image_quota, max_image_size, total_image_size ) SELECT 'Personal', true, u.id, u.total_cpu_quota, u.total_memory_quota, u.total_disk_quota, u.max_cpu_per_workspace, u.max_memory_per_workspace, u.max_disk_per_workspace, u.max_concurrent_workspaces, u.workspace_quota, u.image_quota, u.max_image_size, u.total_image_size FROM "user" u `) await queryRunner.query(` INSERT INTO "organization_user" ("organizationId", "userId", role) SELECT o.id, o."createdBy", 'owner' FROM "organization" o WHERE o.personal = true `) // drop user quotas await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "total_cpu_quota"`) await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "total_memory_quota"`) await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "total_disk_quota"`) await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "max_cpu_per_workspace"`) await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "max_memory_per_workspace"`) await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "max_disk_per_workspace"`) await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "max_concurrent_workspaces"`) await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "workspace_quota"`) await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "image_quota"`) await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "max_image_size"`) await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "total_image_size"`) // move existing api keys 
to corresponding personal organizations await queryRunner.query(`ALTER TABLE "api_key" ADD "organizationId" uuid NULL`) await queryRunner.query(` UPDATE "api_key" ak SET "organizationId" = ( SELECT o.id FROM "organization" o WHERE o."createdBy" = ak."userId" AND o.personal = true LIMIT 1 ) `) await queryRunner.query(`ALTER TABLE "api_key" ALTER COLUMN "organizationId" SET NOT NULL`) // update api key primary key await queryRunner.query(` DO $$ DECLARE constraint_name text; BEGIN SELECT tc.constraint_name INTO constraint_name FROM information_schema.table_constraints tc WHERE tc.table_name = 'api_key' AND tc.constraint_type = 'PRIMARY KEY'; IF constraint_name IS NOT NULL THEN EXECUTE format('ALTER TABLE "api_key" DROP CONSTRAINT "%s"', constraint_name); END IF; END $$; `) await queryRunner.query( `ALTER TABLE "api_key" ADD CONSTRAINT "api_key_userId_name_organizationId_pk" PRIMARY KEY ("userId", "name", "organizationId")`, ) // api key permissions await queryRunner.query( `CREATE TYPE "public"."api_key_permissions_enum" AS ENUM('write:registries', 'delete:registries', 'write:images', 'delete:images', 'write:sandboxes', 'delete:sandboxes')`, ) await queryRunner.query(`ALTER TABLE "api_key" ADD "permissions" "public"."api_key_permissions_enum" array NULL`) await queryRunner.query(` UPDATE api_key SET permissions = ARRAY[ 'write:registries', 'delete:registries', 'write:images', 'delete:images', 'write:sandboxes', 'delete:sandboxes' ]::api_key_permissions_enum[] `) await queryRunner.query(`ALTER TABLE "api_key" ALTER COLUMN "permissions" SET NOT NULL`) // modify docker registry type enum await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "registryType" DROP DEFAULT`) await queryRunner.query( `ALTER TYPE "public"."docker_registry_registrytype_enum" RENAME TO "docker_registry_registrytype_enum_old"`, ) await queryRunner.query( `CREATE TYPE "public"."docker_registry_registrytype_enum" AS ENUM('internal', 'organization', 'public', 'transient')`, ) await 
queryRunner.query(` CREATE OR REPLACE FUNCTION migrate_registry_type(old_type text) RETURNS "public"."docker_registry_registrytype_enum" AS $$ BEGIN IF old_type = 'user' THEN RETURN 'organization'::"public"."docker_registry_registrytype_enum"; ELSE RETURN old_type::"public"."docker_registry_registrytype_enum"; END IF; END; $$ LANGUAGE plpgsql; `) await queryRunner.query(` ALTER TABLE "docker_registry" ALTER COLUMN "registryType" TYPE "public"."docker_registry_registrytype_enum" USING migrate_registry_type("registryType"::text) `) await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "registryType" SET DEFAULT 'internal'`) await queryRunner.query(`DROP TYPE "public"."docker_registry_registrytype_enum_old"`) await queryRunner.query(`DROP FUNCTION migrate_registry_type`) // move existing docker registries to corresponding personal organizations await queryRunner.query(`ALTER TABLE "docker_registry" ADD "organizationId" uuid NULL`) await queryRunner.query(`UPDATE "docker_registry" SET "organizationId" = NULL WHERE "userId" = 'system'`) await queryRunner.query(` UPDATE "docker_registry" dr SET "organizationId" = ( SELECT o.id FROM "organization" o WHERE o."createdBy" = dr."userId" AND o.personal = true LIMIT 1 ) `) await queryRunner.query(`ALTER TABLE "docker_registry" DROP COLUMN "userId"`) // move existing images to corresponding personal organizations await queryRunner.query(`ALTER TABLE "image" ADD "organizationId" uuid NULL`) await queryRunner.query(` UPDATE "image" i SET "organizationId" = ( SELECT o.id FROM "organization" o WHERE o."createdBy" = i."userId" AND o.personal = true LIMIT 1 ) `) // update image unique constraint await queryRunner.query(` DO $$ DECLARE constraint_name text; BEGIN SELECT tc.constraint_name INTO constraint_name FROM information_schema.table_constraints tc WHERE tc.table_name = 'image' AND tc.constraint_type = 'UNIQUE' AND tc.constraint_name LIKE '%name%'; IF constraint_name IS NOT NULL THEN EXECUTE format('ALTER TABLE 
"image" DROP CONSTRAINT "%s"', constraint_name); END IF; END $$; `) await queryRunner.query( `ALTER TABLE "image" ADD CONSTRAINT "image_organizationId_name_unique" UNIQUE ("organizationId", "name")`, ) await queryRunner.query(`ALTER TABLE "image" DROP COLUMN "userId"`) // move existing workspaces to corresponding personal organizations await queryRunner.query(`ALTER TABLE "workspace" ADD "organizationId" uuid NULL`) await queryRunner.query(` UPDATE "workspace" w SET "organizationId" = ( SELECT o.id FROM "organization" o WHERE o."createdBy" = w."userId" AND o.personal = true LIMIT 1 ) WHERE w."userId" != 'unassigned' `) await queryRunner.query(` UPDATE "workspace" w SET "organizationId" = '00000000-0000-0000-0000-000000000000' WHERE w."userId" = 'unassigned' `) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "organizationId" SET NOT NULL`) await queryRunner.query(`ALTER TABLE "workspace" DROP COLUMN "userId"`) } public async down(queryRunner: QueryRunner): Promise { // workspaces await queryRunner.query(`ALTER TABLE "workspace" ADD "userId" character varying NULL`) await queryRunner.query(` UPDATE "workspace" w SET "userId" = 'unassigned' WHERE w."organizationId" = '00000000-0000-0000-0000-000000000000' `) await queryRunner.query(` UPDATE "workspace" w SET "userId" = ( SELECT o."createdBy" FROM "organization" o WHERE o.id = w."organizationId" ) WHERE w."organizationId" != '00000000-0000-0000-0000-000000000000' `) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "userId" SET NOT NULL`) await queryRunner.query(`ALTER TABLE "workspace" DROP COLUMN "organizationId"`) // images await queryRunner.query(`ALTER TABLE "image" ADD "userId" character varying NULL`) await queryRunner.query(` UPDATE "image" i SET "userId" = ( SELECT o."createdBy" FROM "organization" o WHERE o.id = i."organizationId" ) `) await queryRunner.query(`ALTER TABLE "image" ALTER COLUMN "userId" SET NOT NULL`) await queryRunner.query(`ALTER TABLE "image" DROP CONSTRAINT 
"image_organizationId_name_unique"`) await queryRunner.query(`ALTER TABLE "image" ADD CONSTRAINT "image_userId_name_unique" UNIQUE ("userId", "name")`) await queryRunner.query(`ALTER TABLE "image" DROP COLUMN "organizationId"`) // docker registries await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "registryType" DROP DEFAULT`) await queryRunner.query( `ALTER TYPE "public"."docker_registry_registrytype_enum" RENAME TO "docker_registry_registrytype_enum_old"`, ) await queryRunner.query( `CREATE TYPE "public"."docker_registry_registrytype_enum" AS ENUM('internal', 'user', 'public', 'transient')`, ) await queryRunner.query(` CREATE OR REPLACE FUNCTION rollback_registry_type(new_type text) RETURNS "public"."docker_registry_registrytype_enum" AS $$ BEGIN IF new_type = 'organization' THEN RETURN 'user'::"public"."docker_registry_registrytype_enum"; ELSE RETURN new_type::"public"."docker_registry_registrytype_enum"; END IF; END; $$ LANGUAGE plpgsql; `) await queryRunner.query(` ALTER TABLE "docker_registry" ALTER COLUMN "registryType" TYPE "public"."docker_registry_registrytype_enum" USING rollback_registry_type("registryType"::text) `) await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "registryType" SET DEFAULT 'internal'`) await queryRunner.query(`DROP TYPE "public"."docker_registry_registrytype_enum_old"`) await queryRunner.query(`DROP FUNCTION rollback_registry_type`) await queryRunner.query(`ALTER TABLE "docker_registry" ADD "userId" character varying NULL`) await queryRunner.query(` UPDATE "docker_registry" dr SET "userId" = ( SELECT o."createdBy" FROM "organization" o WHERE o.id = dr."organizationId" ) `) await queryRunner.query(`ALTER TABLE "docker_registry" DROP COLUMN "organizationId"`) // api keys await queryRunner.query(`ALTER TABLE "api_key" DROP CONSTRAINT "api_key_userId_name_organizationId_pk"`) await queryRunner.query( `ALTER TABLE "api_key" ADD CONSTRAINT "api_key_userId_name_pk" PRIMARY KEY ("userId", "name")`, ) await 
queryRunner.query(`ALTER TABLE "api_key" DROP COLUMN "organizationId"`) await queryRunner.query(`ALTER TABLE "api_key" DROP COLUMN "permissions"`) await queryRunner.query(`DROP TYPE "public"."api_key_permissions_enum"`) // user quotas await queryRunner.query(`ALTER TABLE "user" ADD "total_cpu_quota" integer NOT NULL DEFAULT '10'`) await queryRunner.query(`ALTER TABLE "user" ADD "total_memory_quota" integer NOT NULL DEFAULT '40'`) await queryRunner.query(`ALTER TABLE "user" ADD "total_disk_quota" integer NOT NULL DEFAULT '100'`) await queryRunner.query(`ALTER TABLE "user" ADD "max_cpu_per_workspace" integer NOT NULL DEFAULT '2'`) await queryRunner.query(`ALTER TABLE "user" ADD "max_memory_per_workspace" integer NOT NULL DEFAULT '4'`) await queryRunner.query(`ALTER TABLE "user" ADD "max_disk_per_workspace" integer NOT NULL DEFAULT '10'`) await queryRunner.query(`ALTER TABLE "user" ADD "max_concurrent_workspaces" integer NOT NULL DEFAULT '10'`) await queryRunner.query(`ALTER TABLE "user" ADD "workspace_quota" integer NOT NULL DEFAULT '0'`) await queryRunner.query(`ALTER TABLE "user" ADD "image_quota" integer NOT NULL DEFAULT '5'`) await queryRunner.query(`ALTER TABLE "user" ADD "max_image_size" integer NOT NULL DEFAULT '2'`) await queryRunner.query(`ALTER TABLE "user" ADD "total_image_size" integer NOT NULL DEFAULT '5'`) await queryRunner.query(` UPDATE "user" u SET total_cpu_quota = ( SELECT o.total_cpu_quota FROM "organization" o WHERE o."createdBy" = u.id AND o.personal = true LIMIT 1 ), total_memory_quota = ( SELECT o.total_memory_quota FROM "organization" o WHERE o."createdBy" = u.id AND o.personal = true LIMIT 1 ), total_disk_quota = ( SELECT o.total_disk_quota FROM "organization" o WHERE o."createdBy" = u.id AND o.personal = true LIMIT 1 ), max_cpu_per_workspace = ( SELECT o.max_cpu_per_workspace FROM "organization" o WHERE o."createdBy" = u.id AND o.personal = true LIMIT 1 ), max_memory_per_workspace = ( SELECT o.max_memory_per_workspace FROM "organization" o 
WHERE o."createdBy" = u.id AND o.personal = true LIMIT 1 ), max_disk_per_workspace = ( SELECT o.max_disk_per_workspace FROM "organization" o WHERE o."createdBy" = u.id AND o.personal = true LIMIT 1 ), max_concurrent_workspaces = ( SELECT o.max_concurrent_workspaces FROM "organization" o WHERE o."createdBy" = u.id AND o.personal = true LIMIT 1 ), workspace_quota = ( SELECT o.workspace_quota FROM "organization" o WHERE o."createdBy" = u.id AND o.personal = true LIMIT 1 ), image_quota = ( SELECT o.image_quota FROM "organization" o WHERE o."createdBy" = u.id AND o.personal = true LIMIT 1 ), max_image_size = ( SELECT o.max_image_size FROM "organization" o WHERE o."createdBy" = u.id AND o.personal = true LIMIT 1 ), total_image_size = ( SELECT o.total_image_size FROM "organization" o WHERE o."createdBy" = u.id AND o.personal = true LIMIT 1 ) `) // drop organization tables and related constraints await queryRunner.query(`DROP INDEX "organization_role_assignment_invitation_roleId_index"`) await queryRunner.query(`DROP INDEX "organization_role_assignment_invitation_invitationId_index"`) await queryRunner.query(`DROP TABLE "organization_role_assignment_invitation"`) await queryRunner.query(`DROP INDEX "organization_role_assignment_roleId_index"`) await queryRunner.query(`DROP INDEX "organization_role_assignment_organizationId_userId_index"`) await queryRunner.query(`DROP TABLE "organization_role_assignment"`) await queryRunner.query(`DROP TABLE "organization_role"`) await queryRunner.query(`DROP TYPE "organization_role_permissions_enum"`) await queryRunner.query(`DROP TABLE "organization_invitation"`) await queryRunner.query(`DROP TYPE "organization_invitation_status_enum"`) await queryRunner.query(`DROP TYPE "organization_invitation_role_enum"`) await queryRunner.query(`DROP TABLE "organization_user"`) await queryRunner.query(`DROP TYPE "organization_user_role_enum"`) await queryRunner.query(`DROP TABLE "organization"`) } } ================================================ 
FILE: apps/api/src/migrations/1741088883001-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1741088883001 implements MigrationInterface { name = 'Migration1741088883001' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "user" ADD "email" character varying NOT NULL DEFAULT ''`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "email"`) } } ================================================ FILE: apps/api/src/migrations/1741088883002-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' import { GlobalOrganizationRolesIds } from '../organization/constants/global-organization-roles.constant' import { OrganizationResourcePermission } from '../organization/enums/organization-resource-permission.enum' export class Migration1741088883002 implements MigrationInterface { name = 'Migration1741088883002' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(` INSERT INTO "organization_role" ("id", "name", "description", "permissions", "isGlobal") VALUES ( '${GlobalOrganizationRolesIds.DEVELOPER}', 'Developer', 'Grants the ability to create sandboxes and keys in the organization', ARRAY[ '${OrganizationResourcePermission.WRITE_SANDBOXES}' ]::organization_role_permissions_enum[], TRUE ) `) await queryRunner.query(` INSERT INTO "organization_role" ("id", "name", "description", "permissions", "isGlobal") VALUES ( '${GlobalOrganizationRolesIds.SANDBOXES_ADMIN}', 'Sandboxes Admin', 'Grants admin access to sandboxes in the organization', ARRAY[ '${OrganizationResourcePermission.WRITE_SANDBOXES}', 
'${OrganizationResourcePermission.DELETE_SANDBOXES}' ]::organization_role_permissions_enum[], TRUE ) `) await queryRunner.query(` INSERT INTO "organization_role" ("id", "name", "description", "permissions", "isGlobal") VALUES ( '${GlobalOrganizationRolesIds.SNAPSHOTS_ADMIN}', 'Images Admin', 'Grants admin access to images in the organization', ARRAY[ 'write:images', 'delete:images' ]::organization_role_permissions_enum[], TRUE ) `) await queryRunner.query(` INSERT INTO "organization_role" ("id", "name", "description", "permissions", "isGlobal") VALUES ( '${GlobalOrganizationRolesIds.REGISTRIES_ADMIN}', 'Registries Admin', 'Grants admin access to registries in the organization', ARRAY[ '${OrganizationResourcePermission.WRITE_REGISTRIES}', '${OrganizationResourcePermission.DELETE_REGISTRIES}' ]::organization_role_permissions_enum[], TRUE ) `) await queryRunner.query(` INSERT INTO "organization_role" ("id", "name", "description", "permissions", "isGlobal") VALUES ( '${GlobalOrganizationRolesIds.SUPER_ADMIN}', 'Super Admin', 'Grants full access to all resources in the organization', ARRAY[ '${OrganizationResourcePermission.WRITE_REGISTRIES}', '${OrganizationResourcePermission.DELETE_REGISTRIES}', 'write:images', 'delete:images', '${OrganizationResourcePermission.WRITE_SANDBOXES}', '${OrganizationResourcePermission.DELETE_SANDBOXES}' ]::organization_role_permissions_enum[], TRUE ) `) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`DELETE FROM "organization_role" WHERE "isGlobal" = TRUE`) } } ================================================ FILE: apps/api/src/migrations/1741877019888-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1741877019888 implements MigrationInterface { name = 'Migration1741877019888' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`CREATE TYPE "public"."warm_pool_target_enum" AS ENUM('eu', 'us', 'asia')`) await queryRunner.query(`CREATE TYPE "public"."warm_pool_class_enum" AS ENUM('small', 'medium', 'large')`) await queryRunner.query( `CREATE TABLE "warm_pool" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "pool" integer NOT NULL, "image" character varying NOT NULL, "target" "public"."warm_pool_target_enum" NOT NULL DEFAULT 'eu', "cpu" integer NOT NULL, "mem" integer NOT NULL, "disk" integer NOT NULL, "gpu" integer NOT NULL, "gpuType" character varying NOT NULL, "class" "public"."warm_pool_class_enum" NOT NULL DEFAULT 'small', "osUser" character varying NOT NULL, "errorReason" character varying, "env" text NOT NULL DEFAULT '{}', "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), CONSTRAINT "PK_fb06a13baeb3ac0ced145345d90" PRIMARY KEY ("id"))`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`DROP TABLE "warm_pool"`) await queryRunner.query(`DROP TYPE "public"."warm_pool_class_enum"`) await queryRunner.query(`DROP TYPE "public"."warm_pool_target_enum"`) } } ================================================ FILE: apps/api/src/migrations/1742215525714-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1742215525714 implements MigrationInterface { name = 'Migration1742215525714' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "image_quota" SET DEFAULT '0'`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "image_quota" SET DEFAULT '5'`) } } ================================================ FILE: apps/api/src/migrations/1742475055353-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1742475055353 implements MigrationInterface { name = 'Migration1742475055353' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`CREATE TYPE "public"."user_role_enum" AS ENUM('admin', 'user')`) await queryRunner.query(`ALTER TABLE "user" ADD "role" "public"."user_role_enum" NOT NULL DEFAULT 'user'`) await queryRunner.query(`UPDATE "user" SET "role" = 'admin' WHERE "id" = 'daytona-admin'`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "role"`) await queryRunner.query(`DROP TYPE "public"."user_role_enum"`) } } ================================================ FILE: apps/api/src/migrations/1742831092942-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1742831092942 implements MigrationInterface { name = 'Migration1742831092942' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "workspace" ADD "pending" boolean NOT NULL DEFAULT false`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "workspace" DROP COLUMN "pending"`) } } ================================================ FILE: apps/api/src/migrations/1743593463168-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1743593463168 implements MigrationInterface { name = 'Migration1743593463168' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `ALTER TABLE "organization_invitation" ADD "invitedBy" character varying NOT NULL DEFAULT ''`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization_invitation" DROP COLUMN "invitedBy"`) } } ================================================ FILE: apps/api/src/migrations/1743683015304-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1743683015304 implements MigrationInterface { name = 'Migration1743683015304' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "workspace" DROP COLUMN "name"`) await queryRunner.query(`ALTER TABLE "workspace" DROP CONSTRAINT "PK_ca86b6f9b3be5fe26d307d09b49"`) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "id" SET DEFAULT uuid_generate_v4()`) await queryRunner.query(`ALTER TABLE "workspace" ADD CONSTRAINT "workspace_id_pk" PRIMARY KEY ("id")`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "workspace" DROP CONSTRAINT "workspace_id_pk"`) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "id" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "workspace" ADD CONSTRAINT "PK_ca86b6f9b3be5fe26d307d09b49" PRIMARY KEY ("id")`, ) await queryRunner.query(`ALTER TABLE "workspace" ADD "name" character varying NOT NULL DEFAULT ''`) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "name" DROP DEFAULT`) } } ================================================ FILE: apps/api/src/migrations/1744028841133-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1744028841133 implements MigrationInterface { name = 'Migration1744028841133' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "workspace_usage_periods" DROP COLUMN "storage"`) await queryRunner.query(`ALTER TABLE "workspace_usage_periods" ADD "organizationId" character varying NOT NULL`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "workspace_usage_periods" DROP COLUMN "organizationId"`) await queryRunner.query(`ALTER TABLE "workspace_usage_periods" ADD "storage" double precision NOT NULL`) } } ================================================ FILE: apps/api/src/migrations/1744114341077-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1744114341077 implements MigrationInterface { name = 'Migration1744114341077' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `ALTER TABLE "organization_role_assignment" DROP CONSTRAINT "organization_role_assignment_roleId_fk"`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment_invitation" DROP CONSTRAINT "organization_role_assignment_invitation_roleId_fk"`, ) await queryRunner.query( `ALTER TABLE "workspace" ADD "authToken" character varying NOT NULL DEFAULT MD5(random()::text)`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment" ADD CONSTRAINT "organization_role_assignment_roleId_fk" FOREIGN KEY ("roleId") REFERENCES "organization_role"("id") ON DELETE NO ACTION ON UPDATE NO ACTION`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment_invitation" ADD CONSTRAINT "organization_role_assignment_invitation_roleId_fk" FOREIGN KEY ("roleId") REFERENCES 
"organization_role"("id") ON DELETE NO ACTION ON UPDATE NO ACTION`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query( `ALTER TABLE "organization_role_assignment_invitation" DROP CONSTRAINT "organization_role_assignment_invitation_roleId_fk"`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment" DROP CONSTRAINT "organization_role_assignment_roleId_fk"`, ) await queryRunner.query(`ALTER TABLE "workspace" DROP COLUMN "authToken"`) await queryRunner.query( `ALTER TABLE "organization_role_assignment_invitation" ADD CONSTRAINT "organization_role_assignment_invitation_roleId_fk" FOREIGN KEY ("roleId") REFERENCES "organization_role"("id") ON DELETE CASCADE ON UPDATE CASCADE`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment" ADD CONSTRAINT "organization_role_assignment_roleId_fk" FOREIGN KEY ("roleId") REFERENCES "organization_role"("id") ON DELETE CASCADE ON UPDATE CASCADE`, ) } } ================================================ FILE: apps/api/src/migrations/1744378115901-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1744378115901 implements MigrationInterface { name = 'Migration1744378115901' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" ADD "suspended" boolean NOT NULL DEFAULT false`) await queryRunner.query(`ALTER TABLE "organization" ADD "suspensionReason" character varying`) await queryRunner.query(`ALTER TABLE "organization" ADD "suspendedUntil" TIMESTAMP`) await queryRunner.query(`ALTER TABLE "organization" ADD "suspendedAt" TIMESTAMP`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "suspendedAt"`) await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "suspendedUntil"`) await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "suspensionReason"`) await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "suspended"`) } } ================================================ FILE: apps/api/src/migrations/1744808444807-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1744808444807 implements MigrationInterface { name = 'Migration1744808444807' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `CREATE TABLE "volume" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "organizationId" uuid, "name" character varying NOT NULL, "state" character varying NOT NULL, "errorReason" character varying, "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), "lastUsedAt" TIMESTAMP, CONSTRAINT "volume_organizationId_name_unique" UNIQUE ("organizationId", "name"), CONSTRAINT "volume_id_pk" PRIMARY KEY ("id"))`, ) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "authToken" SET DEFAULT MD5(random()::text)`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "authToken" SET DEFAULT md5((random()))`) await queryRunner.query(`DROP TABLE "volume"`) } } ================================================ FILE: apps/api/src/migrations/1744868914148-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' import { GlobalOrganizationRolesIds } from '../organization/constants/global-organization-roles.constant' import { OrganizationResourcePermission } from '../organization/enums/organization-resource-permission.enum' export class Migration1744868914148 implements MigrationInterface { name = 'Migration1744868914148' public async up(queryRunner: QueryRunner): Promise { // update enums await queryRunner.query(`ALTER TYPE "public"."api_key_permissions_enum" RENAME TO "api_key_permissions_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."api_key_permissions_enum" AS ENUM('write:registries', 'delete:registries', 'write:images', 'delete:images', 'write:sandboxes', 'delete:sandboxes', 'read:volumes', 'write:volumes', 'delete:volumes')`, ) await queryRunner.query( `ALTER TABLE "api_key" ALTER COLUMN "permissions" TYPE "public"."api_key_permissions_enum"[] USING "permissions"::"text"::"public"."api_key_permissions_enum"[]`, ) await queryRunner.query(`DROP TYPE "public"."api_key_permissions_enum_old"`) await queryRunner.query( `ALTER TYPE "public"."organization_role_permissions_enum" RENAME TO "organization_role_permissions_enum_old"`, ) await queryRunner.query( `CREATE TYPE "public"."organization_role_permissions_enum" AS ENUM('write:registries', 'delete:registries', 'write:images', 'delete:images', 'write:sandboxes', 'delete:sandboxes', 'read:volumes', 'write:volumes', 'delete:volumes')`, ) await queryRunner.query( `ALTER TABLE "organization_role" ALTER COLUMN "permissions" TYPE "public"."organization_role_permissions_enum"[] USING "permissions"::"text"::"public"."organization_role_permissions_enum"[]`, ) await queryRunner.query(`DROP TYPE "public"."organization_role_permissions_enum_old"`) // add volumes admin role await queryRunner.query(` INSERT INTO "organization_role" ("id", "name", "description", "permissions", "isGlobal") VALUES ( 
'${GlobalOrganizationRolesIds.VOLUMES_ADMIN}', 'Volumes Admin', 'Grants admin access to volumes in the organization', ARRAY[ '${OrganizationResourcePermission.READ_VOLUMES}', '${OrganizationResourcePermission.WRITE_VOLUMES}', '${OrganizationResourcePermission.DELETE_VOLUMES}' ]::organization_role_permissions_enum[], TRUE ) `) } public async down(queryRunner: QueryRunner): Promise { // remove volumes admin role await queryRunner.query( `DELETE FROM "organization_role" WHERE "id" = '${GlobalOrganizationRolesIds.VOLUMES_ADMIN}'`, ) // revert enums await queryRunner.query( `CREATE TYPE "public"."organization_role_permissions_enum_old" AS ENUM('write:registries', 'delete:registries', 'write:images', 'delete:images', 'write:sandboxes', 'delete:sandboxes')`, ) await queryRunner.query( `ALTER TABLE "organization_role" ALTER COLUMN "permissions" TYPE "public"."organization_role_permissions_enum_old"[] USING "permissions"::"text"::"public"."organization_role_permissions_enum_old"[]`, ) await queryRunner.query(`DROP TYPE "public"."organization_role_permissions_enum"`) await queryRunner.query( `ALTER TYPE "public"."organization_role_permissions_enum_old" RENAME TO "organization_role_permissions_enum"`, ) await queryRunner.query( `CREATE TYPE "public"."api_key_permissions_enum_old" AS ENUM('write:registries', 'delete:registries', 'write:images', 'delete:images', 'write:sandboxes', 'delete:sandboxes')`, ) await queryRunner.query( `ALTER TABLE "api_key" ALTER COLUMN "permissions" TYPE "public"."api_key_permissions_enum_old"[] USING "permissions"::"text"::"public"."api_key_permissions_enum_old"[]`, ) await queryRunner.query(`DROP TYPE "public"."api_key_permissions_enum"`) await queryRunner.query(`ALTER TYPE "public"."api_key_permissions_enum_old" RENAME TO "api_key_permissions_enum"`) } } ================================================ FILE: apps/api/src/migrations/1744971114480-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms 
Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1744971114480 implements MigrationInterface { name = 'Migration1744971114480' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "workspace" ADD "volumes" jsonb NOT NULL DEFAULT '[]'`) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "authToken" SET DEFAULT MD5(random()::text)`) await queryRunner.query(`ALTER TABLE "volume" DROP COLUMN "state"`) await queryRunner.query( `CREATE TYPE "public"."volume_state_enum" AS ENUM('creating', 'ready', 'pending_create', 'pending_delete', 'deleting', 'deleted', 'error')`, ) await queryRunner.query( `ALTER TABLE "volume" ADD "state" "public"."volume_state_enum" NOT NULL DEFAULT 'pending_create'`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "volume" DROP COLUMN "state"`) await queryRunner.query(`DROP TYPE "public"."volume_state_enum"`) await queryRunner.query(`ALTER TABLE "volume" ADD "state" character varying NOT NULL`) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "authToken" SET DEFAULT md5((random()))`) await queryRunner.query(`ALTER TABLE "workspace" DROP COLUMN "volumes"`) } } ================================================ FILE: apps/api/src/migrations/1745393243334-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1745393243334 implements MigrationInterface { name = 'Migration1745393243334' public async up(queryRunner: QueryRunner): Promise { // First, get all images with their current entrypoint values const images = await queryRunner.query(`SELECT id, entrypoint FROM "image" WHERE entrypoint IS NOT NULL`) // Rename the column to avoid data loss await queryRunner.query(`ALTER TABLE "image" RENAME COLUMN "entrypoint" TO "entrypoint_old"`) // Add the new jsonb column await queryRunner.query(`ALTER TABLE "image" ADD "entrypoint" text[]`) // Update each image to convert the string entrypoint to a JSON array for (const image of images) { const entrypointValue = image.entrypoint if (entrypointValue) { // Convert the string to a JSON array with a single element await queryRunner.query(`UPDATE "image" SET "entrypoint" = $1 WHERE id = $2`, [ entrypointValue.split(' '), image.id, ]) } } // Drop the old column await queryRunner.query(`ALTER TABLE "image" DROP COLUMN "entrypoint_old"`) } public async down(queryRunner: QueryRunner): Promise { // First, get all images with their current entrypoint values const images = await queryRunner.query(`SELECT id, entrypoint FROM "image" WHERE entrypoint IS NOT NULL`) // Rename the column to avoid data loss await queryRunner.query(`ALTER TABLE "image" RENAME COLUMN "entrypoint" TO "entrypoint_old"`) // Add the new character varying column await queryRunner.query(`ALTER TABLE "image" ADD "entrypoint" character varying`) // Update each image to convert the JSON array to a string for (const image of images) { const entrypointArray = image.entrypoint_old if (entrypointArray && Array.isArray(entrypointArray) && entrypointArray.length > 0) { // Take the first element of the array as the string value await queryRunner.query(`UPDATE "image" SET "entrypoint" = $1 WHERE id = $2`, [entrypointArray[0], image.id]) } } // Drop the old column 
await queryRunner.query(`ALTER TABLE "image" DROP COLUMN "entrypoint_old"`) } } ================================================ FILE: apps/api/src/migrations/1745494761360-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1745494761360 implements MigrationInterface { name = 'Migration1745494761360' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "user" ADD "emailVerified" boolean NOT NULL DEFAULT false`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "emailVerified"`) } } ================================================ FILE: apps/api/src/migrations/1745574377029-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1745574377029 implements MigrationInterface { name = 'Migration1745574377029' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `CREATE TABLE "build_info" ("imageRef" character varying NOT NULL, "dockerfileContent" text, "contextHashes" text, "lastUsedAt" TIMESTAMP NOT NULL DEFAULT now(), "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), CONSTRAINT "build_info_imageRef_pk" PRIMARY KEY ("imageRef"))`, ) await queryRunner.renameColumn('image_node', 'internalImageName', 'imageRef') await queryRunner.query(`ALTER TABLE "image_node" DROP COLUMN "image"`) await queryRunner.query(`ALTER TABLE "image" ADD "buildInfoImageRef" character varying`) await queryRunner.query(`ALTER TABLE "workspace" ADD "buildInfoImageRef" character varying`) await queryRunner.query(`ALTER TYPE "public"."image_node_state_enum" RENAME TO "image_node_state_enum_old"`) await 
queryRunner.query( `CREATE TYPE "public"."image_node_state_enum" AS ENUM('pulling_image', 'building_image', 'ready', 'error', 'removing')`, ) await queryRunner.query(`ALTER TABLE "image_node" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "image_node" ALTER COLUMN "state" TYPE "public"."image_node_state_enum" USING "state"::"text"::"public"."image_node_state_enum"`, ) await queryRunner.query(`ALTER TABLE "image_node" ALTER COLUMN "state" SET DEFAULT 'pulling_image'`) await queryRunner.query(`DROP TYPE "public"."image_node_state_enum_old"`) await queryRunner.query(`ALTER TYPE "public"."image_state_enum" RENAME TO "image_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."image_state_enum" AS ENUM('build_pending', 'building', 'pending', 'pulling_image', 'pending_validation', 'validating', 'active', 'error', 'removing')`, ) await queryRunner.query(`ALTER TABLE "image" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "image" ALTER COLUMN "state" TYPE "public"."image_state_enum" USING "state"::"text"::"public"."image_state_enum"`, ) await queryRunner.query(`ALTER TABLE "image" ALTER COLUMN "state" SET DEFAULT 'pending'`) await queryRunner.query(`DROP TYPE "public"."image_state_enum_old"`) await queryRunner.query(`ALTER TYPE "public"."workspace_state_enum" RENAME TO "workspace_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."workspace_state_enum" AS ENUM('creating', 'restoring', 'destroyed', 'destroying', 'started', 'stopped', 'starting', 'stopping', 'resizing', 'error', 'pending_build', 'building_image', 'unknown', 'pulling_image', 'archiving', 'archived')`, ) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "workspace" ALTER COLUMN "state" TYPE "public"."workspace_state_enum" USING "state"::"text"::"public"."workspace_state_enum"`, ) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "state" SET DEFAULT 
'unknown'`) await queryRunner.query(`DROP TYPE "public"."workspace_state_enum_old"`) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "image" DROP NOT NULL`) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "authToken" SET DEFAULT MD5(random()::text)`) await queryRunner.query( `ALTER TABLE "image" ADD CONSTRAINT "image_buildInfoImageRef_fk" FOREIGN KEY ("buildInfoImageRef") REFERENCES "build_info"("imageRef") ON DELETE NO ACTION ON UPDATE NO ACTION`, ) await queryRunner.query( `ALTER TABLE "workspace" ADD CONSTRAINT "workspace_buildInfoImageRef_fk" FOREIGN KEY ("buildInfoImageRef") REFERENCES "build_info"("imageRef") ON DELETE NO ACTION ON UPDATE NO ACTION`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "workspace" DROP CONSTRAINT "workspace_buildInfoImageRef_fk"`) await queryRunner.query(`ALTER TABLE "image" DROP CONSTRAINT "image_buildInfoImageRef_fk"`) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "authToken" SET DEFAULT md5((random()))`) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "image" SET NOT NULL`) await queryRunner.query( `CREATE TYPE "public"."workspace_state_enum_old" AS ENUM('archived', 'archiving', 'creating', 'destroyed', 'destroying', 'error', 'pulling_image', 'resizing', 'restoring', 'started', 'starting', 'stopped', 'stopping', 'unknown')`, ) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "workspace" ALTER COLUMN "state" TYPE "public"."workspace_state_enum_old" USING "state"::"text"::"public"."workspace_state_enum_old"`, ) await queryRunner.query(`ALTER TABLE "workspace" ALTER COLUMN "state" SET DEFAULT 'unknown'`) await queryRunner.query(`DROP TYPE "public"."workspace_state_enum"`) await queryRunner.query(`ALTER TYPE "public"."workspace_state_enum_old" RENAME TO "workspace_state_enum"`) await queryRunner.query( `CREATE TYPE "public"."image_state_enum_old" AS 
ENUM('active', 'error', 'pending', 'pending_validation', 'pulling_image', 'removing', 'validating')`, ) await queryRunner.query(`ALTER TABLE "image" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "image" ALTER COLUMN "state" TYPE "public"."image_state_enum_old" USING "state"::"text"::"public"."image_state_enum_old"`, ) await queryRunner.query(`ALTER TABLE "image" ALTER COLUMN "state" SET DEFAULT 'pending'`) await queryRunner.query(`DROP TYPE "public"."image_state_enum"`) await queryRunner.query(`ALTER TYPE "public"."image_state_enum_old" RENAME TO "image_state_enum"`) await queryRunner.query( `CREATE TYPE "public"."image_node_state_enum_old" AS ENUM('error', 'pulling_image', 'ready', 'removing')`, ) await queryRunner.query(`ALTER TABLE "image_node" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "image_node" ALTER COLUMN "state" TYPE "public"."image_node_state_enum_old" USING "state"::"text"::"public"."image_node_state_enum_old"`, ) await queryRunner.query(`ALTER TABLE "image_node" ALTER COLUMN "state" SET DEFAULT 'pulling_image'`) await queryRunner.query(`DROP TYPE "public"."image_node_state_enum"`) await queryRunner.query(`ALTER TYPE "public"."image_node_state_enum_old" RENAME TO "image_node_state_enum"`) await queryRunner.query(`ALTER TABLE "workspace" DROP COLUMN "buildInfoImageRef"`) await queryRunner.query(`ALTER TABLE "image" DROP COLUMN "buildInfoImageRef"`) await queryRunner.renameColumn('image_node', 'imageRef', 'internalImageName') await queryRunner.query(`ALTER TABLE "image_node" ADD "image" character varying NOT NULL`) await queryRunner.query(`DROP TABLE "build_info"`) } } ================================================ FILE: apps/api/src/migrations/1745840296260-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' import * as crypto from 'crypto' export class Migration1745840296260 implements MigrationInterface { name = 'Migration1745840296260' public async up(queryRunner: QueryRunner): Promise { // Add the new columns await queryRunner.query(`ALTER TABLE "api_key" ADD "keyHash" character varying NOT NULL DEFAULT ''`) await queryRunner.query(`ALTER TABLE "api_key" ADD "keyPrefix" character varying NOT NULL DEFAULT ''`) await queryRunner.query(`ALTER TABLE "api_key" ADD "keySuffix" character varying NOT NULL DEFAULT ''`) // Get all existing API keys const existingKeys = await queryRunner.query(`SELECT "value" FROM "api_key"`) // Update each key with its hash, prefix, and suffix for (const key of existingKeys) { const value = key.value const keyHash = crypto.createHash('sha256').update(value).digest('hex') const keyPrefix = value.substring(0, 3) const keySuffix = value.slice(-3) await queryRunner.query( `UPDATE "api_key" SET "keyHash" = $1, "keyPrefix" = $2, "keySuffix" = $3 WHERE "value" = $4`, [keyHash, keyPrefix, keySuffix, value], ) } // Drop value column and its unique constraint await queryRunner.query(`ALTER TABLE "api_key" DROP CONSTRAINT "UQ_4b0873b633484d5de20b2d8f852"`) await queryRunner.query(`ALTER TABLE "api_key" DROP COLUMN "value"`) // Add unique constraint await queryRunner.query(`ALTER TABLE "api_key" ADD CONSTRAINT "api_key_keyHash_unique" UNIQUE ("keyHash")`) } public async down(queryRunner: QueryRunner): Promise { // Revert the schema changes await queryRunner.query(`ALTER TABLE "api_key" DROP CONSTRAINT "api_key_keyHash_unique"`) await queryRunner.query(`ALTER TABLE "api_key" DROP COLUMN "keySuffix"`) await queryRunner.query(`ALTER TABLE "api_key" DROP COLUMN "keyPrefix"`) await queryRunner.query(`ALTER TABLE "api_key" DROP COLUMN "keyHash"`) await queryRunner.query(`ALTER TABLE "api_key" ADD "value" character varying NOT NULL DEFAULT ''`) } } 
================================================
FILE: apps/api/src/migrations/1745864972652-migration.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { MigrationInterface, QueryRunner } from 'typeorm'

/** Converts workspace."env" from text to jsonb (NOT NULL, '{}' default). */
export class Migration1745864972652 implements MigrationInterface {
  name = 'Migration1745864972652'

  public async up(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`
      ALTER TABLE "workspace"
      ALTER COLUMN "env" DROP DEFAULT,
      ALTER COLUMN "env" TYPE jsonb USING "env"::jsonb,
      ALTER COLUMN "env" SET DEFAULT '{}'::jsonb,
      ALTER COLUMN "env" SET NOT NULL;
    `)
  }

  public async down(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`
      ALTER TABLE "workspace"
      ALTER COLUMN "env" DROP DEFAULT,
      ALTER COLUMN "env" TYPE text USING "env"::text,
      ALTER COLUMN "env" SET DEFAULT '{}'::text,
      ALTER COLUMN "env" SET NOT NULL;
    `)
  }
}

================================================
FILE: apps/api/src/migrations/1746354231722-migration.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { MigrationInterface, QueryRunner } from 'typeorm'

/** Adds the nullable image."buildNodeId" column. */
export class Migration1746354231722 implements MigrationInterface {
  name = 'Migration1746354231722'

  public async up(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`ALTER TABLE "image" ADD "buildNodeId" character varying`)
  }

  public async down(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`ALTER TABLE "image" DROP COLUMN "buildNodeId"`)
  }
}

================================================
FILE: apps/api/src/migrations/1746604150910-migration.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { MigrationInterface, QueryRunner } from 'typeorm'

/** Adds organization."volume_quota" with a default of 10. */
export class Migration1746604150910 implements MigrationInterface {
  name = 'Migration1746604150910'

  public async up(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`ALTER TABLE "organization" ADD "volume_quota" integer NOT NULL DEFAULT '10'`)
  }

  public async down(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "volume_quota"`)
  }
}

================================================
FILE: apps/api/src/migrations/1747658203010-migration.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { MigrationInterface, QueryRunner } from 'typeorm'

/** Adds the nullable api_key."lastUsedAt" timestamp. */
export class Migration1747658203010 implements MigrationInterface {
  name = 'Migration1747658203010'

  public async up(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`ALTER TABLE "api_key" ADD "lastUsedAt" TIMESTAMP`)
  }

  public async down(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`ALTER TABLE "api_key" DROP COLUMN "lastUsedAt"`)
  }
}

================================================
FILE: apps/api/src/migrations/1748006546552-migration.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { MigrationInterface, QueryRunner } from 'typeorm'

/**
 * Drops the per-workspace concurrency/quota columns from organization and
 * raises the defaults of the remaining quota columns.
 */
export class Migration1748006546552 implements MigrationInterface {
  name = 'Migration1748006546552'

  public async up(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "max_concurrent_workspaces"`)
    await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "workspace_quota"`)
    await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "total_image_size"`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "total_memory_quota" SET DEFAULT '10'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "total_disk_quota" SET DEFAULT '30'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "max_cpu_per_workspace" SET DEFAULT '4'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "max_memory_per_workspace" SET DEFAULT '8'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "max_image_size" SET DEFAULT '20'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "image_quota" SET DEFAULT '100'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "volume_quota" SET DEFAULT '100'`)
  }

  public async down(queryRunner: QueryRunner): Promise<void> {
    // Restore the previous defaults and re-add the dropped columns.
    // Note: dropped column data is not recoverable; rows get the defaults.
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "volume_quota" SET DEFAULT '10'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "image_quota" SET DEFAULT '0'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "max_image_size" SET DEFAULT '2'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "max_memory_per_workspace" SET DEFAULT '4'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "max_cpu_per_workspace" SET DEFAULT '2'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "total_disk_quota" SET DEFAULT '100'`)
    await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "total_memory_quota" SET DEFAULT '40'`)
    await queryRunner.query(`ALTER TABLE "organization" ADD "total_image_size" integer NOT NULL DEFAULT '5'`)
    await queryRunner.query(`ALTER TABLE "organization" ADD "workspace_quota" integer NOT NULL DEFAULT '0'`)
    await queryRunner.query(`ALTER TABLE "organization" ADD "max_concurrent_workspaces" integer NOT NULL DEFAULT '10'`)
  }
}

================================================
FILE: apps/api/src/migrations/1748866194353-migration.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { MigrationInterface, QueryRunner } from 'typeorm'

/** Adds workspace."autoArchiveInterval" (default 10080 = 7 days in minutes). */
export class Migration1748866194353 implements MigrationInterface {
  name = 'Migration1748866194353'

  public async up(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`ALTER TABLE "workspace" ADD "autoArchiveInterval" integer NOT NULL DEFAULT '10080'`)
  }

  public async down(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`ALTER TABLE "workspace" DROP COLUMN "autoArchiveInterval"`)
  }
}

================================================
FILE: apps/api/src/migrations/1749474791343-migration.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { MigrationInterface, QueryRunner } from 'typeorm'

/** Adds the nullable api_key."expiresAt" timestamp. */
export class Migration1749474791343 implements MigrationInterface {
  name = 'Migration1749474791343'

  public async up(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`ALTER TABLE "api_key" ADD "expiresAt" TIMESTAMP`)
  }

  public async down(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(`ALTER TABLE "api_key" DROP COLUMN "expiresAt"`)
  }
}

================================================
FILE: apps/api/src/migrations/1749474791344-migration.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1749474791344 implements MigrationInterface { name = 'Migration1749474791344' public async up(queryRunner: QueryRunner): Promise { // Snapshot to backup rename await queryRunner.renameColumn('workspace', 'snapshotRegistryId', 'backupRegistryId') await queryRunner.renameColumn('workspace', 'snapshotImage', 'backupImage') await queryRunner.renameColumn('workspace', 'lastSnapshotAt', 'lastBackupAt') await queryRunner.renameColumn('workspace', 'snapshotState', 'backupState') await queryRunner.renameColumn('workspace', 'existingSnapshotImages', 'existingBackupImages') // Node to runner rename await queryRunner.renameColumn('image', 'buildNodeId', 'buildRunnerId') await queryRunner.renameTable('image_node', 'image_runner') await queryRunner.renameColumn('image_runner', 'nodeId', 'runnerId') await queryRunner.renameTable('node', 'runner') await queryRunner.renameColumn('workspace', 'nodeId', 'runnerId') await queryRunner.renameColumn('workspace', 'prevNodeId', 'prevRunnerId') // Image to snapshot rename await queryRunner.renameColumn('warm_pool', 'image', 'snapshot') await queryRunner.renameColumn('organization', 'image_quota', 'snapshot_quota') await queryRunner.renameColumn('organization', 'max_image_size', 'max_snapshot_size') await queryRunner.query( `ALTER TYPE "public"."organization_role_permissions_enum" RENAME VALUE 'write:images' TO 'write:snapshots'`, ) await queryRunner.query( `ALTER TYPE "public"."organization_role_permissions_enum" RENAME VALUE 'delete:images' TO 'delete:snapshots'`, ) await queryRunner.query( `ALTER TYPE "public"."api_key_permissions_enum" RENAME VALUE 'write:images' TO 'write:snapshots'`, ) await queryRunner.query( `ALTER TYPE "public"."api_key_permissions_enum" RENAME VALUE 'delete:images' TO 'delete:snapshots'`, ) await queryRunner.renameTable('image_runner', 'snapshot_runner') await 
queryRunner.renameColumn('snapshot_runner', 'imageRef', 'snapshotRef') await queryRunner.query( `ALTER TYPE "public"."snapshot_runner_state_enum" RENAME VALUE 'pulling_image' TO 'pulling_snapshot'`, ) await queryRunner.query( `ALTER TYPE "public"."snapshot_runner_state_enum" RENAME VALUE 'building_image' TO 'building_snapshot'`, ) await queryRunner.query(`ALTER TABLE "snapshot_runner" ALTER COLUMN "state" SET DEFAULT 'pulling_snapshot'`) await queryRunner.renameColumn('build_info', 'imageRef', 'snapshotRef') await queryRunner.renameTable('image', 'snapshot') await queryRunner.renameColumn('snapshot', 'buildInfoImageRef', 'buildInfoSnapshotRef') await queryRunner.query(`ALTER TYPE "public"."snapshot_state_enum" RENAME VALUE 'pulling_image' TO 'pulling'`) await queryRunner.renameColumn('workspace', 'image', 'snapshot') await queryRunner.renameColumn('workspace', 'buildInfoImageRef', 'buildInfoSnapshotRef') await queryRunner.renameColumn('workspace', 'backupImage', 'backupSnapshot') await queryRunner.renameColumn('workspace', 'existingBackupImages', 'existingBackupSnapshots') await queryRunner.query( `ALTER TYPE "public"."workspace_state_enum" RENAME VALUE 'pulling_image' TO 'pulling_snapshot'`, ) await queryRunner.query( `ALTER TYPE "public"."workspace_state_enum" RENAME VALUE 'building_image' TO 'building_snapshot'`, ) // Workspace to sandbox rename await queryRunner.renameTable('workspace', 'sandbox') await queryRunner.renameTable('workspace_usage_periods', 'sandbox_usage_periods') await queryRunner.renameColumn('sandbox_usage_periods', 'workspaceId', 'sandboxId') await queryRunner.renameColumn('organization', 'max_cpu_per_workspace', 'max_cpu_per_sandbox') await queryRunner.renameColumn('organization', 'max_memory_per_workspace', 'max_memory_per_sandbox') await queryRunner.renameColumn('organization', 'max_disk_per_workspace', 'max_disk_per_sandbox') // Snapshot fields await queryRunner.query(`ALTER TABLE "snapshot" ADD "imageName" character varying NOT NULL 
DEFAULT ''`) await queryRunner.query(`ALTER TABLE "snapshot" ADD "cpu" integer NOT NULL DEFAULT '1'`) await queryRunner.query(`ALTER TABLE "snapshot" ADD "gpu" integer NOT NULL DEFAULT '0'`) await queryRunner.query(`ALTER TABLE "snapshot" ADD "mem" integer NOT NULL DEFAULT '1'`) await queryRunner.query(`ALTER TABLE "snapshot" ADD "disk" integer NOT NULL DEFAULT '3'`) await queryRunner.query(`UPDATE "snapshot" SET "imageName" = "name"`) // Add hideFromUsers column await queryRunner.query(`ALTER TABLE "snapshot" ADD "hideFromUsers" boolean NOT NULL DEFAULT false`) // Set hideFromUsers to true for general snapshots with names starting with "daytonaio/" await queryRunner.query( `UPDATE "snapshot" SET "hideFromUsers" = true WHERE "general" = true AND "name" LIKE 'daytonaio/%'`, ) } public async down(queryRunner: QueryRunner): Promise { // Remove hideFromUsers column await queryRunner.query(`ALTER TABLE "snapshot" DROP COLUMN "hideFromUsers"`) // Snapshot fields await queryRunner.query(`ALTER TABLE "snapshot" DROP COLUMN "disk"`) await queryRunner.query(`ALTER TABLE "snapshot" DROP COLUMN "mem"`) await queryRunner.query(`ALTER TABLE "snapshot" DROP COLUMN "gpu"`) await queryRunner.query(`ALTER TABLE "snapshot" DROP COLUMN "cpu"`) await queryRunner.query(`ALTER TABLE "snapshot" DROP COLUMN "imageName"`) // Revert workspace to sandbox rename await queryRunner.renameColumn('organization', 'max_disk_per_sandbox', 'max_disk_per_workspace') await queryRunner.renameColumn('organization', 'max_memory_per_sandbox', 'max_memory_per_workspace') await queryRunner.renameColumn('organization', 'max_cpu_per_sandbox', 'max_cpu_per_workspace') await queryRunner.renameColumn('sandbox_usage_periods', 'sandboxId', 'workspaceId') await queryRunner.renameTable('sandbox_usage_periods', 'workspace_usage_periods') await queryRunner.renameTable('sandbox', 'workspace') // Revert image to snapshot rename await queryRunner.query( `ALTER TYPE "public"."workspace_state_enum" RENAME VALUE 
'pulling_snapshot' TO 'pulling_image'`, ) await queryRunner.query( `ALTER TYPE "public"."workspace_state_enum" RENAME VALUE 'building_snapshot' TO 'building_image'`, ) await queryRunner.renameColumn('workspace', 'existingBackupSnapshots', 'existingBackupImages') await queryRunner.renameColumn('workspace', 'backupSnapshot', 'backupImage') await queryRunner.renameColumn('workspace', 'buildInfoSnapshotRef', 'buildInfoImageRef') await queryRunner.renameColumn('workspace', 'snapshot', 'image') await queryRunner.query(`ALTER TYPE "public"."snapshot_state_enum" RENAME VALUE 'pulling' TO 'pulling_image'`) await queryRunner.renameColumn('snapshot', 'buildInfoSnapshotRef', 'buildInfoImageRef') await queryRunner.renameTable('snapshot', 'image') await queryRunner.renameColumn('build_info', 'snapshotRef', 'imageRef') await queryRunner.query( `ALTER TYPE "public"."snapshot_runner_state_enum" RENAME VALUE 'pulling_snapshot' TO 'pulling_image'`, ) await queryRunner.query( `ALTER TYPE "public"."snapshot_runner_state_enum" RENAME VALUE 'building_snapshot' TO 'building_image'`, ) await queryRunner.query(`ALTER TABLE "snapshot_runner" ALTER COLUMN "state" SET DEFAULT 'pulling_image'`) await queryRunner.renameColumn('snapshot_runner', 'snapshotRef', 'imageRef') await queryRunner.renameTable('snapshot_runner', 'image_runner') await queryRunner.query( `ALTER TYPE "public"."api_key_permissions_enum" RENAME VALUE 'write:snapshots' TO 'write:images'`, ) await queryRunner.query( `ALTER TYPE "public"."api_key_permissions_enum" RENAME VALUE 'delete:snapshots' TO 'delete:images'`, ) await queryRunner.query( `ALTER TYPE "public"."organization_role_permissions_enum" RENAME VALUE 'write:snapshots' TO 'write:images'`, ) await queryRunner.query( `ALTER TYPE "public"."organization_role_permissions_enum" RENAME VALUE 'delete:snapshots' TO 'delete:images'`, ) await queryRunner.renameColumn('organization', 'max_snapshot_size', 'max_image_size') await queryRunner.renameColumn('organization', 
'snapshot_quota', 'image_quota') await queryRunner.renameColumn('warm_pool', 'snapshot', 'image') // Revert node to runner rename await queryRunner.renameColumn('workspace', 'prevRunnerId', 'prevNodeId') await queryRunner.renameColumn('workspace', 'runnerId', 'nodeId') await queryRunner.renameTable('runner', 'node') await queryRunner.renameColumn('image_runner', 'runnerId', 'nodeId') await queryRunner.renameTable('image_runner', 'image_node') await queryRunner.renameColumn('image', 'buildRunnerId', 'buildNodeId') // Revert snapshot to backup rename await queryRunner.renameColumn('workspace', 'existingBackupImages', 'existingSnapshotImages') await queryRunner.renameColumn('workspace', 'backupState', 'snapshotState') await queryRunner.renameColumn('workspace', 'lastBackupAt', 'lastSnapshotAt') await queryRunner.renameColumn('workspace', 'backupImage', 'snapshotImage') await queryRunner.renameColumn('workspace', 'backupRegistryId', 'snapshotRegistryId') } } ================================================ FILE: apps/api/src/migrations/1749474791345-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1749474791345 implements MigrationInterface { name = 'Migration1749474791345' public async up(queryRunner: QueryRunner): Promise { // For sandbox_state_enum await queryRunner.query(`ALTER TYPE "public"."sandbox_state_enum" RENAME TO "sandbox_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."sandbox_state_enum" AS ENUM('creating', 'restoring', 'destroyed', 'destroying', 'started', 'stopped', 'starting', 'stopping', 'error', 'build_failed', 'pending_build', 'building_snapshot', 'unknown', 'pulling_snapshot', 'archiving', 'archived')`, ) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "sandbox" ALTER COLUMN "state" TYPE "public"."sandbox_state_enum" USING "state"::"text"::"public"."sandbox_state_enum"`, ) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "state" SET DEFAULT 'unknown'`) await queryRunner.query(`DROP TYPE "public"."sandbox_state_enum_old"`) // For snapshot_state_enum await queryRunner.query(`ALTER TYPE "public"."snapshot_state_enum" RENAME TO "snapshot_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."snapshot_state_enum" AS ENUM('build_pending', 'building', 'pending', 'pulling', 'pending_validation', 'validating', 'active', 'error', 'build_failed', 'removing')`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "snapshot" ALTER COLUMN "state" TYPE "public"."snapshot_state_enum" USING "state"::"text"::"public"."snapshot_state_enum"`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" SET DEFAULT 'pending'`) await queryRunner.query(`DROP TYPE "public"."snapshot_state_enum_old"`) } public async down(queryRunner: QueryRunner): Promise { // For snapshot_state_enum - recreate without build_failed await queryRunner.query(`UPDATE 
"snapshot" SET "state" = 'error' WHERE "state" = 'build_failed'`) await queryRunner.query(`ALTER TYPE "public"."snapshot_state_enum" RENAME TO "snapshot_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."snapshot_state_enum" AS ENUM('build_pending', 'building', 'pending', 'pulling', 'pending_validation', 'validating', 'active', 'error', 'removing')`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "snapshot" ALTER COLUMN "state" TYPE "public"."snapshot_state_enum" USING "state"::"text"::"public"."snapshot_state_enum"`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" SET DEFAULT 'pending'`) await queryRunner.query(`DROP TYPE "public"."snapshot_state_enum_old"`) // For sandbox_state_enum - recreate without build_failed await queryRunner.query(`UPDATE "sandbox" SET "state" = 'error' WHERE "state" = 'build_failed'`) await queryRunner.query(`ALTER TYPE "public"."sandbox_state_enum" RENAME TO "sandbox_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."sandbox_state_enum" AS ENUM('creating', 'restoring', 'destroyed', 'destroying', 'started', 'stopped', 'starting', 'stopping', 'error', 'pending_build', 'building_snapshot', 'unknown', 'pulling_snapshot', 'archiving', 'archived')`, ) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "sandbox" ALTER COLUMN "state" TYPE "public"."sandbox_state_enum" USING "state"::"text"::"public"."sandbox_state_enum"`, ) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "state" SET DEFAULT 'unknown'`) await queryRunner.query(`DROP TYPE "public"."sandbox_state_enum_old"`) } } ================================================ FILE: apps/api/src/migrations/1750077343089-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1750077343089 implements MigrationInterface { name = 'Migration1750077343089' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" ADD "daemonVersion" character varying`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" DROP COLUMN "daemonVersion"`) } } ================================================ FILE: apps/api/src/migrations/1750436374899-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1750436374899 implements MigrationInterface { name = 'Migration1750436374899' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `CREATE TABLE "audit_log" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "actorId" character varying NOT NULL, "actorEmail" character varying NOT NULL DEFAULT '', "organizationId" character varying, "action" character varying NOT NULL, "targetType" character varying, "targetId" character varying, "statusCode" integer, "errorMessage" character varying, "ipAddress" character varying, "userAgent" text, "source" character varying, "metadata" jsonb, "createdAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), CONSTRAINT "audit_log_id_pk" PRIMARY KEY ("id"))`, ) await queryRunner.query( `CREATE INDEX "audit_log_organizationId_createdAt_index" ON "audit_log" ("organizationId", "createdAt") `, ) await queryRunner.query(`CREATE INDEX "audit_log_createdAt_index" ON "audit_log" ("createdAt") `) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`DROP INDEX "public"."audit_log_createdAt_index"`) await queryRunner.query(`DROP INDEX "public"."audit_log_organizationId_createdAt_index"`) await queryRunner.query(`DROP 
TABLE "audit_log"`) } } ================================================ FILE: apps/api/src/migrations/1750668569562-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1750668569562 implements MigrationInterface { name = 'Migration1750668569562' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" ADD "autoDeleteInterval" integer NOT NULL DEFAULT '-1'`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" DROP COLUMN "autoDeleteInterval"`) } } ================================================ FILE: apps/api/src/migrations/1750751712412-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1750751712412 implements MigrationInterface { name = 'Migration1750751712412' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TYPE "public"."snapshot_state_enum" RENAME TO "snapshot_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."snapshot_state_enum" AS ENUM('build_pending', 'building', 'pending', 'pulling', 'pending_validation', 'validating', 'active', 'inactive', 'error', 'build_failed', 'removing')`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "snapshot" ALTER COLUMN "state" TYPE "public"."snapshot_state_enum" USING "state"::"text"::"public"."snapshot_state_enum"`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" SET DEFAULT 'pending'`) await queryRunner.query(`DROP TYPE "public"."snapshot_state_enum_old"`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query( 
`CREATE TYPE "public"."snapshot_state_enum_old" AS ENUM('active', 'build_failed', 'build_pending', 'building', 'error', 'pending', 'pending_validation', 'pulling', 'removing', 'validating')`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "snapshot" ALTER COLUMN "state" TYPE "public"."snapshot_state_enum_old" USING "state"::"text"::"public"."snapshot_state_enum_old"`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" SET DEFAULT 'pending'`) await queryRunner.query(`DROP TYPE "public"."snapshot_state_enum"`) await queryRunner.query(`ALTER TYPE "public"."snapshot_state_enum_old" RENAME TO "snapshot_state_enum"`) } } ================================================ FILE: apps/api/src/migrations/1751456907334-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' import { GlobalOrganizationRolesIds } from '../organization/constants/global-organization-roles.constant' import { OrganizationResourcePermission } from '../organization/enums/organization-resource-permission.enum' export class Migration1751456907334 implements MigrationInterface { name = 'Migration1751456907334' public async up(queryRunner: QueryRunner): Promise { // update enums await queryRunner.query(`ALTER TYPE "public"."api_key_permissions_enum" RENAME TO "api_key_permissions_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."api_key_permissions_enum" AS ENUM('write:registries', 'delete:registries', 'write:snapshots', 'delete:snapshots', 'write:sandboxes', 'delete:sandboxes', 'read:volumes', 'write:volumes', 'delete:volumes', 'read:audit_logs')`, ) await queryRunner.query( `ALTER TABLE "api_key" ALTER COLUMN "permissions" TYPE "public"."api_key_permissions_enum"[] USING "permissions"::"text"::"public"."api_key_permissions_enum"[]`, ) await 
queryRunner.query(`DROP TYPE "public"."api_key_permissions_enum_old"`) await queryRunner.query( `ALTER TYPE "public"."organization_role_permissions_enum" RENAME TO "organization_role_permissions_enum_old"`, ) await queryRunner.query( `CREATE TYPE "public"."organization_role_permissions_enum" AS ENUM('write:registries', 'delete:registries', 'write:snapshots', 'delete:snapshots', 'write:sandboxes', 'delete:sandboxes', 'read:volumes', 'write:volumes', 'delete:volumes', 'read:audit_logs')`, ) await queryRunner.query( `ALTER TABLE "organization_role" ALTER COLUMN "permissions" TYPE "public"."organization_role_permissions_enum"[] USING "permissions"::"text"::"public"."organization_role_permissions_enum"[]`, ) await queryRunner.query(`DROP TYPE "public"."organization_role_permissions_enum_old"`) // add auditor role await queryRunner.query(` INSERT INTO "organization_role" ("id", "name", "description", "permissions", "isGlobal") VALUES ( '${GlobalOrganizationRolesIds.AUDITOR}', 'Auditor', 'Grants access to audit logs in the organization', ARRAY[ '${OrganizationResourcePermission.READ_AUDIT_LOGS}' ]::organization_role_permissions_enum[], TRUE ) `) // update organization role foreign keys await queryRunner.query( `ALTER TABLE "organization_role_assignment" DROP CONSTRAINT "organization_role_assignment_roleId_fk"`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment" ADD CONSTRAINT "organization_role_assignment_roleId_fk" FOREIGN KEY ("roleId") REFERENCES "organization_role"("id") ON DELETE CASCADE ON UPDATE CASCADE`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment_invitation" DROP CONSTRAINT "organization_role_assignment_invitation_roleId_fk"`, ) await queryRunner.query( `ALTER TABLE "organization_role_assignment_invitation" ADD CONSTRAINT "organization_role_assignment_invitation_roleId_fk" FOREIGN KEY ("roleId") REFERENCES "organization_role"("id") ON DELETE CASCADE ON UPDATE CASCADE`, ) } public async down(queryRunner: QueryRunner): 
Promise { // delete auditor role await queryRunner.query(`DELETE FROM "organization_role" WHERE "id" = '${GlobalOrganizationRolesIds.AUDITOR}'`) // remove read:audit_logs permission from api keys and organization roles await queryRunner.query( `UPDATE "api_key" SET "permissions" = array_remove("permissions", '${OrganizationResourcePermission.READ_AUDIT_LOGS}')`, ) await queryRunner.query( `UPDATE "organization_role" SET "permissions" = array_remove("permissions", '${OrganizationResourcePermission.READ_AUDIT_LOGS}')`, ) // revert enums await queryRunner.query( `CREATE TYPE "public"."organization_role_permissions_enum_old" AS ENUM('delete:registries', 'delete:sandboxes', 'delete:snapshots', 'delete:volumes', 'read:volumes', 'write:registries', 'write:sandboxes', 'write:snapshots', 'write:volumes')`, ) await queryRunner.query( `ALTER TABLE "organization_role" ALTER COLUMN "permissions" TYPE "public"."organization_role_permissions_enum_old"[] USING "permissions"::"text"::"public"."organization_role_permissions_enum_old"[]`, ) await queryRunner.query(`DROP TYPE "public"."organization_role_permissions_enum"`) await queryRunner.query( `ALTER TYPE "public"."organization_role_permissions_enum_old" RENAME TO "organization_role_permissions_enum"`, ) await queryRunner.query( `CREATE TYPE "public"."api_key_permissions_enum_old" AS ENUM('delete:registries', 'delete:sandboxes', 'delete:snapshots', 'delete:volumes', 'read:volumes', 'write:registries', 'write:sandboxes', 'write:snapshots', 'write:volumes')`, ) await queryRunner.query( `ALTER TABLE "api_key" ALTER COLUMN "permissions" TYPE "public"."api_key_permissions_enum_old"[] USING "permissions"::"text"::"public"."api_key_permissions_enum_old"[]`, ) await queryRunner.query(`DROP TYPE "public"."api_key_permissions_enum"`) await queryRunner.query(`ALTER TYPE "public"."api_key_permissions_enum_old" RENAME TO "api_key_permissions_enum"`) } } ================================================ FILE: 
apps/api/src/migrations/1752494676200-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1752494676200 implements MigrationInterface { name = 'Migration1752494676200' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`DROP TABLE "team"`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query( `CREATE TABLE "team" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "name" character varying NOT NULL, CONSTRAINT "PK_f57d8293406df4af348402e4b74" PRIMARY KEY ("id"))`, ) } } ================================================ FILE: apps/api/src/migrations/1752494676205-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1752494676205 implements MigrationInterface { name = 'Migration1752494676205' public async up(queryRunner: QueryRunner): Promise { // Convert runner.region from enum to varchar await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "region" TYPE varchar USING "region"::text`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "region" SET DEFAULT 'us'`) // Convert sandbox.region from enum to varchar await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "region" TYPE varchar USING "region"::text`) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "region" SET DEFAULT 'us'`) // Convert warm_pool.target from enum to varchar await queryRunner.query(`ALTER TABLE "warm_pool" ALTER COLUMN "target" TYPE varchar USING "target"::text`) await queryRunner.query(`ALTER TABLE "warm_pool" ALTER COLUMN "target" SET DEFAULT 'us'`) // Drop the enum type if it exists await queryRunner.query(`DROP TYPE IF EXISTS "public"."runner_region_enum"`) await 
queryRunner.query(`DROP TYPE IF EXISTS "public"."sandbox_region_enum"`) await queryRunner.query(`DROP TYPE IF EXISTS "public"."warm_pool_target_enum"`) } public async down(queryRunner: QueryRunner): Promise { // Recreate the enum type await queryRunner.query(`CREATE TYPE "public"."warm_pool_target_enum" AS ENUM('eu', 'us', 'asia')`) await queryRunner.query(`CREATE TYPE "public"."sandbox_region_enum" AS ENUM('eu', 'us', 'asia')`) await queryRunner.query(`CREATE TYPE "public"."runner_region_enum" AS ENUM('eu', 'us', 'asia')`) // Convert back to enum for runner.region await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "region" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "runner" ALTER COLUMN "region" TYPE "public"."runner_region_enum" USING "region"::"public"."runner_region_enum"`, ) // Convert back to enum for sandbox.region await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "region" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "sandbox" ALTER COLUMN "region" TYPE "public"."sandbox_region_enum" USING "region"::"public"."sandbox_region_enum"`, ) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "region" SET DEFAULT 'eu'`) // Convert back to enum for warm_pool.target await queryRunner.query(`ALTER TABLE "warm_pool" ALTER COLUMN "target" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "warm_pool" ALTER COLUMN "target" TYPE "public"."warm_pool_target_enum" USING "target"::"public"."warm_pool_target_enum"`, ) await queryRunner.query(`ALTER TABLE "warm_pool" ALTER COLUMN "target" SET DEFAULT 'eu'`) } } ================================================ FILE: apps/api/src/migrations/1752848014862-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1752848014862 implements MigrationInterface { name = 'Migration1752848014862' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "warm_pool" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "warm_pool" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "lastChecked" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "sandbox_usage_periods" ALTER COLUMN "startAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "sandbox_usage_periods" ALTER COLUMN "endAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "snapshot_runner" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "snapshot_runner" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "lastActivityAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "lastBackupAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "build_info" ALTER COLUMN "lastUsedAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "build_info" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "build_info" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`) await 
queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "volume" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "volume" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "organization_user" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "organization_user" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query( `ALTER TABLE "organization_invitation" ALTER COLUMN "expiresAt" TYPE TIMESTAMP WITH TIME ZONE`, ) await queryRunner.query( `ALTER TABLE "organization_invitation" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`, ) await queryRunner.query( `ALTER TABLE "organization_invitation" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`, ) await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "suspendedAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "suspendedUntil" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "organization_role" ALTER COLUMN "createdAt" TYPE TIMESTAMP WITH TIME ZONE`) await queryRunner.query(`ALTER TABLE "organization_role" ALTER COLUMN "updatedAt" TYPE TIMESTAMP WITH TIME ZONE`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE 
"organization_role" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "organization_role" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "suspendedUntil" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "suspendedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "organization_invitation" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "organization_invitation" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "organization_invitation" ALTER COLUMN "expiresAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "organization_user" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "organization_user" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "volume" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "volume" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "imageName" SET DEFAULT ''`) await queryRunner.query(`ALTER TABLE "build_info" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "build_info" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "build_info" ALTER COLUMN "lastUsedAt" TYPE TIMESTAMP`) await 
queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "lastBackupAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "lastActivityAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "snapshot_runner" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "snapshot_runner" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "sandbox_usage_periods" ALTER COLUMN "endAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "sandbox_usage_periods" ALTER COLUMN "startAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "lastChecked" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "warm_pool" ALTER COLUMN "updatedAt" TYPE TIMESTAMP`) await queryRunner.query(`ALTER TABLE "warm_pool" ALTER COLUMN "createdAt" TYPE TIMESTAMP`) } } ================================================ FILE: apps/api/src/migrations/1753099115783-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1753099115783 implements MigrationInterface { name = 'Migration1753099115783' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "snapshot" DROP COLUMN "enabled"`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "snapshot" ADD "enabled" boolean NOT NULL DEFAULT true`) } } ================================================ FILE: apps/api/src/migrations/1753100751730-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1753100751730 implements MigrationInterface { name = 'Migration1753100751730' public async up(queryRunner: QueryRunner): Promise { await queryRunner.renameColumn('runner', 'memory', 'memoryGiB') await queryRunner.renameColumn('runner', 'disk', 'diskGiB') await queryRunner.query( `ALTER TABLE "runner" ADD "currentCpuUsagePercentage" double precision NOT NULL DEFAULT '0'`, ) await queryRunner.query( `ALTER TABLE "runner" ADD "currentMemoryUsagePercentage" double precision NOT NULL DEFAULT '0'`, ) await queryRunner.query( `ALTER TABLE "runner" ADD "currentDiskUsagePercentage" double precision NOT NULL DEFAULT '0'`, ) await queryRunner.query(`ALTER TABLE "runner" ADD "currentAllocatedCpu" integer NOT NULL DEFAULT '0'`) await queryRunner.query(`ALTER TABLE "runner" ADD "currentAllocatedMemoryGiB" integer NOT NULL DEFAULT '0'`) await queryRunner.query(`ALTER TABLE "runner" ADD "currentAllocatedDiskGiB" integer NOT NULL DEFAULT '0'`) await queryRunner.query(`ALTER TABLE "runner" ADD "currentSnapshotCount" integer NOT NULL DEFAULT '0'`) await queryRunner.query(`ALTER TABLE "runner" ADD "availabilityScore" integer NOT NULL DEFAULT '0'`) } public async down(queryRunner: QueryRunner): Promise { 
await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "availabilityScore"`) await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "currentSnapshotCount"`) await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "currentAllocatedDiskGiB"`) await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "currentAllocatedMemoryGiB"`) await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "currentAllocatedCpu"`) await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "currentDiskUsagePercentage"`) await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "currentMemoryUsagePercentage"`) await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "currentCpuUsagePercentage"`) await queryRunner.renameColumn('runner', 'diskGiB', 'disk') await queryRunner.renameColumn('runner', 'memoryGiB', 'memory') } } ================================================ FILE: apps/api/src/migrations/1753100751731-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' import { GlobalOrganizationRolesIds } from '../organization/constants/global-organization-roles.constant' export class Migration1753100751731 implements MigrationInterface { name = 'Migration1753100751731' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(` UPDATE "organization_role" SET "name" = 'Snapshots Admin', "description" = 'Grants admin access to snapshots in the organization' WHERE "id" = '${GlobalOrganizationRolesIds.SNAPSHOTS_ADMIN}' `) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(` UPDATE "organization_role" SET "name" = 'Images Admin', "description" = 'Grants admin access to images in the organization' WHERE "id" = '${GlobalOrganizationRolesIds.SNAPSHOTS_ADMIN}' `) } } ================================================ FILE: apps/api/src/migrations/1753185133351-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1753185133351 implements MigrationInterface { name = 'Migration1753185133351' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" ADD "version" character varying NOT NULL DEFAULT '0'`) await queryRunner.query(`ALTER TABLE "runner" ADD "proxyUrl" character varying NOT NULL DEFAULT ''`) // Copy apiUrl to proxyUrl for all existing records await queryRunner.query(`UPDATE "runner" SET "proxyUrl" = "apiUrl"`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "version"`) await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "proxyUrl"`) } } ================================================ FILE: apps/api/src/migrations/1753274135567-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1753274135567 implements MigrationInterface { name = 'Migration1753274135567' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "project" SET DEFAULT ''`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "project" DROP DEFAULT`) } } ================================================ FILE: apps/api/src/migrations/1753430929609-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1753430929609 implements MigrationInterface { name = 'Migration1753430929609' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "user" ADD "createdAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now()`) // For existing users, set createdAt to match their personal organization's createdAt await queryRunner.query(` UPDATE "user" u SET "createdAt" = o."createdAt" FROM "organization" o WHERE o."createdBy" = u.id AND o.personal = true; `) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "user" DROP COLUMN "createdAt"`) } } ================================================ FILE: apps/api/src/migrations/1753717830378-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1753717830378 implements MigrationInterface { name = 'Migration1753717830378' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" ADD "networkBlockAll" boolean NOT NULL DEFAULT false`) await queryRunner.query(`ALTER TABLE "sandbox" ADD "networkAllowList" character varying`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" DROP COLUMN "networkAllowList"`) await queryRunner.query(`ALTER TABLE "sandbox" DROP COLUMN "networkBlockAll"`) } } ================================================ FILE: apps/api/src/migrations/1754042247109-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1754042247109 implements MigrationInterface { name = 'Migration1754042247109' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `ALTER TABLE "organization" ADD "suspensionCleanupGracePeriodHours" integer NOT NULL DEFAULT '24'`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "suspensionCleanupGracePeriodHours"`) } } ================================================ FILE: apps/api/src/migrations/1755003696741-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1755003696741 implements MigrationInterface { name = 'Migration1755003696741' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" ADD "backupErrorReason" text`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" DROP COLUMN "backupErrorReason"`) } } ================================================ FILE: apps/api/src/migrations/1755356869493-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1755356869493 implements MigrationInterface { name = 'Migration1755356869493' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `CREATE TABLE "webhook_initialization" ("organizationId" character varying NOT NULL, "svixApplicationId" character varying, "lastError" text, "retryCount" integer NOT NULL DEFAULT '0', "createdAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), CONSTRAINT "webhook_initialization_organizationId_pk" PRIMARY KEY ("organizationId"))`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`DROP TABLE "webhook_initialization"`) } } ================================================ FILE: apps/api/src/migrations/1755464957487-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1755464957487 implements MigrationInterface { name = 'Migration1755464957487' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `CREATE TABLE "ssh_access" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "sandboxId" character varying NOT NULL, "token" text NOT NULL, "expiresAt" TIMESTAMP NOT NULL, "createdAt" TIMESTAMP NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP NOT NULL DEFAULT now(), CONSTRAINT "ssh_access_id_pk" PRIMARY KEY ("id"))`, ) await queryRunner.query( `ALTER TABLE "ssh_access" ADD CONSTRAINT "ssh_access_sandboxId_fk" FOREIGN KEY ("sandboxId") REFERENCES "sandbox"("id") ON DELETE CASCADE ON UPDATE NO ACTION`, ) await queryRunner.query( `ALTER TABLE "sandbox" ADD "sshPass" character varying(32) NOT NULL DEFAULT REPLACE(uuid_generate_v4()::text, '-', '')`, ) } public async down(queryRunner: QueryRunner): Promise { await 
queryRunner.query(`ALTER TABLE "sandbox" DROP COLUMN "sshPass"`) await queryRunner.query(`ALTER TABLE "ssh_access" DROP CONSTRAINT "ssh_access_sandboxId_fk"`) await queryRunner.query(`DROP TABLE "ssh_access"`) } } ================================================ FILE: apps/api/src/migrations/1755521645207-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1755521645207 implements MigrationInterface { name = 'Migration1755521645207' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `CREATE TABLE "sandbox_usage_periods_archive" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "sandboxId" character varying NOT NULL, "organizationId" character varying NOT NULL, "startAt" TIMESTAMP WITH TIME ZONE NOT NULL, "endAt" TIMESTAMP WITH TIME ZONE NOT NULL, "cpu" double precision NOT NULL, "gpu" double precision NOT NULL, "mem" double precision NOT NULL, "disk" double precision NOT NULL, "region" character varying NOT NULL, CONSTRAINT "sandbox_usage_periods_archive_id_pk" PRIMARY KEY ("id"))`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`DROP TABLE "sandbox_usage_periods_archive"`) } } ================================================ FILE: apps/api/src/migrations/1755860619921-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1755860619921 implements MigrationInterface { name = 'Migration1755860619921' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `ALTER TABLE "organization" ADD "sandboxLimitedNetworkEgress" boolean NOT NULL DEFAULT false`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "sandboxLimitedNetworkEgress"`) } } ================================================ FILE: apps/api/src/migrations/1757513754037-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1757513754037 implements MigrationInterface { name = 'Migration1757513754037' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "docker_registry" ADD "region" character varying`) await queryRunner.query( `ALTER TYPE "public"."docker_registry_registrytype_enum" RENAME TO "docker_registry_registrytype_enum_old"`, ) await queryRunner.query( `CREATE TYPE "public"."docker_registry_registrytype_enum" AS ENUM('internal', 'organization', 'transient', 'backup')`, ) await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "registryType" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "docker_registry" ALTER COLUMN "registryType" TYPE "public"."docker_registry_registrytype_enum" USING "registryType"::"text"::"public"."docker_registry_registrytype_enum"`, ) await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "registryType" SET DEFAULT 'internal'`) await queryRunner.query(`DROP TYPE "public"."docker_registry_registrytype_enum_old"`) // Create the base default registry by copying from the default internal one await queryRunner.query(` INSERT INTO public.docker_registry ( name, 
url, username, password, "isDefault", project, "createdAt", "updatedAt", "registryType", "organizationId", "region" ) SELECT 'Backup Registry' AS name, url, username, password, "isDefault", project, now() AS "createdAt", now() AS "updatedAt", 'backup' AS "registryType", "organizationId", "region" FROM public.docker_registry WHERE "registryType" = 'internal' AND "isDefault" = true ORDER BY "createdAt" ASC LIMIT 1 `) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(` DELETE FROM public.docker_registry WHERE "registryType" = 'backup' `) await queryRunner.query( `CREATE TYPE "public"."docker_registry_registrytype_enum_old" AS ENUM('internal', 'organization', 'transient', 'backup')`, ) await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "registryType" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "docker_registry" ALTER COLUMN "registryType" TYPE "public"."docker_registry_registrytype_enum_old" USING "registryType"::"text"::"public"."docker_registry_registrytype_enum_old"`, ) await queryRunner.query(`ALTER TABLE "docker_registry" ALTER COLUMN "registryType" SET DEFAULT 'internal'`) await queryRunner.query(`DROP TYPE "public"."docker_registry_registrytype_enum"`) await queryRunner.query( `ALTER TYPE "public"."docker_registry_registrytype_enum_old" RENAME TO "docker_registry_registrytype_enum"`, ) await queryRunner.query(`ALTER TABLE "docker_registry" DROP COLUMN "region"`) } } ================================================ FILE: apps/api/src/migrations/1757513754038-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1757513754038 implements MigrationInterface { name = 'Migration1757513754038' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "docker_registry" ADD "isFallback" boolean NOT NULL DEFAULT false`) // Update existing registries that have isDefault = true and region = null to be fallback registries await queryRunner.query(` UPDATE "docker_registry" SET "isFallback" = true WHERE "isDefault" = true AND "region" IS NULL AND "registryType" = 'backup' `) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "docker_registry" DROP COLUMN "isFallback"`) } } ================================================ FILE: apps/api/src/migrations/1759241690773-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1759241690773 implements MigrationInterface { name = 'Migration1759241690773' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "used"`) await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "capacity"`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" ADD "capacity" integer NOT NULL DEFAULT '1000'`) await queryRunner.query(`ALTER TABLE "runner" ADD "used" integer NOT NULL DEFAULT '0'`) } } ================================================ FILE: apps/api/src/migrations/1759768058397-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1759768058397 implements MigrationInterface { name = 'Migration1759768058397' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" ADD "name" character varying`) await queryRunner.query(`UPDATE "sandbox" SET "name" = "id"`) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "name" SET NOT NULL`) await queryRunner.query( `ALTER TABLE "sandbox" ALTER COLUMN "name" SET DEFAULT 'sandbox-' || substring(gen_random_uuid()::text, 1, 10)`, ) await queryRunner.query( `ALTER TABLE "sandbox" ADD CONSTRAINT "sandbox_organizationId_name_unique" UNIQUE ("organizationId", "name")`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" DROP CONSTRAINT "sandbox_organizationId_name_unique"`) await queryRunner.query(`ALTER TABLE "sandbox" DROP COLUMN "name"`) } } ================================================ FILE: apps/api/src/migrations/1761912147638-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' import { configuration } from '../config/configuration' export class Migration1761912147638 implements MigrationInterface { name = 'Migration1761912147638' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" ADD "defaultRegion" character varying NULL`) await queryRunner.query(`UPDATE "organization" SET "defaultRegion" = '${configuration.defaultRegion.id}'`) await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "defaultRegion" SET NOT NULL`) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "region" DROP DEFAULT`) await queryRunner.query(`ALTER TABLE "warm_pool" ALTER COLUMN "target" DROP DEFAULT`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "defaultRegion"`) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "region" SET DEFAULT 'us'`) await queryRunner.query(`ALTER TABLE "warm_pool" ALTER COLUMN "target" SET DEFAULT 'us'`) } } ================================================ FILE: apps/api/src/migrations/1761912147639-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1761912147639 implements MigrationInterface { name = 'Migration1761912147639' public async up(queryRunner: QueryRunner): Promise { await queryRunner.renameColumn('snapshot', 'internalName', 'ref') await queryRunner.renameColumn('snapshot', 'buildRunnerId', 'initialRunnerId') await queryRunner.query(`ALTER TABLE "snapshot" ADD "skipValidation" boolean NOT NULL DEFAULT false`) // Update snapshot states await queryRunner.query(`ALTER TYPE "public"."snapshot_state_enum" RENAME TO "snapshot_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."snapshot_state_enum" AS ENUM('pending', 'pulling', 'pending_validation', 'validating', 'active', 'inactive', 'building', 'error', 'build_failed', 'removing')`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "snapshot" ALTER COLUMN "state" TYPE "public"."snapshot_state_enum" USING "state"::"text"::"public"."snapshot_state_enum"`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" SET DEFAULT 'pending'`) await queryRunner.query(`DROP TYPE "public"."snapshot_state_enum_old"`) } public async down(queryRunner: QueryRunner): Promise { // Revert snapshot states await queryRunner.query(`ALTER TYPE "public"."snapshot_state_enum" RENAME TO "snapshot_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."snapshot_state_enum" AS ENUM('build_pending', 'pending', 'pulling', 'pending_validation', 'validating', 'active', 'inactive', 'building', 'error', 'build_failed', 'removing')`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "snapshot" ALTER COLUMN "state" TYPE "public"."snapshot_state_enum" USING "state"::"text"::"public"."snapshot_state_enum"`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" SET DEFAULT 
'pending'`) await queryRunner.query(`DROP TYPE "public"."snapshot_state_enum_old"`) await queryRunner.query(`ALTER TABLE "snapshot" DROP COLUMN "skipValidation"`) await queryRunner.renameColumn('snapshot', 'initialRunnerId', 'buildRunnerId') await queryRunner.renameColumn('snapshot', 'ref', 'internalName') } } ================================================ FILE: apps/api/src/migrations/1763561822000-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1763561822000 implements MigrationInterface { name = 'Migration1763561822000' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" ADD "authenticated_rate_limit" integer`) await queryRunner.query(`ALTER TABLE "organization" ADD "sandbox_create_rate_limit" integer`) await queryRunner.query(`ALTER TABLE "organization" ADD "sandbox_lifecycle_rate_limit" integer`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "sandbox_lifecycle_rate_limit"`) await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "sandbox_create_rate_limit"`) await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "authenticated_rate_limit"`) } } ================================================ FILE: apps/api/src/migrations/1764073472179-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' import { configuration } from '../config/configuration' export class Migration1764073472179 implements MigrationInterface { name = 'Migration1764073472179' public async up(queryRunner: QueryRunner): Promise { // Create region table await queryRunner.query( `CREATE TABLE "region" ("id" character varying NOT NULL, "name" character varying NOT NULL, "organizationId" uuid, "hidden" boolean NOT NULL DEFAULT false, "enforceQuotas" boolean NOT NULL DEFAULT true, "createdAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), CONSTRAINT "region_id_pk" PRIMARY KEY ("id"))`, ) // Add unique constraints for region name await queryRunner.query( `CREATE UNIQUE INDEX "region_organizationId_null_name_unique" ON "region" ("name") WHERE "organizationId" IS NULL`, ) await queryRunner.query( `CREATE UNIQUE INDEX "region_organizationId_name_unique" ON "region" ("organizationId", "name") WHERE "organizationId" IS NOT NULL`, ) // Expand organization table with defaultRegionId column (make it nullable) await queryRunner.query(`ALTER TABLE "organization" ADD "defaultRegionId" character varying NULL`) await queryRunner.query(`UPDATE "organization" SET "defaultRegionId" = "defaultRegion"`) // Add default value for required defaultRegion column before dropping it in the contract migration await queryRunner.query( `ALTER TABLE "organization" ALTER COLUMN "defaultRegion" SET DEFAULT '${configuration.defaultRegion.id}'`, ) // Create region_quota table await queryRunner.query( `CREATE TABLE "region_quota" ("organizationId" uuid NOT NULL, "regionId" character varying NOT NULL, "total_cpu_quota" integer NOT NULL DEFAULT '10', "total_memory_quota" integer NOT NULL DEFAULT '10', "total_disk_quota" integer NOT NULL DEFAULT '30', "createdAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), 
CONSTRAINT "region_quota_organizationId_regionId_pk" PRIMARY KEY ("organizationId", "regionId"))`, ) await queryRunner.query( `ALTER TABLE "region_quota" ADD CONSTRAINT "region_quota_organizationId_fk" FOREIGN KEY ("organizationId") REFERENCES "organization"("id") ON DELETE CASCADE ON UPDATE NO ACTION`, ) // For existing organizations, migrate their region-specific quotas to their default region await queryRunner.query(` INSERT INTO "region_quota" ("organizationId", "regionId", "total_cpu_quota", "total_memory_quota", "total_disk_quota") SELECT o."id" as "organizationId", o."defaultRegionId" as "regionId", o."total_cpu_quota", o."total_memory_quota", o."total_disk_quota" FROM "organization" o `) } public async down(queryRunner: QueryRunner): Promise { // Drop region table await queryRunner.query(`DROP TABLE "region"`) // Drop defaultRegionId column from organization table await queryRunner.dropColumn('organization', 'defaultRegionId') // Drop region_quota table await queryRunner.query(`DROP TABLE "region_quota"`) } } ================================================ FILE: apps/api/src/migrations/1764073472180-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' import { configuration } from '../config/configuration' export class Migration1764073472180 implements MigrationInterface { name = 'Migration1764073472180' public async up(queryRunner: QueryRunner): Promise { // Remove defaultRegion column from organization table await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "defaultRegion"`) // Remove region-specific quotas from organization table await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "total_cpu_quota"`) await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "total_memory_quota"`) await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "total_disk_quota"`) } public async down(queryRunner: QueryRunner): Promise { // Restore defaultRegion column to organization table await queryRunner.query(`ALTER TABLE "organization" ADD "defaultRegion" character varying NULL`) await queryRunner.query(`UPDATE "organization" SET "defaultRegion" = "defaultRegionId"`) await queryRunner.query( `ALTER TABLE "organization" ALTER COLUMN "defaultRegion" SET DEFAULT '${configuration.defaultRegion.id}'`, ) await queryRunner.query(`ALTER TABLE "organization" ALTER COLUMN "defaultRegion" SET NOT NULL`) // Restore region-specific quotas to organization table await queryRunner.query(`ALTER TABLE "organization" ADD "total_disk_quota" integer NOT NULL DEFAULT '30'`) await queryRunner.query(`ALTER TABLE "organization" ADD "total_memory_quota" integer NOT NULL DEFAULT '10'`) await queryRunner.query(`ALTER TABLE "organization" ADD "total_cpu_quota" integer NOT NULL DEFAULT '10'`) // For each organization, restore region-specific quotas by taking the maximum values among all region quotas await queryRunner.query(` UPDATE "organization" o SET "total_cpu_quota" = COALESCE(q."total_cpu_quota", 10), "total_memory_quota" = COALESCE(q."total_memory_quota", 10), "total_disk_quota" = COALESCE(q."total_disk_quota", 30) 
FROM ( SELECT "organizationId", MAX("total_cpu_quota") as "total_cpu_quota", MAX("total_memory_quota") as "total_memory_quota", MAX("total_disk_quota") as "total_disk_quota" FROM "region_quota" GROUP BY "organizationId" ) q WHERE o."id" = q."organizationId" `) } } ================================================ FILE: apps/api/src/migrations/1764844895057-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' import { GlobalOrganizationRolesIds } from '../organization/constants/global-organization-roles.constant' import { OrganizationResourcePermission } from '../organization/enums/organization-resource-permission.enum' export class Migration1764844895057 implements MigrationInterface { name = 'Migration1764844895057' public async up(queryRunner: QueryRunner): Promise { // add region type field with its type and constraints await queryRunner.query(`CREATE TYPE "public"."region_regiontype_enum" AS ENUM('shared', 'dedicated', 'custom')`) await queryRunner.query(`ALTER TABLE "region" ADD "regionType" "public"."region_regiontype_enum"`) await queryRunner.query( `ALTER TABLE "region" ADD CONSTRAINT "region_not_custom" CHECK ("organizationId" IS NOT NULL OR "regionType" != 'custom')`, ) await queryRunner.query( `ALTER TABLE "region" ADD CONSTRAINT "region_not_shared" CHECK ("organizationId" IS NULL OR "regionType" != 'shared')`, ) await queryRunner.query(`UPDATE "region" SET "regionType" = 'custom' WHERE "organizationId" IS NOT NULL`) await queryRunner.query(`UPDATE "region" SET "regionType" = 'shared' WHERE "organizationId" IS NULL`) await queryRunner.query(`ALTER TABLE "region" ALTER COLUMN "regionType" SET NOT NULL`) // update api key permission enum await queryRunner.query(`ALTER TYPE "public"."api_key_permissions_enum" RENAME TO "api_key_permissions_enum_old"`) await queryRunner.query( `CREATE TYPE 
"public"."api_key_permissions_enum" AS ENUM('write:registries', 'delete:registries', 'write:snapshots', 'delete:snapshots', 'write:sandboxes', 'delete:sandboxes', 'read:volumes', 'write:volumes', 'delete:volumes', 'write:regions', 'delete:regions', 'read:runners', 'write:runners', 'delete:runners', 'read:audit_logs')`, ) await queryRunner.query( `ALTER TABLE "api_key" ALTER COLUMN "permissions" TYPE "public"."api_key_permissions_enum"[] USING "permissions"::"text"::"public"."api_key_permissions_enum"[]`, ) await queryRunner.query(`DROP TYPE "public"."api_key_permissions_enum_old"`) // update organization role permission enum await queryRunner.query( `ALTER TYPE "public"."organization_role_permissions_enum" RENAME TO "organization_role_permissions_enum_old"`, ) await queryRunner.query( `CREATE TYPE "public"."organization_role_permissions_enum" AS ENUM('write:registries', 'delete:registries', 'write:snapshots', 'delete:snapshots', 'write:sandboxes', 'delete:sandboxes', 'read:volumes', 'write:volumes', 'delete:volumes', 'write:regions', 'delete:regions', 'read:runners', 'write:runners', 'delete:runners', 'read:audit_logs')`, ) await queryRunner.query( `ALTER TABLE "organization_role" ALTER COLUMN "permissions" TYPE "public"."organization_role_permissions_enum"[] USING "permissions"::"text"::"public"."organization_role_permissions_enum"[]`, ) await queryRunner.query(`DROP TYPE "public"."organization_role_permissions_enum_old"`) // add infrastructure admin role await queryRunner.query(` INSERT INTO "organization_role" ("id", "name", "description", "permissions", "isGlobal") VALUES ( '${GlobalOrganizationRolesIds.INFRASTRUCTURE_ADMIN}', 'Infrastructure Admin', 'Grants admin access to infrastructure in the organization', ARRAY[ '${OrganizationResourcePermission.WRITE_REGIONS}', '${OrganizationResourcePermission.DELETE_REGIONS}', '${OrganizationResourcePermission.READ_RUNNERS}', '${OrganizationResourcePermission.WRITE_RUNNERS}', 
'${OrganizationResourcePermission.DELETE_RUNNERS}' ]::organization_role_permissions_enum[], TRUE ) `) // add runner name field await queryRunner.query(`ALTER TABLE "runner" ADD "name" character varying`) await queryRunner.query(`UPDATE "runner" SET "name" = "id"`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "name" SET NOT NULL`) await queryRunner.query(`ALTER TABLE "runner" ADD CONSTRAINT "runner_region_name_unique" UNIQUE ("region", "name")`) // create new index for runner await queryRunner.query( `CREATE INDEX "runner_state_unschedulable_region_index" ON "runner" ("state", "unschedulable", "region") `, ) // add region proxy and ssh gateway fields await queryRunner.query(`ALTER TABLE "region" ADD "proxyUrl" character varying`) await queryRunner.query(`ALTER TABLE "region" ADD "toolboxProxyUrl" character varying`) await queryRunner.query(`ALTER TABLE "region" ADD "proxyApiKeyHash" character varying`) await queryRunner.query(`ALTER TABLE "region" ADD "sshGatewayUrl" character varying`) await queryRunner.query(`ALTER TABLE "region" ADD "sshGatewayApiKeyHash" character varying`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "proxyUrl" DROP DEFAULT`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "region" DROP DEFAULT`) } public async down(queryRunner: QueryRunner): Promise { // remove region proxy and ssh gateway fields await queryRunner.query(`ALTER TABLE "region" DROP COLUMN "sshGatewayApiKeyHash"`) await queryRunner.query(`ALTER TABLE "region" DROP COLUMN "sshGatewayUrl"`) await queryRunner.query(`ALTER TABLE "region" DROP COLUMN "proxyApiKeyHash"`) await queryRunner.query(`ALTER TABLE "region" DROP COLUMN "toolboxProxyUrl"`) await queryRunner.query(`ALTER TABLE "region" DROP COLUMN "proxyUrl"`) // drop region type field await queryRunner.query(`ALTER TABLE "region" DROP COLUMN "regionType"`) await queryRunner.query(`DROP TYPE "public"."region_regiontype_enum"`) // remove infrastructure admin role await queryRunner.query( 
`DELETE FROM "organization_role" WHERE "id" = '${GlobalOrganizationRolesIds.INFRASTRUCTURE_ADMIN}'`, ) // revert api key permission enum await queryRunner.query( `CREATE TYPE "public"."api_key_permissions_enum_old" AS ENUM('delete:registries', 'delete:sandboxes', 'delete:snapshots', 'delete:volumes', 'read:audit_logs', 'read:volumes', 'write:registries', 'write:sandboxes', 'write:snapshots', 'write:volumes')`, ) await queryRunner.query( `ALTER TABLE "api_key" ALTER COLUMN "permissions" TYPE "public"."api_key_permissions_enum_old"[] USING "permissions"::"text"::"public"."api_key_permissions_enum_old"[]`, ) await queryRunner.query(`DROP TYPE "public"."api_key_permissions_enum"`) await queryRunner.query(`ALTER TYPE "public"."api_key_permissions_enum_old" RENAME TO "api_key_permissions_enum"`) // revert organization role permission enum await queryRunner.query( `CREATE TYPE "public"."organization_role_permissions_enum_old" AS ENUM('delete:registries', 'delete:sandboxes', 'delete:snapshots', 'delete:volumes', 'read:audit_logs', 'read:volumes', 'write:registries', 'write:sandboxes', 'write:snapshots', 'write:volumes')`, ) await queryRunner.query( `ALTER TABLE "organization_role" ALTER COLUMN "permissions" TYPE "public"."organization_role_permissions_enum_old"[] USING "permissions"::"text"::"public"."organization_role_permissions_enum_old"[]`, ) await queryRunner.query(`DROP TYPE "public"."organization_role_permissions_enum"`) await queryRunner.query( `ALTER TYPE "public"."organization_role_permissions_enum_old" RENAME TO "organization_role_permissions_enum"`, ) // drop runner name field await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "name"`) // drop new index for runner await queryRunner.query(`DROP INDEX "public"."runner_state_unschedulable_region_index"`) } } ================================================ FILE: apps/api/src/migrations/1764844895058-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1764844895058 implements MigrationInterface { name = 'Migration1764844895058' public async up(queryRunner: QueryRunner): Promise { // drop region hidden field await queryRunner.query(`ALTER TABLE "region" DROP COLUMN "hidden"`) } public async down(queryRunner: QueryRunner): Promise { // revert drop region hidden field await queryRunner.query(`ALTER TABLE "region" ADD "hidden" boolean NOT NULL DEFAULT false`) } } ================================================ FILE: apps/api/src/migrations/1765282546000-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1765282546000 implements MigrationInterface { name = 'Migration1765282546000' public async up(queryRunner: QueryRunner): Promise { // Remove skipValidation column await queryRunner.query(`ALTER TABLE "snapshot" DROP COLUMN "skipValidation"`) // Update any snapshots in VALIDATING or PENDING_VALIDATION state to PENDING await queryRunner.query(`UPDATE "snapshot" SET "state" = 'pending' WHERE "state" = 'validating'`) await queryRunner.query(`UPDATE "snapshot" SET "state" = 'pending' WHERE "state" = 'pending_validation'`) // Update snapshot_state_enum to remove VALIDATING and PENDING_VALIDATION await queryRunner.query(`ALTER TYPE "public"."snapshot_state_enum" RENAME TO "snapshot_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."snapshot_state_enum" AS ENUM('pending', 'pulling', 'active', 'inactive', 'building', 'error', 'build_failed', 'removing')`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "snapshot" ALTER COLUMN "state" TYPE "public"."snapshot_state_enum" USING "state"::"text"::"public"."snapshot_state_enum"`, ) await 
queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" SET DEFAULT 'pending'`) await queryRunner.query(`DROP TYPE "public"."snapshot_state_enum_old"`) } public async down(queryRunner: QueryRunner): Promise { // Revert snapshot_state_enum to include VALIDATING and PENDING_VALIDATION await queryRunner.query(`ALTER TYPE "public"."snapshot_state_enum" RENAME TO "snapshot_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."snapshot_state_enum" AS ENUM('pending', 'pulling', 'pending_validation', 'validating', 'active', 'inactive', 'building', 'error', 'build_failed', 'removing')`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "snapshot" ALTER COLUMN "state" TYPE "public"."snapshot_state_enum" USING "state"::"text"::"public"."snapshot_state_enum"`, ) await queryRunner.query(`ALTER TABLE "snapshot" ALTER COLUMN "state" SET DEFAULT 'pending'`) await queryRunner.query(`DROP TYPE "public"."snapshot_state_enum_old"`) // Re-add skipValidation column await queryRunner.query(`ALTER TABLE "snapshot" ADD "skipValidation" boolean NOT NULL DEFAULT false`) } } ================================================ FILE: apps/api/src/migrations/1765366773736-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1765366773736 implements MigrationInterface { name = 'Migration1765366773736' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" ADD "recoverable" boolean NOT NULL DEFAULT false`) // Update existing sandboxes with recoverable error reasons to set recoverable = true await queryRunner.query(` UPDATE "sandbox" SET "recoverable" = true WHERE "state" = 'error' AND ( LOWER("errorReason") LIKE '%no space left on device%' OR LOWER("errorReason") LIKE '%storage limit%' OR LOWER("errorReason") LIKE '%disk quota exceeded%' ) `) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "sandbox" DROP COLUMN "recoverable"`) } } ================================================ FILE: apps/api/src/migrations/1765400000000-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1765400000000 implements MigrationInterface { name = 'Migration1765400000000' public async up(queryRunner: QueryRunner): Promise { // Normalize Docker Hub URLs to 'docker.io' for consistency // The runner will convert to 'index.docker.io/v1/' for builds where needed await queryRunner.query(` UPDATE "docker_registry" SET "url" = 'docker.io' WHERE LOWER("url") LIKE '%docker.io%' `) } public async down(queryRunner: QueryRunner): Promise { // Cannot reliably reverse this migration as we don't know the original URLs // This is a one-way normalization } } ================================================ FILE: apps/api/src/migrations/1765806205881-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1765806205881 implements MigrationInterface { name = 'Migration1765806205881' public async up(queryRunner: QueryRunner): Promise { // Create snapshot_region table await queryRunner.query(` CREATE TABLE "snapshot_region" ( "snapshotId" uuid NOT NULL, "regionId" character varying NOT NULL, "createdAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), CONSTRAINT "PK_snapshot_region" PRIMARY KEY ("snapshotId", "regionId") ) `) // Add foreign key constraints await queryRunner.query(` ALTER TABLE "snapshot_region" ADD CONSTRAINT "FK_snapshot_region_snapshot" FOREIGN KEY ("snapshotId") REFERENCES "snapshot"("id") ON DELETE CASCADE ON UPDATE NO ACTION `) await queryRunner.query(` ALTER TABLE "snapshot_region" ADD CONSTRAINT "FK_snapshot_region_region" FOREIGN KEY ("regionId") REFERENCES "region"("id") ON DELETE CASCADE ON UPDATE NO ACTION `) // Migrate existing snapshots: add snapshot_region entries based on organization's default region await queryRunner.query(` INSERT INTO "snapshot_region" ("snapshotId", "regionId") SELECT s.id, o."defaultRegionId" FROM "snapshot" s INNER JOIN "organization" o ON s."organizationId" = o.id WHERE o."defaultRegionId" IS NOT NULL `) } public async down(queryRunner: QueryRunner): Promise { // Drop foreign key constraints await queryRunner.query(`ALTER TABLE "snapshot_region" DROP CONSTRAINT "FK_snapshot_region_region"`) await queryRunner.query(`ALTER TABLE "snapshot_region" DROP CONSTRAINT "FK_snapshot_region_snapshot"`) // Drop snapshot_region table await queryRunner.query(`DROP TABLE "snapshot_region"`) } } ================================================ FILE: apps/api/src/migrations/1766415256696-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1766415256696 implements MigrationInterface { name = 'Migration1766415256696' public async up(queryRunner: QueryRunner): Promise { // region snapshot manager field await queryRunner.query(`ALTER TABLE "region" ADD "snapshotManagerUrl" character varying`) // docker registry indexes await queryRunner.query( `CREATE INDEX "docker_registry_registryType_isDefault_index" ON "docker_registry" ("registryType", "isDefault") `, ) await queryRunner.query( `CREATE INDEX "docker_registry_region_registryType_index" ON "docker_registry" ("region", "registryType") `, ) await queryRunner.query( `CREATE INDEX "docker_registry_organizationId_registryType_index" ON "docker_registry" ("organizationId", "registryType") `, ) } public async down(queryRunner: QueryRunner): Promise { // drop region snapshot manager field await queryRunner.query(`ALTER TABLE "region" DROP COLUMN "snapshotManagerUrl"`) // drop docker registry indexes await queryRunner.query(`DROP INDEX "public"."docker_registry_organizationId_registryType_index"`) await queryRunner.query(`DROP INDEX "public"."docker_registry_region_registryType_index"`) await queryRunner.query(`DROP INDEX "public"."docker_registry_registryType_isDefault_index"`) } } ================================================ FILE: apps/api/src/migrations/1767830400000-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1767830400000 implements MigrationInterface { name = 'Migration1767830400000' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" ADD "currentStartedSandboxes" integer NOT NULL DEFAULT 0`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "currentStartedSandboxes"`) } } ================================================ FILE: apps/api/src/migrations/1768306129179-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1768306129179 implements MigrationInterface { name = 'Migration1768306129179' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `CREATE TYPE "public"."job_status_enum" AS ENUM('PENDING', 'IN_PROGRESS', 'COMPLETED', 'FAILED')`, ) await queryRunner.query(`CREATE TYPE "public"."job_resourcetype_enum" AS ENUM('SANDBOX', 'SNAPSHOT', 'BACKUP')`) await queryRunner.query( `CREATE TABLE "job" ("id" uuid NOT NULL DEFAULT uuid_generate_v4(), "version" integer NOT NULL, "type" character varying NOT NULL, "status" "public"."job_status_enum" NOT NULL DEFAULT 'PENDING', "runnerId" character varying NOT NULL, "resourceType" "public"."job_resourcetype_enum" NOT NULL, "resourceId" character varying NOT NULL, "payload" character varying, "resultMetadata" character varying, "traceContext" jsonb, "errorMessage" text, "startedAt" TIMESTAMP WITH TIME ZONE, "completedAt" TIMESTAMP WITH TIME ZONE, "createdAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), CONSTRAINT "job_id_pk" PRIMARY KEY ("id"))`, ) await queryRunner.query( `CREATE UNIQUE INDEX "IDX_UNIQUE_INCOMPLETE_JOB" ON "job" 
("resourceType", "resourceId", "runnerId") WHERE "completedAt" IS NULL`, ) await queryRunner.query(`CREATE INDEX "job_resourceType_resourceId_index" ON "job" ("resourceType", "resourceId") `) await queryRunner.query(`CREATE INDEX "job_status_createdAt_index" ON "job" ("status", "createdAt") `) await queryRunner.query(`CREATE INDEX "job_runnerId_status_index" ON "job" ("runnerId", "status") `) await queryRunner.query(`ALTER TABLE "runner" RENAME COLUMN "version" TO "apiVersion"`) await queryRunner.query(`ALTER TABLE "runner" ADD "appVersion" character varying DEFAULT 'v0.0.0-dev'`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "domain" DROP NOT NULL`) await queryRunner.query(`ALTER TABLE "runner" DROP CONSTRAINT "UQ_330d74ac3d0e349b4c73c62ad6d"`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "apiUrl" DROP NOT NULL`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "proxyUrl" DROP NOT NULL`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "cpu" TYPE double precision`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "memoryGiB" TYPE double precision`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "diskGiB" TYPE double precision`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "gpu" DROP NOT NULL`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "gpuType" DROP NOT NULL`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "gpuType" SET NOT NULL`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "gpu" SET NOT NULL`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "diskGiB" TYPE integer`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "memoryGiB" TYPE integer`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "cpu" TYPE integer`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "proxyUrl" SET NOT NULL`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN 
"apiUrl" SET NOT NULL`) await queryRunner.query(`ALTER TABLE "runner" ADD CONSTRAINT "UQ_330d74ac3d0e349b4c73c62ad6d" UNIQUE ("domain")`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "domain" SET NOT NULL`) await queryRunner.query(`ALTER TABLE "runner" RENAME COLUMN "apiVersion" TO "version"`) await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "appVersion"`) await queryRunner.query(`DROP INDEX "public"."job_runnerId_status_index"`) await queryRunner.query(`DROP INDEX "public"."job_status_createdAt_index"`) await queryRunner.query(`DROP INDEX "public"."job_resourceType_resourceId_index"`) await queryRunner.query(`DROP INDEX "public"."IDX_UNIQUE_INCOMPLETE_JOB"`) await queryRunner.query(`DROP TABLE "job"`) await queryRunner.query(`DROP TYPE "public"."job_resourcetype_enum"`) await queryRunner.query(`DROP TYPE "public"."job_status_enum"`) } } ================================================ FILE: apps/api/src/migrations/1768461678804-migration.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1768461678804 implements MigrationInterface { name = 'Migration1768461678804' // TODO: Add migrationsTransactionMode: 'each', to data-source.ts // TypeORM currently does not support non-transactional reverts // Needed because CREATE/DROP INDEX CONCURRENTLY cannot run inside a transaction // public readonly transaction = false public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(` CREATE INDEX IF NOT EXISTS "idx_sandbox_volumes_gin" ON "sandbox" USING GIN ("volumes" jsonb_path_ops) WHERE "desiredState" <> 'destroyed'; `) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(` DROP INDEX IF EXISTS "idx_sandbox_volumes_gin"; `) } } ================================================ FILE: apps/api/src/migrations/1768475454675-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1768475454675 implements MigrationInterface { name = 'Migration1768475454675' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `CREATE INDEX "idx_region_custom" ON "region" ("organizationId") WHERE "regionType" = 'custom'`, ) await queryRunner.query( `CREATE UNIQUE INDEX "region_sshGatewayApiKeyHash_unique" ON "region" ("sshGatewayApiKeyHash") WHERE "sshGatewayApiKeyHash" IS NOT NULL`, ) await queryRunner.query( `CREATE UNIQUE INDEX "region_proxyApiKeyHash_unique" ON "region" ("proxyApiKeyHash") WHERE "proxyApiKeyHash" IS NOT NULL`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`DROP INDEX "public"."region_proxyApiKeyHash_unique"`) await queryRunner.query(`DROP INDEX "public"."region_sshGatewayApiKeyHash_unique"`) await queryRunner.query(`DROP INDEX "public"."idx_region_custom"`) } } ================================================ FILE: apps/api/src/migrations/1768485728153-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1768485728153 implements MigrationInterface { name = 'Migration1768485728153' public async up(queryRunner: QueryRunner): Promise { // Note: not using CONCURRENTLY + skipping transactions because of reverting issue: https://github.com/typeorm/typeorm/issues/9981 await queryRunner.query(`CREATE INDEX "api_key_org_user_idx" ON "api_key" ("organizationId", "userId") `) await queryRunner.query( `CREATE INDEX "warm_pool_find_idx" ON "warm_pool" ("snapshot", "target", "class", "cpu", "mem", "disk", "gpu", "osUser", "env") `, ) await queryRunner.query(`CREATE INDEX "snapshot_runner_state_idx" ON "snapshot_runner" ("state") `) await queryRunner.query(`CREATE INDEX "snapshot_runner_runnerid_idx" ON "snapshot_runner" ("runnerId") `) await queryRunner.query( `CREATE INDEX "snapshot_runner_runnerid_snapshotref_idx" ON "snapshot_runner" ("runnerId", "snapshotRef") `, ) await queryRunner.query(`CREATE INDEX "snapshot_runner_snapshotref_idx" ON "snapshot_runner" ("snapshotRef") `) await queryRunner.query(`CREATE INDEX "sandbox_pending_idx" ON "sandbox" ("id") WHERE "pending" = true`) await queryRunner.query( `CREATE INDEX "sandbox_active_only_idx" ON "sandbox" ("id") WHERE "state" <> ALL (ARRAY['destroyed'::sandbox_state_enum, 'archived'::sandbox_state_enum])`, ) await queryRunner.query( `CREATE INDEX "sandbox_runner_state_desired_idx" ON "sandbox" ("runnerId", "state", "desiredState") WHERE "pending" = false`, ) await queryRunner.query(`CREATE INDEX "sandbox_backupstate_idx" ON "sandbox" ("backupState") `) await queryRunner.query(`CREATE INDEX "sandbox_resources_idx" ON "sandbox" ("cpu", "mem", "disk", "gpu") `) await queryRunner.query(`CREATE INDEX "sandbox_region_idx" ON "sandbox" ("region") `) await queryRunner.query(`CREATE INDEX "sandbox_organizationid_idx" ON "sandbox" ("organizationId") `) await queryRunner.query(`CREATE INDEX "sandbox_runner_state_idx" ON 
"sandbox" ("runnerId", "state") `) await queryRunner.query(`CREATE INDEX "sandbox_runnerid_idx" ON "sandbox" ("runnerId") `) await queryRunner.query(`CREATE INDEX "sandbox_snapshot_idx" ON "sandbox" ("snapshot") `) await queryRunner.query(`CREATE INDEX "sandbox_desiredstate_idx" ON "sandbox" ("desiredState") `) await queryRunner.query(`CREATE INDEX "sandbox_state_idx" ON "sandbox" ("state") `) await queryRunner.query(`CREATE INDEX "snapshot_state_idx" ON "snapshot" ("state") `) await queryRunner.query(`CREATE INDEX "snapshot_name_idx" ON "snapshot" ("name") `) await queryRunner.query( `CREATE INDEX "sandbox_labels_gin_full_idx" ON "sandbox" USING gin ("labels" jsonb_path_ops)`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`DROP INDEX "public"."sandbox_labels_gin_full_idx"`) await queryRunner.query(`DROP INDEX "public"."snapshot_name_idx"`) await queryRunner.query(`DROP INDEX "public"."snapshot_state_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_state_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_desiredstate_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_snapshot_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_runnerid_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_runner_state_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_organizationid_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_region_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_resources_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_backupstate_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_runner_state_desired_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_active_only_idx"`) await queryRunner.query(`DROP INDEX "public"."sandbox_pending_idx"`) await queryRunner.query(`DROP INDEX "public"."snapshot_runner_snapshotref_idx"`) await queryRunner.query(`DROP INDEX "public"."snapshot_runner_runnerid_snapshotref_idx"`) 
await queryRunner.query(`DROP INDEX "public"."snapshot_runner_runnerid_idx"`) await queryRunner.query(`DROP INDEX "public"."snapshot_runner_state_idx"`) await queryRunner.query(`DROP INDEX "public"."warm_pool_find_idx"`) await queryRunner.query(`DROP INDEX "public"."api_key_org_user_idx"`) } } ================================================ FILE: apps/api/src/migrations/1768583941244-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1768583941244 implements MigrationInterface { name = 'Migration1768583941244' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" ADD "currentCpuLoadAverage" double precision NOT NULL DEFAULT '0'`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "currentCpuLoadAverage"`) } } ================================================ FILE: apps/api/src/migrations/1769516172576-migration.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1769516172576 implements MigrationInterface { name = 'Migration1769516172576' public async up(queryRunner: QueryRunner): Promise { // For sandbox_state_enum - add 'resizing' value await queryRunner.query(`ALTER TYPE "public"."sandbox_state_enum" ADD VALUE IF NOT EXISTS 'resizing'`) } public async down(queryRunner: QueryRunner): Promise { // Drop any index with explicit desiredState enum type cast in WHERE clause (required for enum swap) // For sandbox_state_enum - remove 'resizing' value await queryRunner.query(`UPDATE "sandbox" SET "state" = 'stopped' WHERE "state" = 'resizing'`) await queryRunner.query(`ALTER TYPE "public"."sandbox_state_enum" RENAME TO "sandbox_state_enum_old"`) await queryRunner.query( `CREATE TYPE "public"."sandbox_state_enum" AS ENUM('creating', 'restoring', 'destroyed', 'destroying', 'started', 'stopped', 'starting', 'stopping', 'error', 'build_failed', 'pending_build', 'building_snapshot', 'unknown', 'pulling_snapshot', 'archiving', 'archived')`, ) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "state" DROP DEFAULT`) await queryRunner.query( `ALTER TABLE "sandbox" ALTER COLUMN "state" TYPE "public"."sandbox_state_enum" USING "state"::"text"::"public"."sandbox_state_enum"`, ) await queryRunner.query(`ALTER TABLE "sandbox" ALTER COLUMN "state" SET DEFAULT 'unknown'`) await queryRunner.query(`DROP TYPE "public"."sandbox_state_enum_old"`) // Recreate the indices that were dropped } } ================================================ FILE: apps/api/src/migrations/1769516172577-migration.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1769516172577 implements MigrationInterface { name = 'Migration1769516172577' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" ADD "draining" boolean NOT NULL DEFAULT false`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "draining"`) } } ================================================ FILE: apps/api/src/migrations/1770043707083-migration.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1770043707083 implements MigrationInterface { name = 'Migration1770043707083' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" ADD "experimentalConfig" jsonb`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "experimentalConfig"`) } } ================================================ FILE: apps/api/src/migrations/1770212429837-migration.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1770212429837 implements MigrationInterface { name = 'Migration1770212429837' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" ADD "authenticated_rate_limit_ttl_seconds" integer`) await queryRunner.query(`ALTER TABLE "organization" ADD "sandbox_create_rate_limit_ttl_seconds" integer`) await queryRunner.query(`ALTER TABLE "organization" ADD "sandbox_lifecycle_rate_limit_ttl_seconds" integer`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "sandbox_lifecycle_rate_limit_ttl_seconds"`) await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "sandbox_create_rate_limit_ttl_seconds"`) await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "authenticated_rate_limit_ttl_seconds"`) } } ================================================ FILE: apps/api/src/migrations/1770823569571-migration.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1770823569571 implements MigrationInterface { name = 'Migration1770823569571' public async up(queryRunner: QueryRunner): Promise { // Note: not using CONCURRENTLY + skipping transactions because of reverting issue: https://github.com/typeorm/typeorm/issues/9981 await queryRunner.query(`CREATE INDEX "idx_sandbox_authtoken" ON "sandbox" ("authToken") `) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`DROP INDEX "public"."idx_sandbox_authtoken"`) } } ================================================ FILE: apps/api/src/migrations/1770880371265-migration.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1770880371265 implements MigrationInterface { name = 'Migration1770880371265' public async up(queryRunner: QueryRunner): Promise { // Note: not using CONCURRENTLY + skipping transactions because of reverting issue: https://github.com/typeorm/typeorm/issues/9981 await queryRunner.query( `CREATE INDEX "idx_sandbox_usage_periods_sandbox_end" ON "sandbox_usage_periods" ("sandboxId", "endAt") `, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`DROP INDEX "public"."idx_sandbox_usage_periods_sandbox_end"`) } } ================================================ FILE: apps/api/src/migrations/README.md ================================================ # Database Migrations This project uses the **Expand and Contract** pattern for database migrations to support zero-downtime deployments. ## Overview The expand and contract pattern splits database changes into two phases: - **Pre-deploy (Expand)**: Additive, non-breaking changes that are backwards compatible with the current API version - **Post-deploy (Contract)**: Breaking changes that require the new API version to be deployed first This allows the database and API to be updated independently while maintaining compatibility during the deployment window. ## Migration Folders - `pre-deploy/` - Migrations that run **before** the API is deployed - `post-deploy/` - Migrations that run **after** the API is deployed Note: Root folder migrations (not in pre-deploy or post-deploy) are legacy migrations created before the expand-and-contract pattern was introduced. These run during `migration:run:init` only. ## Developer Workflow ### 1. Make Changes to Database Entities Modify the TypeORM entity files in `src/**/*.entity.ts` as needed. ### 2. 
Generate Migrations

Run the migration generator:

```bash
npm run migration:generate
```

This creates the **same autogenerated migration in both** `pre-deploy/` and `post-deploy/` folders with a timestamp prefix.

### 3. Analyze and Adjust Migrations

**This is the critical step.** You MUST analyze the generated migrations and determine:

- Which changes are safe to run before the API deployment (pre-deploy)
- Which changes require the new API to be running first (post-deploy)
- Whether manual adjustments are needed for zero-downtime compatibility

#### Example Scenarios

**Adding a new field (Pre-deploy only)**

When adding a new nullable column or a column with a default value:

```typescript
// pre-deploy/migration.ts
public async up(queryRunner: QueryRunner): Promise<void> {
  await queryRunner.query(`ALTER TABLE "workspace" ADD "description" varchar NULL`);
}
```

```typescript
// post-deploy/migration.ts
// DELETE the generated migration - no post-deploy changes needed
```

The new column can be added before the API deployment since the old API will simply ignore it.

---

**Dropping a field (Post-deploy only)**

When removing a column:

```typescript
// pre-deploy/migration.ts
// DELETE the generated migration - no pre-deploy changes needed
```

```typescript
// post-deploy/migration.ts
public async up(queryRunner: QueryRunner): Promise<void> {
  await queryRunner.query(`ALTER TABLE "workspace" DROP COLUMN "legacy_field"`);
}
```

The column must only be dropped after the new API is deployed, since the old API may still be reading from it.

---

**Renaming a field (Expand then Contract)**

Renaming requires both phases to maintain zero-downtime.
Use a **database trigger** to keep columns synchronized automatically:

```typescript
// pre-deploy/migration.ts (Expand)
public async up(queryRunner: QueryRunner): Promise<void> {
  // Add new column
  await queryRunner.query(`ALTER TABLE "workspace" ADD "display_name" varchar NULL`);

  // Copy existing data
  await queryRunner.query(`UPDATE "workspace" SET "display_name" = "name"`);

  // Create trigger to keep columns in sync during transition
  await queryRunner.query(`
    CREATE OR REPLACE FUNCTION sync_workspace_name() RETURNS TRIGGER AS $$
    BEGIN
      IF NEW.display_name IS NOT NULL THEN
        NEW.name := NEW.display_name;
      ELSIF NEW.name IS NOT NULL THEN
        NEW.display_name := NEW.name;
      END IF;
      RETURN NEW;
    END;
    $$ LANGUAGE plpgsql;
  `);

  await queryRunner.query(`
    CREATE TRIGGER workspace_name_sync
    BEFORE INSERT OR UPDATE ON "workspace"
    FOR EACH ROW EXECUTE FUNCTION sync_workspace_name();
  `);
}
```

```typescript
// post-deploy/migration.ts (Contract)
public async up(queryRunner: QueryRunner): Promise<void> {
  // Remove trigger and old column
  await queryRunner.query(`DROP TRIGGER workspace_name_sync ON "workspace"`);
  await queryRunner.query(`DROP FUNCTION sync_workspace_name()`);
  await queryRunner.query(`ALTER TABLE "workspace" DROP COLUMN "name"`);
}
```

**How the trigger works:**

The trigger intercepts every INSERT and UPDATE on the table and automatically copies the value between columns:

| API Version | Writes to | Trigger copies to | Result |
|-------------|-----------|-------------------|--------|
| Old API | `name` | `display_name` | Both columns have the value |
| New API | `display_name` | `name` | Both columns have the value |

**Deployment timeline:**

1. **Pre-deploy migration runs** → Trigger is active, both columns exist
2. **Rolling deployment begins** → Mix of old and new API instances, trigger keeps data in sync
3. **Rolling deployment completes** → All instances are new API
4.
**Post-deploy migration runs** → Trigger and old column are removed

**New API code changes:** The new API should read from and write to `display_name` only. The trigger handles backward compatibility with old API instances—no dual-write logic needed in application code.

## Migration Scripts

### `npm run migration:run:init`

Runs **all migrations** from both pre-deploy and post-deploy folders. Use this for:

- Initial database setup
- Development environments
- Fresh database instances

### `npm run migration:run:pre-deploy`

Runs only migrations in the `pre-deploy/` folder. Use this:

- **Before** deploying a new API version
- As part of your CI/CD pipeline, before the rolling update begins

### `npm run migration:run:post-deploy`

Runs only migrations in the `post-deploy/` folder. Use this:

- **After** the new API version is fully deployed
- As part of your CI/CD pipeline, after the rolling update completes

## Reverting Migrations

```bash
npm run migration:revert
```

This reverts the **last executed migration** from either folder (based on the combined migration history in the database).

**Important behaviors:**

- Reverts one migration at a time - run multiple times to revert multiple migrations
- Uses the combined data-source that sees all migrations
- The revert order follows the execution timestamp, not the folder structure
- Always test revert scripts in development before relying on them in production

**Recommendation:** After reverting, you may need to also revert the corresponding entity changes and regenerate migrations to keep everything in sync.

================================================
FILE: apps/api/src/migrations/data-source.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { DataSource, DataSourceOptions } from 'typeorm' import { CustomNamingStrategy } from '../common/utils/naming-strategy.util' import { join } from 'path' import { config } from 'dotenv' config({ path: [join(__dirname, '../../.env'), join(__dirname, '../../.env.local')] }) export const baseDataSourceOptions: DataSourceOptions = { type: 'postgres' as const, host: process.env.DB_HOST, port: parseInt(process.env.DB_PORT!, 10), username: process.env.DB_USERNAME, password: process.env.DB_PASSWORD, database: process.env.DB_DATABASE, synchronize: false, migrationsRun: false, logging: process.env.DB_LOGGING === 'true', namingStrategy: new CustomNamingStrategy(), entities: [join(__dirname, '../**/*.entity.ts')], entitySkipConstructor: true, } const AppDataSource = new DataSource({ ...baseDataSourceOptions, migrations: [join(__dirname, '**/*-migration.{ts,js}')], }) export default AppDataSource ================================================ FILE: apps/api/src/migrations/post-deploy/data-source.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { join } from 'path' import { DataSource } from 'typeorm' import { baseDataSourceOptions } from '../data-source' const PostDeployDataSource = new DataSource({ ...baseDataSourceOptions, migrations: [join(__dirname, '*-migration.{ts,js}')], }) export default PostDeployDataSource ================================================ FILE: apps/api/src/migrations/pre-deploy/1770900000000-migration.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1770900000000 implements MigrationInterface { name = 'Migration1770900000000' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query( `ALTER TABLE "organization" ADD "snapshot_deactivation_timeout_minutes" integer NOT NULL DEFAULT 20160`, ) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "organization" DROP COLUMN "snapshot_deactivation_timeout_minutes"`) } } ================================================ FILE: apps/api/src/migrations/pre-deploy/1773744656413-migration.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1773744656413 implements MigrationInterface { name = 'Migration1773744656413' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "currentAllocatedCpu" TYPE double precision`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "currentAllocatedMemoryGiB" TYPE double precision`) await queryRunner.query(`ALTER TABLE "runner" ALTER COLUMN "currentAllocatedDiskGiB" TYPE double precision`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query( `ALTER TABLE "runner" ALTER COLUMN "currentAllocatedDiskGiB" TYPE integer USING ROUND("currentAllocatedDiskGiB")::integer`, ) await queryRunner.query( `ALTER TABLE "runner" ALTER COLUMN "currentAllocatedMemoryGiB" TYPE integer USING ROUND("currentAllocatedMemoryGiB")::integer`, ) await queryRunner.query( `ALTER TABLE "runner" ALTER COLUMN "currentAllocatedCpu" TYPE integer USING ROUND("currentAllocatedCpu")::integer`, ) } } ================================================ FILE: apps/api/src/migrations/pre-deploy/1773916204375-migration.ts 
================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MigrationInterface, QueryRunner } from 'typeorm' export class Migration1773916204375 implements MigrationInterface { name = 'Migration1773916204375' public async up(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" ADD "serviceHealth" jsonb`) } public async down(queryRunner: QueryRunner): Promise { await queryRunner.query(`ALTER TABLE "runner" DROP COLUMN "serviceHealth"`) } } ================================================ FILE: apps/api/src/migrations/pre-deploy/data-source.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { join } from 'path' import { DataSource } from 'typeorm' import { baseDataSourceOptions } from '../data-source' const PreDeployDataSource = new DataSource({ ...baseDataSourceOptions, migrations: [join(__dirname, '*-migration.{ts,js}')], }) export default PreDeployDataSource ================================================ FILE: apps/api/src/notification/emitters/notification-redis.emitter.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, OnModuleInit } from '@nestjs/common' import { Emitter } from '@socket.io/redis-emitter' import { InjectRedis } from '@nestjs-modules/ioredis' import Redis from 'ioredis' import { NotificationEmitter } from '../gateways/notification-emitter.abstract' import { SandboxDto } from '../../sandbox/dto/sandbox.dto' import { SandboxState } from '../../sandbox/enums/sandbox-state.enum' import { SandboxDesiredState } from '../../sandbox/enums/sandbox-desired-state.enum' import { SandboxEvents } from '../../sandbox/constants/sandbox-events.constants' import { SnapshotDto } from '../../sandbox/dto/snapshot.dto' import { SnapshotState } from '../../sandbox/enums/snapshot-state.enum' import { SnapshotEvents } from '../../sandbox/constants/snapshot-events' import { VolumeDto } from '../../sandbox/dto/volume.dto' import { VolumeState } from '../../sandbox/enums/volume-state.enum' import { VolumeEvents } from '../../sandbox/constants/volume-events' import { RunnerDto } from '../../sandbox/dto/runner.dto' import { RunnerState } from '../../sandbox/enums/runner-state.enum' import { RunnerEvents } from '../../sandbox/constants/runner-events' @Injectable() export class NotificationRedisEmitter extends NotificationEmitter implements OnModuleInit { private readonly logger = new Logger(NotificationRedisEmitter.name) private emitter: Emitter constructor(@InjectRedis() private readonly redis: Redis) { super() } onModuleInit() { this.emitter = new Emitter(this.redis.duplicate()) this.logger.debug('Socket.io Redis emitter initialized (publish-only)') } emitSandboxCreated(sandbox: SandboxDto) { this.emitter.to(sandbox.organizationId).emit(SandboxEvents.CREATED, sandbox) } emitSandboxStateUpdated(sandbox: SandboxDto, oldState: SandboxState, newState: SandboxState) { this.emitter.to(sandbox.organizationId).emit(SandboxEvents.STATE_UPDATED, { sandbox, oldState, newState }) } emitSandboxDesiredStateUpdated( sandbox: SandboxDto, 
oldDesiredState: SandboxDesiredState, newDesiredState: SandboxDesiredState, ) { this.emitter .to(sandbox.organizationId) .emit(SandboxEvents.DESIRED_STATE_UPDATED, { sandbox, oldDesiredState, newDesiredState }) } emitSnapshotCreated(snapshot: SnapshotDto) { this.emitter.to(snapshot.organizationId).emit(SnapshotEvents.CREATED, snapshot) } emitSnapshotStateUpdated(snapshot: SnapshotDto, oldState: SnapshotState, newState: SnapshotState) { this.emitter .to(snapshot.organizationId) .emit(SnapshotEvents.STATE_UPDATED, { snapshot: snapshot, oldState, newState }) } emitSnapshotRemoved(snapshot: SnapshotDto) { this.emitter.to(snapshot.organizationId).emit(SnapshotEvents.REMOVED, snapshot) } emitVolumeCreated(volume: VolumeDto) { this.emitter.to(volume.organizationId).emit(VolumeEvents.CREATED, volume) } emitVolumeStateUpdated(volume: VolumeDto, oldState: VolumeState, newState: VolumeState) { this.emitter.to(volume.organizationId).emit(VolumeEvents.STATE_UPDATED, { volume, oldState, newState }) } emitVolumeLastUsedAtUpdated(volume: VolumeDto) { this.emitter.to(volume.organizationId).emit(VolumeEvents.LAST_USED_AT_UPDATED, volume) } emitRunnerCreated(runner: RunnerDto, organizationId: string | null) { if (!organizationId) { return } this.emitter.to(organizationId).emit(RunnerEvents.CREATED, runner) } emitRunnerStateUpdated( runner: RunnerDto, organizationId: string | null, oldState: RunnerState, newState: RunnerState, ) { if (!organizationId) { return } this.emitter.to(organizationId).emit(RunnerEvents.STATE_UPDATED, { runner, oldState, newState }) } emitRunnerUnschedulableUpdated(runner: RunnerDto, organizationId: string | null) { if (!organizationId) { return } this.emitter.to(organizationId).emit(RunnerEvents.UNSCHEDULABLE_UPDATED, runner) } } ================================================ FILE: apps/api/src/notification/gateways/notification-emitter.abstract.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { SandboxDto } from '../../sandbox/dto/sandbox.dto' import { SandboxState } from '../../sandbox/enums/sandbox-state.enum' import { SandboxDesiredState } from '../../sandbox/enums/sandbox-desired-state.enum' import { SnapshotDto } from '../../sandbox/dto/snapshot.dto' import { SnapshotState } from '../../sandbox/enums/snapshot-state.enum' import { VolumeDto } from '../../sandbox/dto/volume.dto' import { VolumeState } from '../../sandbox/enums/volume-state.enum' import { RunnerDto } from '../../sandbox/dto/runner.dto' import { RunnerState } from '../../sandbox/enums/runner-state.enum' export abstract class NotificationEmitter { abstract emitSandboxCreated(sandbox: SandboxDto): void abstract emitSandboxStateUpdated(sandbox: SandboxDto, oldState: SandboxState, newState: SandboxState): void abstract emitSandboxDesiredStateUpdated( sandbox: SandboxDto, oldDesiredState: SandboxDesiredState, newDesiredState: SandboxDesiredState, ): void abstract emitSnapshotCreated(snapshot: SnapshotDto): void abstract emitSnapshotStateUpdated(snapshot: SnapshotDto, oldState: SnapshotState, newState: SnapshotState): void abstract emitSnapshotRemoved(snapshot: SnapshotDto): void abstract emitVolumeCreated(volume: VolumeDto): void abstract emitVolumeStateUpdated(volume: VolumeDto, oldState: VolumeState, newState: VolumeState): void abstract emitVolumeLastUsedAtUpdated(volume: VolumeDto): void abstract emitRunnerCreated(runner: RunnerDto, organizationId: string | null): void abstract emitRunnerStateUpdated( runner: RunnerDto, organizationId: string | null, oldState: RunnerState, newState: RunnerState, ): void abstract emitRunnerUnschedulableUpdated(runner: RunnerDto, organizationId: string | null): void } ================================================ FILE: apps/api/src/notification/gateways/notification.gateway.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import { Logger, OnModuleInit, UnauthorizedException } from '@nestjs/common'
import { WebSocketGateway, WebSocketServer, OnGatewayInit } from '@nestjs/websockets'
import { Server, Socket } from 'socket.io'
import { createAdapter } from '@socket.io/redis-adapter'
import { SandboxEvents } from '../../sandbox/constants/sandbox-events.constants'
import { SandboxState } from '../../sandbox/enums/sandbox-state.enum'
import { SandboxDto } from '../../sandbox/dto/sandbox.dto'
import { SnapshotDto } from '../../sandbox/dto/snapshot.dto'
import { SnapshotEvents } from '../../sandbox/constants/snapshot-events'
import { SnapshotState } from '../../sandbox/enums/snapshot-state.enum'
import { InjectRedis } from '@nestjs-modules/ioredis'
import Redis from 'ioredis'
import { JwtStrategy } from '../../auth/jwt.strategy'
import { ApiKeyStrategy } from '../../auth/api-key.strategy'
import { isAuthContext } from '../../common/interfaces/auth-context.interface'
import { VolumeEvents } from '../../sandbox/constants/volume-events'
import { VolumeDto } from '../../sandbox/dto/volume.dto'
import { VolumeState } from '../../sandbox/enums/volume-state.enum'
import { SandboxDesiredState } from '../../sandbox/enums/sandbox-desired-state.enum'
import { RunnerDto } from '../../sandbox/dto/runner.dto'
import { RunnerState } from '../../sandbox/enums/runner-state.enum'
import { RunnerEvents } from '../../sandbox/constants/runner-events'
import { NotificationEmitter } from './notification-emitter.abstract'

/**
 * Websocket-server implementation of NotificationEmitter.
 *
 * Clients connect on the websocket transport at /api/socket.io/ and are put
 * into rooms (user id, and optionally organization id) after authenticating
 * with either a JWT or an API key. Emit methods broadcast to the relevant
 * organization room.
 */
@WebSocketGateway({
  path: '/api/socket.io/',
  transports: ['websocket'],
})
export class NotificationGateway extends NotificationEmitter implements OnGatewayInit, OnModuleInit {
  private readonly logger = new Logger(NotificationGateway.name)

  @WebSocketServer()
  server: Server

  constructor(
    private readonly jwtStrategy: JwtStrategy,
    private readonly apiKeyStrategy: ApiKeyStrategy,
    @InjectRedis() private readonly redis: Redis,
  ) {
    super()
  }

  // Attach the Redis adapter so emits reach clients connected to other
  // instances; dedicated duplicated connections are used for pub and sub.
  onModuleInit() {
    const pubClient = this.redis.duplicate()
    const subClient = pubClient.duplicate()
    this.server.adapter(createAdapter(pubClient, subClient))
    this.logger.debug('Socket.io initialized with Redis adapter')
  }

  // Install the connection-time auth middleware: JWT is attempted first,
  // then API key; a socket that passes neither is rejected.
  afterInit(server: Server) {
    this.logger.debug('WebSocket Gateway initialized')
    server.use(async (socket: Socket, next) => {
      const token = socket.handshake.auth.token
      if (!token) {
        return next(new UnauthorizedException())
      }

      // Try JWT authentication first
      try {
        const payload = await this.jwtStrategy.verifyToken(token)
        // Join the user room for user scoped notifications
        await socket.join(payload.sub)
        // Join the organization room for organization scoped notifications
        const organizationId = socket.handshake.query.organizationId as string | undefined
        if (organizationId) {
          await socket.join(organizationId)
        }
        return next()
      } catch {
        // JWT failed, try API key authentication
      }

      // Try API key authentication
      try {
        const authContext = await this.apiKeyStrategy.validate(token)
        if (isAuthContext(authContext)) {
          // Join the user room for user scoped notifications
          await socket.join(authContext.userId)
          // Join the organization room for organization scoped notifications
          if (authContext.organizationId) {
            await socket.join(authContext.organizationId)
          }
          return next()
        }
        return next(new UnauthorizedException())
      } catch {
        return next(new UnauthorizedException())
      }
    })
  }

  // --- NotificationEmitter implementation: broadcast to organization rooms ---

  emitSandboxCreated(sandbox: SandboxDto) {
    this.server.to(sandbox.organizationId).emit(SandboxEvents.CREATED, sandbox)
  }

  emitSandboxStateUpdated(sandbox: SandboxDto, oldState: SandboxState, newState: SandboxState) {
    this.server.to(sandbox.organizationId).emit(SandboxEvents.STATE_UPDATED, { sandbox, oldState, newState })
  }

  emitSandboxDesiredStateUpdated(
    sandbox: SandboxDto,
    oldDesiredState: SandboxDesiredState,
    newDesiredState: SandboxDesiredState,
  ) {
    this.server
      .to(sandbox.organizationId)
      .emit(SandboxEvents.DESIRED_STATE_UPDATED, { sandbox, oldDesiredState, newDesiredState })
  }

  emitSnapshotCreated(snapshot: SnapshotDto) {
    this.server.to(snapshot.organizationId).emit(SnapshotEvents.CREATED, snapshot)
  }

  emitSnapshotStateUpdated(snapshot: SnapshotDto, oldState: SnapshotState, newState: SnapshotState) {
    this.server
      .to(snapshot.organizationId)
      .emit(SnapshotEvents.STATE_UPDATED, { snapshot: snapshot, oldState, newState })
  }

  emitSnapshotRemoved(snapshot: SnapshotDto) {
    this.server.to(snapshot.organizationId).emit(SnapshotEvents.REMOVED, snapshot)
  }

  emitVolumeCreated(volume: VolumeDto) {
    this.server.to(volume.organizationId).emit(VolumeEvents.CREATED, volume)
  }

  emitVolumeStateUpdated(volume: VolumeDto, oldState: VolumeState, newState: VolumeState) {
    this.server.to(volume.organizationId).emit(VolumeEvents.STATE_UPDATED, { volume, oldState, newState })
  }

  emitVolumeLastUsedAtUpdated(volume: VolumeDto) {
    this.server.to(volume.organizationId).emit(VolumeEvents.LAST_USED_AT_UPDATED, volume)
  }

  // Runner events are silently dropped when no organization id is provided.

  emitRunnerCreated(runner: RunnerDto, organizationId: string | null) {
    if (!organizationId) {
      return
    }
    this.server.to(organizationId).emit(RunnerEvents.CREATED, runner)
  }

  emitRunnerStateUpdated(
    runner: RunnerDto,
    organizationId: string | null,
    oldState: RunnerState,
    newState: RunnerState,
  ) {
    if (!organizationId) {
      return
    }
    this.server.to(organizationId).emit(RunnerEvents.STATE_UPDATED, { runner, oldState, newState })
  }

  emitRunnerUnschedulableUpdated(runner: RunnerDto, organizationId: string | null) {
    if (!organizationId) {
      return
    }
    this.server.to(organizationId).emit(RunnerEvents.UNSCHEDULABLE_UPDATED, runner)
  }
}

================================================
FILE: apps/api/src/notification/notification.module.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { NotificationService } from './services/notification.service' import { NotificationGateway } from './gateways/notification.gateway' import { NotificationRedisEmitter } from './emitters/notification-redis.emitter' import { NotificationEmitter } from './gateways/notification-emitter.abstract' import { OrganizationModule } from '../organization/organization.module' import { SandboxModule } from '../sandbox/sandbox.module' import { RedisModule } from '@nestjs-modules/ioredis' import { AuthModule } from '../auth/auth.module' import { RegionModule } from '../region/region.module' import { isApiEnabled } from '../common/utils/app-mode' const gatewayEnabled = isApiEnabled() && process.env.NOTIFICATION_GATEWAY_DISABLED !== 'true' @Module({ imports: [OrganizationModule, SandboxModule, RedisModule, AuthModule, RegionModule], providers: [ NotificationService, ...(gatewayEnabled ? [NotificationGateway, { provide: NotificationEmitter, useExisting: NotificationGateway }] : [{ provide: NotificationEmitter, useClass: NotificationRedisEmitter }]), ], exports: [NotificationService], }) export class NotificationModule {} ================================================ FILE: apps/api/src/notification/services/notification.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable } from '@nestjs/common' import { OnEvent } from '@nestjs/event-emitter' import { NotificationEmitter } from '../gateways/notification-emitter.abstract' import { SandboxEvents } from '../../sandbox/constants/sandbox-events.constants' import { SandboxCreatedEvent } from '../../sandbox/events/sandbox-create.event' import { SandboxStateUpdatedEvent } from '../../sandbox/events/sandbox-state-updated.event' import { SnapshotCreatedEvent } from '../../sandbox/events/snapshot-created.event' import { SnapshotEvents } from '../../sandbox/constants/snapshot-events' import { SnapshotDto } from '../../sandbox/dto/snapshot.dto' import { SnapshotStateUpdatedEvent } from '../../sandbox/events/snapshot-state-updated.event' import { SnapshotRemovedEvent } from '../../sandbox/events/snapshot-removed.event' import { VolumeEvents } from '../../sandbox/constants/volume-events' import { VolumeCreatedEvent } from '../../sandbox/events/volume-created.event' import { VolumeDto } from '../../sandbox/dto/volume.dto' import { VolumeStateUpdatedEvent } from '../../sandbox/events/volume-state-updated.event' import { VolumeLastUsedAtUpdatedEvent } from '../../sandbox/events/volume-last-used-at-updated.event' import { SandboxDesiredStateUpdatedEvent } from '../../sandbox/events/sandbox-desired-state-updated.event' import { RunnerEvents } from '../../sandbox/constants/runner-events' import { RunnerDto } from '../../sandbox/dto/runner.dto' import { RunnerCreatedEvent } from '../../sandbox/events/runner-created.event' import { RunnerStateUpdatedEvent } from '../../sandbox/events/runner-state-updated.event' import { RunnerUnschedulableUpdatedEvent } from '../../sandbox/events/runner-unschedulable-updated.event' import { RegionService } from '../../region/services/region.service' import { SandboxService } from '../../sandbox/services/sandbox.service' import { InjectRedis } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { 
SANDBOX_EVENT_CHANNEL } from '../../common/constants/constants' @Injectable() export class NotificationService { constructor( private readonly notificationEmitter: NotificationEmitter, private readonly regionService: RegionService, private readonly sandboxService: SandboxService, @InjectRedis() private readonly redis: Redis, ) {} @OnEvent(SandboxEvents.CREATED) async handleSandboxCreated(event: SandboxCreatedEvent) { const dto = await this.sandboxService.toSandboxDto(event.sandbox) this.notificationEmitter.emitSandboxCreated(dto) } @OnEvent(SandboxEvents.STATE_UPDATED) async handleSandboxStateUpdated(event: SandboxStateUpdatedEvent) { const dto = await this.sandboxService.toSandboxDto(event.sandbox) this.notificationEmitter.emitSandboxStateUpdated(dto, event.oldState, event.newState) this.redis.publish(SANDBOX_EVENT_CHANNEL, JSON.stringify(event)) } @OnEvent(SandboxEvents.DESIRED_STATE_UPDATED) async handleSandboxDesiredStateUpdated(event: SandboxDesiredStateUpdatedEvent) { const dto = await this.sandboxService.toSandboxDto(event.sandbox) this.notificationEmitter.emitSandboxDesiredStateUpdated(dto, event.oldDesiredState, event.newDesiredState) this.redis.publish(SANDBOX_EVENT_CHANNEL, JSON.stringify(event)) } @OnEvent(SnapshotEvents.CREATED) async handleSnapshotCreated(event: SnapshotCreatedEvent) { const dto = SnapshotDto.fromSnapshot(event.snapshot) this.notificationEmitter.emitSnapshotCreated(dto) } @OnEvent(SnapshotEvents.STATE_UPDATED) async handleSnapshotStateUpdated(event: SnapshotStateUpdatedEvent) { const dto = SnapshotDto.fromSnapshot(event.snapshot) this.notificationEmitter.emitSnapshotStateUpdated(dto, event.oldState, event.newState) } @OnEvent(SnapshotEvents.REMOVED) async handleSnapshotRemoved(event: SnapshotRemovedEvent) { const dto = SnapshotDto.fromSnapshot(event.snapshot) this.notificationEmitter.emitSnapshotRemoved(dto) } @OnEvent(VolumeEvents.CREATED) async handleVolumeCreated(event: VolumeCreatedEvent) { const dto = 
VolumeDto.fromVolume(event.volume) this.notificationEmitter.emitVolumeCreated(dto) } @OnEvent(VolumeEvents.STATE_UPDATED) async handleVolumeStateUpdated(event: VolumeStateUpdatedEvent) { const dto = VolumeDto.fromVolume(event.volume) this.notificationEmitter.emitVolumeStateUpdated(dto, event.oldState, event.newState) } @OnEvent(VolumeEvents.LAST_USED_AT_UPDATED) async handleVolumeLastUsedAtUpdated(event: VolumeLastUsedAtUpdatedEvent) { const dto = VolumeDto.fromVolume(event.volume) this.notificationEmitter.emitVolumeLastUsedAtUpdated(dto) } @OnEvent(RunnerEvents.CREATED) async handleRunnerCreated(event: RunnerCreatedEvent) { const dto = RunnerDto.fromRunner(event.runner) const organizationId = await this.regionService.getOrganizationId(event.runner.region) if (organizationId !== undefined) { this.notificationEmitter.emitRunnerCreated(dto, organizationId) } } @OnEvent(RunnerEvents.STATE_UPDATED) async handleRunnerStateUpdated(event: RunnerStateUpdatedEvent) { const dto = RunnerDto.fromRunner(event.runner) const organizationId = await this.regionService.getOrganizationId(event.runner.region) if (organizationId !== undefined) { this.notificationEmitter.emitRunnerStateUpdated(dto, organizationId, event.oldState, event.newState) } } @OnEvent(RunnerEvents.UNSCHEDULABLE_UPDATED) async handleRunnerUnschedulableUpdated(event: RunnerUnschedulableUpdatedEvent) { const dto = RunnerDto.fromRunner(event.runner) const organizationId = await this.regionService.getOrganizationId(event.runner.region) if (organizationId !== undefined) { this.notificationEmitter.emitRunnerUnschedulableUpdated(dto, organizationId) } } } ================================================ FILE: apps/api/src/object-storage/controllers/object-storage.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get, UseGuards, HttpCode } from '@nestjs/common' import { ApiOAuth2, ApiTags, ApiOperation, ApiResponse, ApiHeader, ApiBearerAuth } from '@nestjs/swagger' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { ObjectStorageService } from '../services/object-storage.service' import { StorageAccessDto } from '../../sandbox/dto/storage-access-dto' import { CustomHeaders } from '../../common/constants/header.constants' import { OrganizationResourceActionGuard } from '../../organization/guards/organization-resource-action.guard' import { AuthContext } from '../../common/decorators/auth-context.decorator' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' @ApiTags('object-storage') @Controller('object-storage') @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(CombinedAuthGuard, OrganizationResourceActionGuard, AuthenticatedRateLimitGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class ObjectStorageController { constructor(private readonly objectStorageService: ObjectStorageService) {} @Get('push-access') @HttpCode(200) @ApiOperation({ summary: 'Get temporary storage access for pushing objects', operationId: 'getPushAccess', }) @ApiResponse({ status: 200, description: 'Temporary storage access has been generated', type: StorageAccessDto, }) async getPushAccess(@AuthContext() authContext: OrganizationAuthContext): Promise { return this.objectStorageService.getPushAccess(authContext.organizationId) } } ================================================ FILE: apps/api/src/object-storage/object-storage.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { ObjectStorageController } from './controllers/object-storage.controller' import { ObjectStorageService } from './services/object-storage.service' import { ConfigModule } from '@nestjs/config' import { OrganizationModule } from '../organization/organization.module' @Module({ imports: [ConfigModule, OrganizationModule], controllers: [ObjectStorageController], providers: [ObjectStorageService], exports: [ObjectStorageService], }) export class ObjectStorageModule {} ================================================ FILE: apps/api/src/object-storage/services/object-storage.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { BadRequestException, Injectable, Logger, ServiceUnavailableException } from '@nestjs/common' import { TypedConfigService } from '../../config/typed-config.service' import { StorageAccessDto } from '../../sandbox/dto/storage-access-dto' import axios from 'axios' import * as aws4 from 'aws4' import * as xml2js from 'xml2js' import { STSClient, AssumeRoleCommand } from '@aws-sdk/client-sts' interface S3Config { endpoint: string stsEndpoint: string accessKey: string secretKey: string bucket: string region: string accountId?: string roleName?: string organizationId: string policy: any } @Injectable() export class ObjectStorageService { private readonly logger = new Logger(ObjectStorageService.name) constructor(private readonly configService: TypedConfigService) {} async getPushAccess(organizationId: string): Promise { if (!this.configService.get('s3.endpoint')) { throw new ServiceUnavailableException('Object storage is not configured') } try { const bucket = this.configService.getOrThrow('s3.defaultBucket') const s3Config: S3Config = { endpoint: this.configService.getOrThrow('s3.endpoint'), stsEndpoint: this.configService.getOrThrow('s3.stsEndpoint'), accessKey: 
this.configService.getOrThrow('s3.accessKey'), secretKey: this.configService.getOrThrow('s3.secretKey'), bucket, region: this.configService.getOrThrow('s3.region'), accountId: this.configService.getOrThrow('s3.accountId'), roleName: this.configService.getOrThrow('s3.roleName'), organizationId, policy: { Version: '2012-10-17', Statement: [ { Effect: 'Allow', Action: ['s3:PutObject', 's3:GetObject'], Resource: [`arn:aws:s3:::${bucket}/${organizationId}/*`], }, // ListBucket only shows object keys and some metadata, not the actual objects { Effect: 'Allow', Action: ['s3:ListBucket'], Resource: [`arn:aws:s3:::${bucket}`], }, ], }, } const isMinioServer = s3Config.endpoint.includes('minio') if (isMinioServer) { return this.getMinioCredentials(s3Config) } else { return this.getAwsCredentials(s3Config) } } catch (error) { this.logger.error('Storage push access error:', error.response?.data || error.message) throw new BadRequestException(`Failed to get temporary credentials: ${error.message}`) } } private async getMinioCredentials(config: S3Config): Promise { const body = new URLSearchParams({ Action: 'AssumeRole', Version: '2011-06-15', DurationSeconds: '3600', // 1 hour (in seconds) Policy: JSON.stringify(config.policy), }) const requestOptions = { host: new URL(config.endpoint).hostname, path: '/minio/v1/assume-role', service: 'sts', method: 'POST', body: body.toString(), headers: { 'Content-Type': 'application/x-www-form-urlencoded', }, } aws4.sign(requestOptions, { accessKeyId: config.accessKey, secretAccessKey: config.secretKey, }) const response = await axios.post(config.stsEndpoint, body.toString(), { headers: requestOptions.headers, }) const parser = new xml2js.Parser({ explicitArray: false }) const parsedData = await parser.parseStringPromise(response.data) if (!parsedData.AssumeRoleResponse.AssumeRoleResult.Credentials) { throw new BadRequestException('MinIO STS response did not return expected credentials') } const creds = 
parsedData.AssumeRoleResponse.AssumeRoleResult.Credentials return { accessKey: creds.AccessKeyId, secret: creds.SecretAccessKey, sessionToken: creds.SessionToken, storageUrl: config.endpoint, organizationId: config.organizationId, bucket: config.bucket, } } private async getAwsCredentials(config: S3Config): Promise { try { const stsClient = new STSClient({ region: config.region, endpoint: config.stsEndpoint, credentials: { accessKeyId: config.accessKey, secretAccessKey: config.secretKey, }, maxAttempts: 3, }) const command = new AssumeRoleCommand({ RoleArn: `arn:aws:iam::${config.accountId}:role/${config.roleName}`, RoleSessionName: `daytona-${config.organizationId}-${Date.now()}`, DurationSeconds: 3600, // One hour Policy: JSON.stringify(config.policy), }) try { const response = await stsClient.send(command) if (!response.Credentials) { throw new BadRequestException('AWS STS response did not return expected credentials') } return { accessKey: response.Credentials.AccessKeyId, secret: response.Credentials.SecretAccessKey, sessionToken: response.Credentials.SessionToken, storageUrl: config.endpoint, organizationId: config.organizationId, bucket: config.bucket, } } catch (error: any) { throw new BadRequestException(`Failed to assume role: ${error.message || 'Unknown AWS error'}`) } } catch (error: any) { this.logger.error(`AWS STS client setup error: ${error.message}`, error.stack) throw new BadRequestException(`Failed to setup AWS client: ${error.message}`) } } } ================================================ FILE: apps/api/src/openapi-webhooks.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { OpenAPIObject, getSchemaPath } from '@nestjs/swagger' import { WebhookEvent } from './webhook/constants/webhook-events.constants' import { SandboxCreatedWebhookDto, SandboxStateUpdatedWebhookDto, SnapshotCreatedWebhookDto, SnapshotStateUpdatedWebhookDto, SnapshotRemovedWebhookDto, VolumeCreatedWebhookDto, VolumeStateUpdatedWebhookDto, } from './webhook/dto/webhook-event-payloads.dto' export interface OpenAPIObjectWithWebhooks extends OpenAPIObject { webhooks?: { [key: string]: { post: { requestBody: { description: string content: { 'application/json': { schema: any } } } responses: { [statusCode: string]: { description: string } } } } } } export function addWebhookDocumentation(document: OpenAPIObject): OpenAPIObjectWithWebhooks { return { ...document, webhooks: { [WebhookEvent.SANDBOX_CREATED]: { post: { requestBody: { description: 'Sandbox created event', content: { 'application/json': { schema: { $ref: getSchemaPath(SandboxCreatedWebhookDto) }, }, }, }, responses: { '200': { description: 'Webhook received successfully', }, }, }, }, [WebhookEvent.SANDBOX_STATE_UPDATED]: { post: { requestBody: { description: 'Sandbox state updated event', content: { 'application/json': { schema: { $ref: getSchemaPath(SandboxStateUpdatedWebhookDto) }, }, }, }, responses: { '200': { description: 'Webhook received successfully', }, }, }, }, [WebhookEvent.SNAPSHOT_CREATED]: { post: { requestBody: { description: 'Snapshot created event', content: { 'application/json': { schema: { $ref: getSchemaPath(SnapshotCreatedWebhookDto) }, }, }, }, responses: { '200': { description: 'Webhook received successfully', }, }, }, }, [WebhookEvent.SNAPSHOT_STATE_UPDATED]: { post: { requestBody: { description: 'Snapshot state updated event', content: { 'application/json': { schema: { $ref: getSchemaPath(SnapshotStateUpdatedWebhookDto) }, }, }, }, responses: { '200': { description: 'Webhook received successfully', }, }, }, }, [WebhookEvent.SNAPSHOT_REMOVED]: 
{ post: { requestBody: { description: 'Snapshot removed event', content: { 'application/json': { schema: { $ref: getSchemaPath(SnapshotRemovedWebhookDto) }, }, }, }, responses: { '200': { description: 'Webhook received successfully', }, }, }, }, [WebhookEvent.VOLUME_CREATED]: { post: { requestBody: { description: 'Volume created event', content: { 'application/json': { schema: { $ref: getSchemaPath(VolumeCreatedWebhookDto) }, }, }, }, responses: { '200': { description: 'Webhook received successfully', }, }, }, }, [WebhookEvent.VOLUME_STATE_UPDATED]: { post: { requestBody: { description: 'Volume state updated event', content: { 'application/json': { schema: { $ref: getSchemaPath(VolumeStateUpdatedWebhookDto) }, }, }, }, responses: { '200': { description: 'Webhook received successfully', }, }, }, }, }, } } ================================================ FILE: apps/api/src/openapi.config.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { DocumentBuilder } from '@nestjs/swagger' const getOpenApiConfig = (oidcIssuer: string) => new DocumentBuilder() .setTitle('Daytona') .addServer('http://localhost:3000') .setDescription('Daytona AI platform API Docs') .setContact('Daytona Platforms Inc.', 'https://www.daytona.io', 'support@daytona.com') .setVersion('1.0') .addBearerAuth({ type: 'http', scheme: 'bearer', description: 'API Key access', }) .addOAuth2({ type: 'openIdConnect', flows: undefined, openIdConnectUrl: `${oidcIssuer}/.well-known/openid-configuration`, }) .build() export { getOpenApiConfig } ================================================ FILE: apps/api/src/organization/constants/global-organization-roles.constant.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
/**
 * Well-known, fixed UUIDs of the globally seeded organization roles.
 * These IDs are stable identifiers (not generated at runtime), so other
 * modules can reference a role without a database lookup.
 */
export const GlobalOrganizationRolesIds = {
  DEVELOPER: '00000000-0000-0000-0000-000000000001',
  SANDBOXES_ADMIN: '00000000-0000-0000-0000-000000000002',
  SNAPSHOTS_ADMIN: '00000000-0000-0000-0000-000000000003',
  REGISTRIES_ADMIN: '00000000-0000-0000-0000-000000000004',
  SUPER_ADMIN: '00000000-0000-0000-0000-000000000005',
  VOLUMES_ADMIN: '00000000-0000-0000-0000-000000000006',
  AUDITOR: '00000000-0000-0000-0000-000000000007',
  INFRASTRUCTURE_ADMIN: '00000000-0000-0000-0000-000000000008',
} as const
/**
 * Sandbox states in which the sandbox is considered to be consuming compute
 * resources (used when tallying an organization's compute usage/quota).
 * Note: UNKNOWN is deliberately included — an unresolved state is counted as
 * consuming rather than free.
 */
export const SANDBOX_STATES_CONSUMING_COMPUTE: SandboxState[] = [
  SandboxState.CREATING,
  SandboxState.RESTORING,
  SandboxState.STARTED,
  SandboxState.STARTING,
  SandboxState.STOPPING,
  SandboxState.PENDING_BUILD,
  SandboxState.BUILDING_SNAPSHOT,
  SandboxState.UNKNOWN,
  SandboxState.PULLING_SNAPSHOT,
]
* SPDX-License-Identifier: AGPL-3.0 */ import { VolumeState } from '../../sandbox/enums/volume-state.enum' export const VOLUME_STATES_CONSUMING_RESOURCES: VolumeState[] = [ VolumeState.CREATING, VolumeState.READY, VolumeState.PENDING_CREATE, VolumeState.PENDING_DELETE, VolumeState.DELETING, ] ================================================ FILE: apps/api/src/organization/controllers/organization-invitation.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Body, Controller, Get, Param, Post, Put, UseGuards } from '@nestjs/common' import { AuthGuard } from '@nestjs/passport' import { ApiOAuth2, ApiTags, ApiOperation, ApiResponse, ApiParam, ApiBearerAuth } from '@nestjs/swagger' import { RequiredOrganizationMemberRole } from '../decorators/required-organization-member-role.decorator' import { CreateOrganizationInvitationDto } from '../dto/create-organization-invitation.dto' import { UpdateOrganizationInvitationDto } from '../dto/update-organization-invitation.dto' import { OrganizationInvitationDto } from '../dto/organization-invitation.dto' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' import { OrganizationActionGuard } from '../guards/organization-action.guard' import { OrganizationInvitationService } from '../services/organization-invitation.service' import { AuthContext } from '../../common/decorators/auth-context.decorator' import { AuthContext as IAuthContext } from '../../common/interfaces/auth-context.interface' import { Audit, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } from '../../audit/enums/audit-target.enum' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' @ApiTags('organizations') @Controller('organizations/:organizationId/invitations') @UseGuards(AuthGuard('jwt'), 
AuthenticatedRateLimitGuard, OrganizationActionGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class OrganizationInvitationController { constructor(private readonly organizationInvitationService: OrganizationInvitationService) {} @Post() @ApiOperation({ summary: 'Create organization invitation', operationId: 'createOrganizationInvitation', }) @ApiResponse({ status: 201, description: 'Organization invitation created successfully', type: OrganizationInvitationDto, }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @RequiredOrganizationMemberRole(OrganizationMemberRole.OWNER) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.ORGANIZATION_INVITATION, targetIdFromResult: (result: OrganizationInvitationDto) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ email: req.body?.email, role: req.body?.role, assignedRoleIds: req.body?.assignedRoleIds, expiresAt: req.body?.expiresAt, }), }, }) async create( @AuthContext() authContext: IAuthContext, @Param('organizationId') organizationId: string, @Body() createOrganizationInvitationDto: CreateOrganizationInvitationDto, ): Promise { const invitation = await this.organizationInvitationService.create( organizationId, createOrganizationInvitationDto, authContext.email, ) return OrganizationInvitationDto.fromOrganizationInvitation(invitation) } @Put('/:invitationId') @ApiOperation({ summary: 'Update organization invitation', operationId: 'updateOrganizationInvitation', }) @ApiResponse({ status: 200, description: 'Organization invitation updated successfully', type: OrganizationInvitationDto, }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @ApiParam({ name: 'invitationId', description: 'Invitation ID', type: 'string', }) @RequiredOrganizationMemberRole(OrganizationMemberRole.OWNER) @Audit({ action: AuditAction.UPDATE, targetType: AuditTarget.ORGANIZATION_INVITATION, targetIdFromRequest: (req) => 
req.params.invitationId, requestMetadata: { body: (req: TypedRequest) => ({ role: req.body?.role, assignedRoleIds: req.body?.assignedRoleIds, expiresAt: req.body?.expiresAt, }), }, }) async update( @Param('organizationId') organizationId: string, @Param('invitationId') invitationId: string, @Body() updateOrganizationInvitationDto: UpdateOrganizationInvitationDto, ): Promise { const invitation = await this.organizationInvitationService.update(invitationId, updateOrganizationInvitationDto) return OrganizationInvitationDto.fromOrganizationInvitation(invitation) } @Get() @ApiOperation({ summary: 'List pending organization invitations', operationId: 'listOrganizationInvitations', }) @ApiResponse({ status: 200, description: 'List of pending organization invitations', type: [OrganizationInvitationDto], }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) async findPending(@Param('organizationId') organizationId: string): Promise { const invitations = await this.organizationInvitationService.findPending(organizationId) return invitations.map(OrganizationInvitationDto.fromOrganizationInvitation) } @Post('/:invitationId/cancel') @ApiOperation({ summary: 'Cancel organization invitation', operationId: 'cancelOrganizationInvitation', }) @ApiResponse({ status: 204, description: 'Organization invitation cancelled successfully', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @ApiParam({ name: 'invitationId', description: 'Invitation ID', type: 'string', }) @RequiredOrganizationMemberRole(OrganizationMemberRole.OWNER) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.ORGANIZATION_INVITATION, targetIdFromRequest: (req) => req.params.invitationId, }) async cancel( @Param('organizationId') organizationId: string, @Param('invitationId') invitationId: string, ): Promise { return this.organizationInvitationService.cancel(invitationId) } } ================================================ FILE: 
apps/api/src/organization/controllers/organization-region.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get, Logger, UseGuards, HttpCode, Post, UseInterceptors, Body, Param, NotFoundException, Delete, Patch, } from '@nestjs/common' import { ApiOAuth2, ApiResponse, ApiOperation, ApiTags, ApiBearerAuth, ApiParam, ApiHeader } from '@nestjs/swagger' import { RequiredOrganizationResourcePermissions } from '../decorators/required-organization-resource-permissions.decorator' import { OrganizationResourcePermission } from '../enums/organization-resource-permission.enum' import { OrganizationResourceActionGuard } from '../guards/organization-resource-action.guard' import { OrganizationService } from '../services/organization.service' import { Audit, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } from '../../audit/enums/audit-target.enum' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { ContentTypeInterceptor } from '../../common/interceptors/content-type.interceptors' import { CreateRegionDto, CreateRegionResponseDto } from '../../region/dto/create-region.dto' import { RegionDto } from '../../region/dto/region.dto' import { RegionService } from '../../region/services/region.service' import { RegionAccessGuard } from '../../region/guards/region-access.guard' import { RegenerateApiKeyResponseDto } from '../../region/dto/regenerate-api-key.dto' import { RegionType } from '../../region/enums/region-type.enum' import { RequireFlagsEnabled } from '@openfeature/nestjs-sdk' import { FeatureFlags } from '../../common/constants/feature-flags' import { CustomHeaders } from '../../common/constants/header.constants' import { AuthContext } from '../../common/decorators/auth-context.decorator' import { OrganizationAuthContext } from 
'../../common/interfaces/auth-context.interface' import { SnapshotManagerCredentialsDto } from '../../region/dto/snapshot-manager-credentials.dto' import { UpdateRegionDto } from '../../region/dto/update-region.dto' @ApiTags('organizations') @Controller('regions') @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(CombinedAuthGuard, OrganizationResourceActionGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class OrganizationRegionController { private readonly logger = new Logger(OrganizationRegionController.name) constructor( private readonly regionService: RegionService, private readonly organizationService: OrganizationService, ) {} @Get() @HttpCode(200) @ApiOperation({ summary: 'List all available regions for the organization', operationId: 'listAvailableRegions', }) @ApiResponse({ status: 200, description: 'List of all available regions', type: [RegionDto], }) async listAvailableRegions(@AuthContext() authContext: OrganizationAuthContext): Promise { return this.organizationService.listAvailableRegions(authContext.organizationId) } @Post() @HttpCode(201) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: 'Create a new region', operationId: 'createRegion', }) @ApiResponse({ status: 201, description: 'The region has been successfully created.', type: CreateRegionResponseDto, }) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.REGION, targetIdFromResult: (result: RegionDto) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ name: req.body?.name, }), }, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_REGIONS]) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.ORGANIZATION_INFRASTRUCTURE, defaultValue: false }] }) async createRegion( @AuthContext() authContext: OrganizationAuthContext, @Body() createRegionDto: CreateRegionDto, ): Promise { return await this.regionService.create( { ...createRegionDto, enforceQuotas: false, regionType: RegionType.CUSTOM, }, 
authContext.organizationId, ) } @Get(':id') @HttpCode(200) @ApiOperation({ summary: 'Get region by ID', operationId: 'getRegionById', }) @ApiResponse({ status: 200, type: RegionDto, }) @ApiParam({ name: 'id', description: 'Region ID', type: String, }) @UseGuards(RegionAccessGuard) async getRegionById(@Param('id') id: string): Promise { const region = await this.regionService.findOne(id) if (!region) { throw new NotFoundException('Region not found') } return RegionDto.fromRegion(region) } @Delete(':id') @HttpCode(204) @ApiOperation({ summary: 'Delete a region', operationId: 'deleteRegion', }) @ApiResponse({ status: 204, description: 'The region has been successfully deleted.', }) @ApiParam({ name: 'id', description: 'Region ID', }) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.REGION, targetIdFromRequest: (req) => req.params.id, }) @UseGuards(RegionAccessGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.DELETE_REGIONS]) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.ORGANIZATION_INFRASTRUCTURE, defaultValue: false }] }) async deleteRegion(@Param('id') id: string): Promise { await this.regionService.delete(id) } @Post(':id/regenerate-proxy-api-key') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: 'Regenerate proxy API key for a region', operationId: 'regenerateProxyApiKey', }) @ApiResponse({ status: 200, description: 'The proxy API key has been successfully regenerated.', type: RegenerateApiKeyResponseDto, }) @ApiParam({ name: 'id', description: 'Region ID', type: String, }) @Audit({ action: AuditAction.REGENERATE_PROXY_API_KEY, targetType: AuditTarget.REGION, targetIdFromRequest: (req) => req.params.id, }) @UseGuards(RegionAccessGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_REGIONS]) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.ORGANIZATION_INFRASTRUCTURE, defaultValue: false }] }) async regenerateProxyApiKey(@Param('id') id: 
string): Promise { const apiKey = await this.regionService.regenerateProxyApiKey(id) return new RegenerateApiKeyResponseDto(apiKey) } @Patch(':id') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: 'Update region configuration', operationId: 'updateRegion', }) @ApiParam({ name: 'id', description: 'Region ID', type: String, }) @Audit({ action: AuditAction.UPDATE, targetType: AuditTarget.REGION, targetIdFromRequest: (req) => req.params.id, requestMetadata: { body: (req: TypedRequest) => ({ ...req.body, }), }, }) @UseGuards(RegionAccessGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_REGIONS]) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.ORGANIZATION_INFRASTRUCTURE, defaultValue: false }] }) async updateRegion(@Param('id') id: string, @Body() updateRegionDto: UpdateRegionDto): Promise { return await this.regionService.update(id, updateRegionDto) } @Post(':id/regenerate-ssh-gateway-api-key') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: 'Regenerate SSH gateway API key for a region', operationId: 'regenerateSshGatewayApiKey', }) @ApiResponse({ status: 200, description: 'The SSH gateway API key has been successfully regenerated.', type: RegenerateApiKeyResponseDto, }) @ApiParam({ name: 'id', description: 'Region ID', type: String, }) @Audit({ action: AuditAction.REGENERATE_SSH_GATEWAY_API_KEY, targetType: AuditTarget.REGION, targetIdFromRequest: (req) => req.params.id, }) @UseGuards(RegionAccessGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_REGIONS]) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.ORGANIZATION_INFRASTRUCTURE, defaultValue: false }] }) async regenerateSshGatewayApiKey(@Param('id') id: string): Promise { const apiKey = await this.regionService.regenerateSshGatewayApiKey(id) return new RegenerateApiKeyResponseDto(apiKey) } @Post(':id/regenerate-snapshot-manager-credentials') @HttpCode(200) 
@UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: 'Regenerate snapshot manager credentials for a region', operationId: 'regenerateSnapshotManagerCredentials', }) @ApiResponse({ status: 200, description: 'The snapshot manager credentials have been successfully regenerated.', type: SnapshotManagerCredentialsDto, }) @ApiParam({ name: 'id', description: 'Region ID', type: String, }) @Audit({ action: AuditAction.REGENERATE_SNAPSHOT_MANAGER_CREDENTIALS, targetType: AuditTarget.REGION, targetIdFromRequest: (req) => req.params.id, }) @UseGuards(RegionAccessGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_REGIONS]) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.ORGANIZATION_INFRASTRUCTURE, defaultValue: false }] }) async regenerateSnapshotManagerCredentials(@Param('id') id: string): Promise { return await this.regionService.regenerateSnapshotManagerCredentials(id) } } ================================================ FILE: apps/api/src/organization/controllers/organization-role.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Body, Controller, Delete, Get, Param, Post, Put, UseGuards } from '@nestjs/common' import { AuthGuard } from '@nestjs/passport' import { ApiOAuth2, ApiTags, ApiOperation, ApiResponse, ApiParam, ApiBearerAuth } from '@nestjs/swagger' import { RequiredOrganizationMemberRole } from '../decorators/required-organization-member-role.decorator' import { CreateOrganizationRoleDto } from '../dto/create-organization-role.dto' import { UpdateOrganizationRoleDto } from '../dto/update-organization-role.dto' import { OrganizationRoleDto } from '../dto/organization-role.dto' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' import { OrganizationActionGuard } from '../guards/organization-action.guard' import { OrganizationRoleService } from '../services/organization-role.service' import { Audit, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } from '../../audit/enums/audit-target.enum' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' @ApiTags('organizations') @Controller('organizations/:organizationId/roles') @UseGuards(AuthGuard('jwt'), AuthenticatedRateLimitGuard, OrganizationActionGuard) @RequiredOrganizationMemberRole(OrganizationMemberRole.OWNER) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class OrganizationRoleController { constructor(private readonly organizationRoleService: OrganizationRoleService) {} @Post() @ApiOperation({ summary: 'Create organization role', operationId: 'createOrganizationRole', }) @ApiResponse({ status: 201, description: 'Organization role created successfully', type: OrganizationRoleDto, }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.ORGANIZATION_ROLE, targetIdFromResult: (result: OrganizationRoleDto) 
=> result?.id, requestMetadata: { body: (req: TypedRequest) => ({ name: req.body?.name, description: req.body?.description, permissions: req.body?.permissions, }), }, }) async create( @Param('organizationId') organizationId: string, @Body() createOrganizationRoleDto: CreateOrganizationRoleDto, ): Promise { const role = await this.organizationRoleService.create(organizationId, createOrganizationRoleDto) return OrganizationRoleDto.fromOrganizationRole(role) } @Get() @ApiOperation({ summary: 'List organization roles', operationId: 'listOrganizationRoles', }) @ApiResponse({ status: 200, description: 'List of organization roles', type: [OrganizationRoleDto], }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) async findAll(@Param('organizationId') organizationId: string): Promise { const roles = await this.organizationRoleService.findAll(organizationId) return roles.map(OrganizationRoleDto.fromOrganizationRole) } @Put('/:roleId') @ApiOperation({ summary: 'Update organization role', operationId: 'updateOrganizationRole', }) @ApiResponse({ status: 200, description: 'Role updated successfully', type: OrganizationRoleDto, }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @ApiParam({ name: 'roleId', description: 'Role ID', type: 'string', }) @Audit({ action: AuditAction.UPDATE, targetType: AuditTarget.ORGANIZATION_ROLE, targetIdFromRequest: (req) => req.params.roleId, requestMetadata: { body: (req: TypedRequest) => ({ name: req.body?.name, description: req.body?.description, permissions: req.body?.permissions, }), }, }) async updateRole( @Param('organizationId') organizationId: string, @Param('roleId') roleId: string, @Body() updateOrganizationRoleDto: UpdateOrganizationRoleDto, ): Promise { const updatedRole = await this.organizationRoleService.update(roleId, updateOrganizationRoleDto) return OrganizationRoleDto.fromOrganizationRole(updatedRole) } @Delete('/:roleId') @ApiOperation({ summary: 
'Delete organization role', operationId: 'deleteOrganizationRole', }) @ApiResponse({ status: 204, description: 'Organization role deleted successfully', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @ApiParam({ name: 'roleId', description: 'Role ID', type: 'string', }) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.ORGANIZATION_ROLE, targetIdFromRequest: (req) => req.params.roleId, }) async delete(@Param('organizationId') organizationId: string, @Param('roleId') roleId: string): Promise { return this.organizationRoleService.delete(roleId) } } ================================================ FILE: apps/api/src/organization/controllers/organization-user.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Body, Controller, Delete, ForbiddenException, Get, Param, Post, UseGuards } from '@nestjs/common' import { AuthGuard } from '@nestjs/passport' import { ApiOAuth2, ApiTags, ApiOperation, ApiResponse, ApiParam, ApiBearerAuth } from '@nestjs/swagger' import { RequiredOrganizationMemberRole } from '../decorators/required-organization-member-role.decorator' import { UpdateOrganizationMemberAccessDto } from '../dto/update-organization-member-access.dto' import { OrganizationUserDto } from '../dto/organization-user.dto' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' import { OrganizationActionGuard } from '../guards/organization-action.guard' import { OrganizationUserService } from '../services/organization-user.service' import { AuthContext } from '../../common/decorators/auth-context.decorator' import { AuthContext as IAuthContext } from '../../common/interfaces/auth-context.interface' import { Audit, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } from 
'../../audit/enums/audit-target.enum' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' @ApiTags('organizations') @Controller('organizations/:organizationId/users') @UseGuards(AuthGuard('jwt'), AuthenticatedRateLimitGuard, OrganizationActionGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class OrganizationUserController { constructor(private readonly organizationUserService: OrganizationUserService) {} @Get() @ApiOperation({ summary: 'List organization members', operationId: 'listOrganizationMembers', }) @ApiResponse({ status: 200, description: 'List of organization members', type: [OrganizationUserDto], }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) async findAll(@Param('organizationId') organizationId: string): Promise { return this.organizationUserService.findAll(organizationId) } @Post('/:userId/access') @ApiOperation({ summary: 'Update access for organization member', operationId: 'updateAccessForOrganizationMember', }) @ApiResponse({ status: 200, description: 'Access updated successfully', type: OrganizationUserDto, }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @ApiParam({ name: 'userId', description: 'User ID', type: 'string', }) @RequiredOrganizationMemberRole(OrganizationMemberRole.OWNER) @Audit({ action: AuditAction.UPDATE_ACCESS, targetType: AuditTarget.ORGANIZATION_USER, targetIdFromRequest: (req) => req.params.userId, requestMetadata: { body: (req: TypedRequest) => ({ role: req.body?.role, assignedRoleIds: req.body?.assignedRoleIds, }), }, }) async updateAccess( @AuthContext() authContext: IAuthContext, @Param('organizationId') organizationId: string, @Param('userId') userId: string, @Body() dto: UpdateOrganizationMemberAccessDto, ): Promise { if (authContext.userId === userId) { throw new ForbiddenException('You cannot update your own access') } return 
this.organizationUserService.updateAccess(organizationId, userId, dto.role, dto.assignedRoleIds) } @Delete('/:userId') @ApiOperation({ summary: 'Delete organization member', operationId: 'deleteOrganizationMember', }) @ApiResponse({ status: 204, description: 'User removed from organization successfully', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @ApiParam({ name: 'userId', description: 'User ID', type: 'string', }) @RequiredOrganizationMemberRole(OrganizationMemberRole.OWNER) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.ORGANIZATION_USER, targetIdFromRequest: (req) => req.params.userId, }) async delete(@Param('organizationId') organizationId: string, @Param('userId') userId: string): Promise { return this.organizationUserService.delete(organizationId, userId) } } ================================================ FILE: apps/api/src/organization/controllers/organization.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Body, Controller, Delete, ForbiddenException, Get, HttpCode, NotFoundException, Param, Patch, Post, Put, UseGuards, } from '@nestjs/common' import { AuthGuard } from '@nestjs/passport' import { ApiOAuth2, ApiTags, ApiOperation, ApiResponse, ApiParam, ApiBody, ApiBearerAuth } from '@nestjs/swagger' import { RequiredOrganizationMemberRole } from '../decorators/required-organization-member-role.decorator' import { CreateOrganizationDto } from '../dto/create-organization.dto' import { OrganizationDto } from '../dto/organization.dto' import { OrganizationInvitationDto } from '../dto/organization-invitation.dto' import { OrganizationUsageOverviewDto } from '../dto/organization-usage-overview.dto' import { UpdateOrganizationQuotaDto } from '../dto/update-organization-quota.dto' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' import { OrganizationActionGuard } from '../guards/organization-action.guard' import { OrganizationService } from '../services/organization.service' import { OrganizationUserService } from '../services/organization-user.service' import { OrganizationInvitationService } from '../services/organization-invitation.service' import { AuthContext } from '../../common/decorators/auth-context.decorator' import { AuthContext as IAuthContext } from '../../common/interfaces/auth-context.interface' import { SystemActionGuard } from '../../auth/system-action.guard' import { RequiredApiRole, RequiredSystemRole } from '../../common/decorators/required-role.decorator' import { SystemRole } from '../../user/enums/system-role.enum' import { OrganizationSuspensionDto } from '../dto/organization-suspension.dto' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { UserService } from '../../user/user.service' import { Audit, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } 
from '../../audit/enums/audit-target.enum' import { EmailUtils } from '../../common/utils/email.util' import { OrganizationUsageService } from '../services/organization-usage.service' import { OrganizationSandboxDefaultLimitedNetworkEgressDto } from '../dto/organization-sandbox-default-limited-network-egress.dto' import { TypedConfigService } from '../../config/typed-config.service' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' import { UpdateOrganizationRegionQuotaDto } from '../dto/update-organization-region-quota.dto' import { UpdateOrganizationDefaultRegionDto } from '../dto/update-organization-default-region.dto' import { RegionQuotaDto } from '../dto/region-quota.dto' import { RequireFlagsEnabled } from '@openfeature/nestjs-sdk' import { OrGuard } from '../../auth/or.guard' import { OtelCollectorGuard } from '../../auth/otel-collector.guard' import { OtelConfigDto } from '../dto/otel-config.dto' @ApiTags('organizations') @Controller('organizations') // TODO: Rethink this. Can we allow access to these methods with API keys as well? 
// @UseGuards(AuthGuard('jwt')) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class OrganizationController { constructor( private readonly organizationService: OrganizationService, private readonly organizationUserService: OrganizationUserService, private readonly organizationInvitationService: OrganizationInvitationService, private readonly organizationUsageService: OrganizationUsageService, private readonly userService: UserService, private readonly configService: TypedConfigService, ) {} @Get('/invitations') @ApiOperation({ summary: 'List organization invitations for authenticated user', operationId: 'listOrganizationInvitationsForAuthenticatedUser', }) @ApiResponse({ status: 200, description: 'List of organization invitations', type: [OrganizationInvitationDto], }) @UseGuards(AuthGuard('jwt')) async findInvitationsByUser(@AuthContext() authContext: IAuthContext): Promise { const invitations = await this.organizationInvitationService.findByUser(authContext.userId) return invitations.map(OrganizationInvitationDto.fromOrganizationInvitation) } @Get('/invitations/count') @ApiOperation({ summary: 'Get count of organization invitations for authenticated user', operationId: 'getOrganizationInvitationsCountForAuthenticatedUser', }) @ApiResponse({ status: 200, description: 'Count of organization invitations', type: Number, }) @UseGuards(AuthGuard('jwt')) async getInvitationsCountByUser(@AuthContext() authContext: IAuthContext): Promise { return this.organizationInvitationService.getCountByUser(authContext.userId) } @Post('/invitations/:invitationId/accept') @ApiOperation({ summary: 'Accept organization invitation', operationId: 'acceptOrganizationInvitation', }) @ApiResponse({ status: 200, description: 'Organization invitation accepted successfully', type: OrganizationInvitationDto, }) @ApiParam({ name: 'invitationId', description: 'Invitation ID', type: 'string', }) @UseGuards(AuthGuard('jwt')) @Audit({ action: AuditAction.ACCEPT, targetType: 
AuditTarget.ORGANIZATION_INVITATION, targetIdFromRequest: (req) => req.params.invitationId, }) async acceptInvitation( @AuthContext() authContext: IAuthContext, @Param('invitationId') invitationId: string, ): Promise { try { const invitation = await this.organizationInvitationService.findOneOrFail(invitationId) if (!EmailUtils.areEqual(invitation.email, authContext.email)) { throw new ForbiddenException('User email does not match invitation email') } } catch (error) { throw new NotFoundException(`Organization invitation with ID ${invitationId} not found`) } const acceptedInvitation = await this.organizationInvitationService.accept(invitationId, authContext.userId) return OrganizationInvitationDto.fromOrganizationInvitation(acceptedInvitation) } @Post('/invitations/:invitationId/decline') @ApiOperation({ summary: 'Decline organization invitation', operationId: 'declineOrganizationInvitation', }) @ApiResponse({ status: 200, description: 'Organization invitation declined successfully', }) @ApiParam({ name: 'invitationId', description: 'Invitation ID', type: 'string', }) @UseGuards(AuthGuard('jwt')) @Audit({ action: AuditAction.DECLINE, targetType: AuditTarget.ORGANIZATION_INVITATION, targetIdFromRequest: (req) => req.params.invitationId, }) async declineInvitation( @AuthContext() authContext: IAuthContext, @Param('invitationId') invitationId: string, ): Promise { try { const invitation = await this.organizationInvitationService.findOneOrFail(invitationId) if (!EmailUtils.areEqual(invitation.email, authContext.email)) { throw new ForbiddenException('User email does not match invitation email') } } catch (error) { throw new NotFoundException(`Organization invitation with ID ${invitationId} not found`) } return this.organizationInvitationService.decline(invitationId) } @Post() @ApiOperation({ summary: 'Create organization', operationId: 'createOrganization', }) @ApiResponse({ status: 201, description: 'Organization created successfully', type: OrganizationDto, }) 
@UseGuards(AuthGuard('jwt')) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.ORGANIZATION, targetIdFromResult: (result: OrganizationDto) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ name: req.body?.name, defaultRegionId: req.body?.defaultRegionId, }), }, }) async create( @AuthContext() authContext: IAuthContext, @Body() createOrganizationDto: CreateOrganizationDto, ): Promise { const user = await this.userService.findOne(authContext.userId) if (!user.emailVerified && !this.configService.get('skipUserEmailVerification')) { throw new ForbiddenException('Please verify your email address') } const organization = await this.organizationService.create(createOrganizationDto, authContext.userId, false, true) return OrganizationDto.fromOrganization(organization) } @Patch('/:organizationId/default-region') @HttpCode(204) @ApiOperation({ summary: 'Set default region for organization', operationId: 'setOrganizationDefaultRegion', }) @ApiResponse({ status: 204, description: 'Default region set successfully', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @ApiBody({ type: UpdateOrganizationDefaultRegionDto, required: true, }) @UseGuards(AuthGuard('jwt'), AuthenticatedRateLimitGuard, OrganizationActionGuard) @RequiredOrganizationMemberRole(OrganizationMemberRole.OWNER) @Audit({ action: AuditAction.UPDATE, targetType: AuditTarget.ORGANIZATION, targetIdFromRequest: (req) => req.params.organizationId, requestMetadata: { body: (req: TypedRequest) => ({ defaultRegionId: req.body?.defaultRegionId, }), }, }) async setDefaultRegion( @Param('organizationId') organizationId: string, @Body() updateDto: UpdateOrganizationDefaultRegionDto, ): Promise { await this.organizationService.setDefaultRegion(organizationId, updateDto.defaultRegionId) } @Get() @ApiOperation({ summary: 'List organizations', operationId: 'listOrganizations', }) @ApiResponse({ status: 200, description: 'List of organizations', type: 
[OrganizationDto], }) @UseGuards(AuthGuard('jwt')) async findAll(@AuthContext() authContext: IAuthContext): Promise { const organizations = await this.organizationService.findByUser(authContext.userId) return organizations.map(OrganizationDto.fromOrganization) } @Get('/:organizationId') @ApiOperation({ summary: 'Get organization by ID', operationId: 'getOrganization', }) @ApiResponse({ status: 200, description: 'Organization details', type: OrganizationDto, }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @UseGuards(AuthGuard('jwt'), OrganizationActionGuard) async findOne(@Param('organizationId') organizationId: string): Promise { const organization = await this.organizationService.findOne(organizationId) if (!organization) { throw new NotFoundException(`Organization with ID ${organizationId} not found`) } return OrganizationDto.fromOrganization(organization) } @Delete('/:organizationId') @ApiOperation({ summary: 'Delete organization', operationId: 'deleteOrganization', }) @ApiResponse({ status: 204, description: 'Organization deleted successfully', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @UseGuards(AuthGuard('jwt'), OrganizationActionGuard) @RequiredOrganizationMemberRole(OrganizationMemberRole.OWNER) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.ORGANIZATION, targetIdFromRequest: (req) => req.params.organizationId, }) async delete(@Param('organizationId') organizationId: string): Promise { return this.organizationService.delete(organizationId) } @Get('/:organizationId/usage') @ApiOperation({ summary: 'Get organization current usage overview', operationId: 'getOrganizationUsageOverview', }) @ApiResponse({ status: 200, description: 'Current usage overview', type: OrganizationUsageOverviewDto, }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @UseGuards(AuthGuard('jwt'), OrganizationActionGuard) async 
getUsageOverview(@Param('organizationId') organizationId: string): Promise { return this.organizationUsageService.getUsageOverview(organizationId) } @Patch('/:organizationId/quota') @HttpCode(204) @ApiOperation({ summary: 'Update organization quota', operationId: 'updateOrganizationQuota', }) @ApiResponse({ status: 204, description: 'Organization quota updated successfully', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @RequiredSystemRole(SystemRole.ADMIN) @UseGuards(CombinedAuthGuard, AuthenticatedRateLimitGuard, SystemActionGuard) @Audit({ action: AuditAction.UPDATE_QUOTA, targetType: AuditTarget.ORGANIZATION, targetIdFromRequest: (req) => req.params.organizationId, requestMetadata: { body: (req: TypedRequest) => ({ maxCpuPerSandbox: req.body?.maxCpuPerSandbox, maxMemoryPerSandbox: req.body?.maxMemoryPerSandbox, maxDiskPerSandbox: req.body?.maxDiskPerSandbox, snapshotQuota: req.body?.snapshotQuota, maxSnapshotSize: req.body?.maxSnapshotSize, volumeQuota: req.body?.volumeQuota, }), }, }) async updateOrganizationQuota( @Param('organizationId') organizationId: string, @Body() updateDto: UpdateOrganizationQuotaDto, ): Promise { await this.organizationService.updateQuota(organizationId, updateDto) } @Patch('/:organizationId/quota/:regionId') @HttpCode(204) @ApiOperation({ summary: 'Update organization region quota', operationId: 'updateOrganizationRegionQuota', }) @ApiResponse({ status: 204, description: 'Region quota updated successfully', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @ApiParam({ name: 'regionId', description: 'ID of the region where the updated quota will be applied', type: 'string', }) @RequiredSystemRole(SystemRole.ADMIN) @UseGuards(CombinedAuthGuard, AuthenticatedRateLimitGuard, SystemActionGuard) @Audit({ action: AuditAction.UPDATE_REGION_QUOTA, targetType: AuditTarget.ORGANIZATION, targetIdFromRequest: (req) => req.params.organizationId, requestMetadata: { 
params: (req) => ({ regionId: req.params.regionId, }), body: (req: TypedRequest) => ({ totalCpuQuota: req.body?.totalCpuQuota, totalMemoryQuota: req.body?.totalMemoryQuota, totalDiskQuota: req.body?.totalDiskQuota, }), }, }) async updateOrganizationRegionQuota( @Param('organizationId') organizationId: string, @Param('regionId') regionId: string, @Body() updateDto: UpdateOrganizationRegionQuotaDto, ): Promise { await this.organizationService.updateRegionQuota(organizationId, regionId, updateDto) } @Post('/:organizationId/leave') @ApiOperation({ summary: 'Leave organization', operationId: 'leaveOrganization', }) @ApiResponse({ status: 204, description: 'Organization left successfully', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @UseGuards(AuthGuard('jwt'), OrganizationActionGuard) @Audit({ action: AuditAction.LEAVE_ORGANIZATION, }) async leave( @AuthContext() authContext: IAuthContext, @Param('organizationId') organizationId: string, ): Promise { return this.organizationUserService.delete(organizationId, authContext.userId) } @Post('/:organizationId/suspend') @ApiOperation({ summary: 'Suspend organization', operationId: 'suspendOrganization', }) @ApiResponse({ status: 204, description: 'Organization suspended successfully', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @ApiBody({ type: OrganizationSuspensionDto, required: false, }) @RequiredSystemRole(SystemRole.ADMIN) @UseGuards(CombinedAuthGuard, AuthenticatedRateLimitGuard, SystemActionGuard) @Audit({ action: AuditAction.SUSPEND, targetType: AuditTarget.ORGANIZATION, targetIdFromRequest: (req) => req.params.organizationId, requestMetadata: { body: (req: TypedRequest) => ({ reason: req.body?.reason, until: req.body?.until, }), }, }) async suspend( @Param('organizationId') organizationId: string, @Body() organizationSuspensionDto?: OrganizationSuspensionDto, ): Promise { return this.organizationService.suspend( organizationId, 
organizationSuspensionDto?.reason, organizationSuspensionDto?.until, organizationSuspensionDto?.suspensionCleanupGracePeriodHours, ) } @Post('/:organizationId/unsuspend') @ApiOperation({ summary: 'Unsuspend organization', operationId: 'unsuspendOrganization', }) @ApiResponse({ status: 204, description: 'Organization unsuspended successfully', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @RequiredSystemRole(SystemRole.ADMIN) @UseGuards(CombinedAuthGuard, AuthenticatedRateLimitGuard, SystemActionGuard) @Audit({ action: AuditAction.UNSUSPEND, targetType: AuditTarget.ORGANIZATION, targetIdFromRequest: (req) => req.params.organizationId, }) async unsuspend(@Param('organizationId') organizationId: string): Promise { return this.organizationService.unsuspend(organizationId) } @Get('/by-sandbox-id/:sandboxId') @ApiOperation({ summary: 'Get organization by sandbox ID', operationId: 'getOrganizationBySandboxId', }) @ApiResponse({ status: 200, description: 'Organization', type: OrganizationDto, }) @ApiParam({ name: 'sandboxId', description: 'Sandbox ID', type: 'string', }) @RequiredApiRole([SystemRole.ADMIN, 'proxy']) @UseGuards(CombinedAuthGuard, AuthenticatedRateLimitGuard, SystemActionGuard) async getBySandboxId(@Param('sandboxId') sandboxId: string): Promise { const organization = await this.organizationService.findBySandboxId(sandboxId) if (!organization) { throw new NotFoundException(`Organization with sandbox ID ${sandboxId} not found`) } return OrganizationDto.fromOrganization(organization) } @Get('/region-quota/by-sandbox-id/:sandboxId') @ApiOperation({ summary: 'Get region quota by sandbox ID', operationId: 'getRegionQuotaBySandboxId', }) @ApiResponse({ status: 200, description: 'Region quota', type: RegionQuotaDto, }) @ApiParam({ name: 'sandboxId', description: 'Sandbox ID', type: 'string', }) @RequiredApiRole([SystemRole.ADMIN, 'proxy']) @UseGuards(CombinedAuthGuard, AuthenticatedRateLimitGuard, SystemActionGuard) async 
getRegionQuotaBySandboxId(@Param('sandboxId') sandboxId: string): Promise { const regionQuota = await this.organizationService.getRegionQuotaBySandboxId(sandboxId) if (!regionQuota) { throw new NotFoundException(`Region quota for sandbox with ID ${sandboxId} not found`) } return regionQuota } @Get('/otel-config/by-sandbox-auth-token/:authToken') @ApiOperation({ summary: 'Get organization OTEL config by sandbox auth token', operationId: 'getOrganizationOtelConfigBySandboxAuthToken', }) @ApiResponse({ status: 200, description: 'OTEL Config', type: OtelConfigDto, }) @ApiParam({ name: 'authToken', description: 'Sandbox Auth Token', type: 'string', }) @RequiredApiRole([SystemRole.ADMIN, 'otel-collector']) @UseGuards(CombinedAuthGuard, OrGuard([SystemActionGuard, OtelCollectorGuard])) async getOtelConfigBySandboxAuthToken(@Param('authToken') authToken: string): Promise { const otelConfigDto = await this.organizationService.getOtelConfigBySandboxAuthToken(authToken) if (!otelConfigDto) { throw new NotFoundException(`Organization OTEL config with sandbox auth token ${authToken} not found`) } return otelConfigDto } @Post('/:organizationId/sandbox-default-limited-network-egress') @ApiOperation({ summary: 'Update sandbox default limited network egress', operationId: 'updateSandboxDefaultLimitedNetworkEgress', }) @ApiResponse({ status: 204, description: 'Sandbox default limited network egress updated successfully', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @RequiredSystemRole(SystemRole.ADMIN) @UseGuards(CombinedAuthGuard, SystemActionGuard) @Audit({ action: AuditAction.UPDATE_SANDBOX_DEFAULT_LIMITED_NETWORK_EGRESS, targetType: AuditTarget.ORGANIZATION, targetIdFromRequest: (req) => req.params.organizationId, requestMetadata: { body: (req: TypedRequest) => ({ sandboxDefaultLimitedNetworkEgress: req.body?.sandboxDefaultLimitedNetworkEgress, }), }, }) async updateSandboxDefaultLimitedNetworkEgress( @Param('organizationId') 
organizationId: string, @Body() body: OrganizationSandboxDefaultLimitedNetworkEgressDto, ): Promise { return this.organizationService.updateSandboxDefaultLimitedNetworkEgress( organizationId, body.sandboxDefaultLimitedNetworkEgress, ) } @Put('/:organizationId/experimental-config') @ApiOperation({ summary: 'Update experimental configuration', operationId: 'updateExperimentalConfig', }) @ApiParam({ name: 'organizationId', description: 'Organization ID', type: 'string', }) @ApiBody({ description: 'Experimental configuration as a JSON object. Set to null to clear the configuration.', required: false, schema: { additionalProperties: true, example: { otel: { endpoint: 'http://otel-collector:4317', headers: { 'api-key': 'XXX', }, }, }, }, }) @RequiredOrganizationMemberRole(OrganizationMemberRole.OWNER) @UseGuards(AuthGuard('jwt'), OrganizationActionGuard) @RequireFlagsEnabled({ flags: [{ flagKey: 'organization_experiments', defaultValue: true }] }) async updateExperimentalConfig( @Param('organizationId') organizationId: string, @Body() experimentalConfig: Record | null, ): Promise { await this.organizationService.updateExperimentalConfig(organizationId, experimentalConfig) } } ================================================ FILE: apps/api/src/organization/decorators/required-organization-member-role.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Reflector } from '@nestjs/core' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' export const RequiredOrganizationMemberRole = Reflector.createDecorator() ================================================ FILE: apps/api/src/organization/decorators/required-organization-resource-permissions.decorator.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Reflector } from '@nestjs/core' import { OrganizationResourcePermission } from '../enums/organization-resource-permission.enum' export const RequiredOrganizationResourcePermissions = Reflector.createDecorator() ================================================ FILE: apps/api/src/organization/dto/create-organization-invitation.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' import { Type } from 'class-transformer' import { IsArray, IsDate, IsEmail, IsEnum, IsOptional, IsString } from 'class-validator' import { GlobalOrganizationRolesIds } from '../constants/global-organization-roles.constant' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' @ApiSchema({ name: 'CreateOrganizationInvitation' }) export class CreateOrganizationInvitationDto { @ApiProperty({ description: 'Email address of the invitee', example: 'mail@example.com', required: true, }) @IsString() @IsEmail() email: string @ApiProperty({ description: 'Organization member role for the invitee', enum: OrganizationMemberRole, default: OrganizationMemberRole.MEMBER, }) @IsEnum(OrganizationMemberRole) role: OrganizationMemberRole @ApiProperty({ description: 'Array of assigned role IDs for the invitee', type: [String], default: [GlobalOrganizationRolesIds.DEVELOPER], }) @IsArray() @IsString({ each: true }) assignedRoleIds: string[] @ApiPropertyOptional({ description: 'Expiration date of the invitation', example: '2021-12-31T23:59:59Z', }) @IsOptional() @Type(() => Date) @IsDate() expiresAt?: Date } ================================================ FILE: apps/api/src/organization/dto/create-organization-quota.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' import { IsNumber, IsOptional } from 'class-validator' @ApiSchema({ name: 'CreateOrganizationQuota' }) export class CreateOrganizationQuotaDto { @ApiPropertyOptional() @IsNumber() @IsOptional() totalCpuQuota?: number @ApiPropertyOptional() @IsNumber() @IsOptional() totalMemoryQuota?: number @ApiPropertyOptional() @IsNumber() @IsOptional() totalDiskQuota?: number @ApiPropertyOptional() @IsNumber() @IsOptional() maxCpuPerSandbox?: number @ApiPropertyOptional() @IsNumber() @IsOptional() maxMemoryPerSandbox?: number @ApiPropertyOptional() @IsNumber() @IsOptional() maxDiskPerSandbox?: number @ApiPropertyOptional() @IsNumber() @IsOptional() snapshotQuota?: number @ApiPropertyOptional() @IsNumber() @IsOptional() maxSnapshotSize?: number @ApiPropertyOptional() @IsNumber() @IsOptional() volumeQuota?: number } ================================================ FILE: apps/api/src/organization/dto/create-organization-role.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { ArrayNotEmpty, IsArray, IsEnum, IsString } from 'class-validator' import { OrganizationResourcePermission } from '../enums/organization-resource-permission.enum' @ApiSchema({ name: 'CreateOrganizationRole' }) export class CreateOrganizationRoleDto { @ApiProperty({ description: 'The name of the role', example: 'Maintainer', required: true, }) @IsString() name: string @ApiProperty({ description: 'The description of the role', example: 'Can manage all resources', }) @IsString() description: string @ApiProperty({ description: 'The list of permissions assigned to the role', enum: OrganizationResourcePermission, isArray: true, required: true, }) @IsArray() @ArrayNotEmpty() @IsEnum(OrganizationResourcePermission, { each: true }) permissions: OrganizationResourcePermission[] } ================================================ FILE: apps/api/src/organization/dto/create-organization.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { IsNotEmpty, IsString } from 'class-validator' @ApiSchema({ name: 'CreateOrganization' }) export class CreateOrganizationDto { @ApiProperty({ description: 'The name of organization', example: 'My Organization', required: true, }) @IsString() @IsNotEmpty() name: string @ApiProperty({ description: 'The ID of the default region for the organization', example: 'us', required: true, }) @IsString() @IsNotEmpty() defaultRegionId: string } ================================================ FILE: apps/api/src/organization/dto/create-organization.internal.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export interface CreateOrganizationInternalDto { name: string defaultRegionId?: string } ================================================ FILE: apps/api/src/organization/dto/organization-invitation.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { OrganizationRoleDto } from './organization-role.dto' import { OrganizationInvitationStatus } from '../enums/organization-invitation-status.enum' import { OrganizationInvitation } from '../entities/organization-invitation.entity' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' @ApiSchema({ name: 'OrganizationInvitation' }) export class OrganizationInvitationDto { @ApiProperty({ description: 'Invitation ID', }) id: string @ApiProperty({ description: 'Email address of the invitee', }) email: string @ApiProperty({ description: 'Email address of the inviter', }) invitedBy: string @ApiProperty({ description: 'Organization ID', }) organizationId: string @ApiProperty({ description: 'Organization name', }) organizationName: string @ApiProperty({ description: 'Expiration date of the invitation', }) expiresAt: Date @ApiProperty({ description: 'Invitation status', enum: OrganizationInvitationStatus, }) status: OrganizationInvitationStatus @ApiProperty({ description: 'Member role', enum: OrganizationMemberRole, }) role: OrganizationMemberRole @ApiProperty({ description: 'Assigned roles', type: [OrganizationRoleDto], }) assignedRoles: OrganizationRoleDto[] @ApiProperty({ description: 'Creation timestamp', }) createdAt: Date @ApiProperty({ description: 'Last update timestamp', }) updatedAt: Date static fromOrganizationInvitation(invitation: OrganizationInvitation): OrganizationInvitationDto { const dto: OrganizationInvitationDto = { id: invitation.id, email: invitation.email, invitedBy: invitation.invitedBy, 
organizationId: invitation.organizationId, organizationName: invitation.organization.name, expiresAt: invitation.expiresAt, status: invitation.status, role: invitation.role, assignedRoles: invitation.assignedRoles.map(OrganizationRoleDto.fromOrganizationRole), createdAt: invitation.createdAt, updatedAt: invitation.updatedAt, } return dto } } ================================================ FILE: apps/api/src/organization/dto/organization-role.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { OrganizationRole } from '../entities/organization-role.entity' import { OrganizationResourcePermission } from '../enums/organization-resource-permission.enum' @ApiSchema({ name: 'OrganizationRole' }) export class OrganizationRoleDto { @ApiProperty({ description: 'Role ID', }) id: string @ApiProperty({ description: 'Role name', }) name: string @ApiProperty({ description: 'Role description', }) description: string @ApiProperty({ description: 'Roles assigned to the user', enum: OrganizationResourcePermission, isArray: true, }) permissions: OrganizationResourcePermission[] @ApiProperty({ description: 'Global role flag', }) isGlobal: boolean @ApiProperty({ description: 'Creation timestamp', }) createdAt: Date @ApiProperty({ description: 'Last update timestamp', }) updatedAt: Date static fromOrganizationRole(role: OrganizationRole): OrganizationRoleDto { const dto: OrganizationRoleDto = { id: role.id, name: role.name, description: role.description, permissions: role.permissions, isGlobal: role.isGlobal, createdAt: role.createdAt, updatedAt: role.updatedAt, } return dto } } ================================================ FILE: apps/api/src/organization/dto/organization-sandbox-default-limited-network-egress.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'OrganizationSandboxDefaultLimitedNetworkEgress' }) export class OrganizationSandboxDefaultLimitedNetworkEgressDto { @ApiProperty({ description: 'Sandbox default limited network egress', }) sandboxDefaultLimitedNetworkEgress: boolean } ================================================ FILE: apps/api/src/organization/dto/organization-suspension.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' import { IsNumber, IsOptional, Min } from 'class-validator' @ApiSchema({ name: 'OrganizationSuspension' }) export class OrganizationSuspensionDto { @ApiProperty({ description: 'Suspension reason', }) reason: string @ApiProperty({ description: 'Suspension until', }) @IsOptional() until?: Date @ApiPropertyOptional({ description: 'Suspension cleanup grace period hours', type: 'number', minimum: 0, }) @IsOptional() @IsNumber() @Min(0) suspensionCleanupGracePeriodHours?: number } ================================================ FILE: apps/api/src/organization/dto/organization-usage-overview.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'RegionUsageOverview' }) export class RegionUsageOverviewDto { @ApiProperty() regionId: string @ApiProperty() totalCpuQuota: number @ApiProperty() currentCpuUsage: number @ApiProperty() totalMemoryQuota: number @ApiProperty() currentMemoryUsage: number @ApiProperty() totalDiskQuota: number @ApiProperty() currentDiskUsage: number } @ApiSchema({ name: 'OrganizationUsageOverview' }) export class OrganizationUsageOverviewDto { @ApiProperty({ type: [RegionUsageOverviewDto], }) regionUsage: RegionUsageOverviewDto[] // Snapshot usage @ApiProperty() totalSnapshotQuota: number @ApiProperty() currentSnapshotUsage: number // Volume usage @ApiProperty() totalVolumeQuota: number @ApiProperty() currentVolumeUsage: number } ================================================ FILE: apps/api/src/organization/dto/organization-user.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' import { OrganizationRoleDto } from './organization-role.dto' import { OrganizationUser } from '../entities/organization-user.entity' import { User } from '../../user/user.entity' @ApiSchema({ name: 'OrganizationUser' }) export class OrganizationUserDto { @ApiProperty({ description: 'User ID', }) userId: string @ApiProperty({ description: 'Organization ID', }) organizationId: string @ApiProperty({ description: 'User name', }) name: string @ApiProperty({ description: 'User email', }) email: string @ApiProperty({ description: 'Member role', enum: OrganizationMemberRole, }) role: OrganizationMemberRole @ApiProperty({ description: 'Roles assigned to the user', type: [OrganizationRoleDto], }) assignedRoles: OrganizationRoleDto[] @ApiProperty({ description: 'Creation timestamp', }) createdAt: Date @ApiProperty({ description: 'Last update timestamp', }) updatedAt: Date static fromEntities(organizationUser: OrganizationUser, user: User | null | undefined): OrganizationUserDto { const dto: OrganizationUserDto = { ...organizationUser, assignedRoles: organizationUser.assignedRoles.map(OrganizationRoleDto.fromOrganizationRole), name: user ? user.name : '', email: user ? user.email : '', } return dto } } ================================================ FILE: apps/api/src/organization/dto/organization.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' import { Organization } from '../entities/organization.entity' @ApiSchema({ name: 'Organization' }) export class OrganizationDto { @ApiProperty({ description: 'Organization ID', }) id: string @ApiProperty({ description: 'Organization name', }) name: string @ApiProperty({ description: 'User ID of the organization creator', }) createdBy: string @ApiProperty({ description: 'Personal organization flag', }) personal: boolean @ApiProperty({ description: 'Creation timestamp', }) createdAt: Date @ApiProperty({ description: 'Last update timestamp', }) updatedAt: Date @ApiProperty({ description: 'Suspended flag', }) suspended: boolean @ApiProperty({ description: 'Suspended at', }) suspendedAt?: Date @ApiProperty({ description: 'Suspended reason', }) suspensionReason?: string @ApiProperty({ description: 'Suspended until', }) suspendedUntil?: Date @ApiProperty({ description: 'Suspension cleanup grace period hours', }) suspensionCleanupGracePeriodHours?: number @ApiProperty({ description: 'Max CPU per sandbox', }) maxCpuPerSandbox: number @ApiProperty({ description: 'Max memory per sandbox', }) maxMemoryPerSandbox: number @ApiProperty({ description: 'Max disk per sandbox', }) maxDiskPerSandbox: number @ApiProperty({ description: 'Time in minutes before an unused snapshot is deactivated', default: 20160, }) snapshotDeactivationTimeoutMinutes: number @ApiProperty({ description: 'Sandbox default network block all', }) sandboxLimitedNetworkEgress: boolean @ApiPropertyOptional({ description: 'Default region ID', required: false, }) defaultRegionId?: string @ApiProperty({ description: 'Authenticated rate limit per minute', nullable: true, }) authenticatedRateLimit: number | null @ApiProperty({ description: 'Sandbox create rate limit per minute', nullable: true, }) sandboxCreateRateLimit: number | null @ApiProperty({ description: 'Sandbox lifecycle rate limit per 
minute', nullable: true, }) sandboxLifecycleRateLimit: number | null @ApiProperty({ description: 'Experimental configuration', }) experimentalConfig: Record | null @ApiProperty({ description: 'Authenticated rate limit TTL in seconds', nullable: true, }) authenticatedRateLimitTtlSeconds: number | null @ApiProperty({ description: 'Sandbox create rate limit TTL in seconds', nullable: true, }) sandboxCreateRateLimitTtlSeconds: number | null @ApiProperty({ description: 'Sandbox lifecycle rate limit TTL in seconds', nullable: true, }) sandboxLifecycleRateLimitTtlSeconds: number | null static fromOrganization(organization: Organization): OrganizationDto { const experimentalConfig = organization._experimentalConfig if (experimentalConfig && experimentalConfig.otel && experimentalConfig.otel.headers) { experimentalConfig.otel.headers = Object.entries(experimentalConfig.otel.headers).reduce( (acc, [key]) => { acc[key] = '******' return acc }, {} as Record, ) } const dto: OrganizationDto = { id: organization.id, name: organization.name, createdBy: organization.createdBy, personal: organization.personal, createdAt: organization.createdAt, updatedAt: organization.updatedAt, suspended: organization.suspended, suspensionReason: organization.suspensionReason, suspendedAt: organization.suspendedAt, suspendedUntil: organization.suspendedUntil, suspensionCleanupGracePeriodHours: organization.suspensionCleanupGracePeriodHours, maxCpuPerSandbox: organization.maxCpuPerSandbox, maxMemoryPerSandbox: organization.maxMemoryPerSandbox, maxDiskPerSandbox: organization.maxDiskPerSandbox, snapshotDeactivationTimeoutMinutes: organization.snapshotDeactivationTimeoutMinutes, sandboxLimitedNetworkEgress: organization.sandboxLimitedNetworkEgress, defaultRegionId: organization.defaultRegionId, authenticatedRateLimit: organization.authenticatedRateLimit, sandboxCreateRateLimit: organization.sandboxCreateRateLimit, sandboxLifecycleRateLimit: organization.sandboxLifecycleRateLimit, experimentalConfig, 
authenticatedRateLimitTtlSeconds: organization.authenticatedRateLimitTtlSeconds, sandboxCreateRateLimitTtlSeconds: organization.sandboxCreateRateLimitTtlSeconds, sandboxLifecycleRateLimitTtlSeconds: organization.sandboxLifecycleRateLimitTtlSeconds, } return dto } } ================================================ FILE: apps/api/src/organization/dto/otel-config.dto.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'OtelConfig' }) export class OtelConfigDto { @ApiProperty({ description: 'Endpoint', }) endpoint: string @ApiProperty({ description: 'Headers', example: { 'x-api-key': 'my-api-key', }, nullable: true, required: false, additionalProperties: { type: 'string' }, }) headers?: Record } ================================================ FILE: apps/api/src/organization/dto/region-quota.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { RegionQuota } from '../entities/region-quota.entity' @ApiSchema({ name: 'RegionQuota' }) export class RegionQuotaDto { @ApiProperty() organizationId: string @ApiProperty() regionId: string @ApiProperty() totalCpuQuota: number @ApiProperty() totalMemoryQuota: number @ApiProperty() totalDiskQuota: number constructor(regionQuota: RegionQuota) { this.organizationId = regionQuota.organizationId this.regionId = regionQuota.regionId this.totalCpuQuota = regionQuota.totalCpuQuota this.totalMemoryQuota = regionQuota.totalMemoryQuota this.totalDiskQuota = regionQuota.totalDiskQuota } } ================================================ FILE: apps/api/src/organization/dto/sandbox-usage-overview-internal.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export type SandboxUsageOverviewInternalDto = { currentCpuUsage: number currentMemoryUsage: number currentDiskUsage: number } export type PendingSandboxUsageOverviewInternalDto = { pendingCpuUsage: number | null pendingMemoryUsage: number | null pendingDiskUsage: number | null } export type SandboxUsageOverviewWithPendingInternalDto = SandboxUsageOverviewInternalDto & PendingSandboxUsageOverviewInternalDto ================================================ FILE: apps/api/src/organization/dto/snapshot-usage-overview-internal.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export type SnapshotUsageOverviewInternalDto = { currentSnapshotUsage: number } export type PendingSnapshotUsageOverviewInternalDto = { pendingSnapshotUsage: number | null } export type SnapshotUsageOverviewWithPendingInternalDto = SnapshotUsageOverviewInternalDto & PendingSnapshotUsageOverviewInternalDto ================================================ FILE: apps/api/src/organization/dto/update-organization-default-region.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { IsNotEmpty, IsString } from 'class-validator' @ApiSchema({ name: 'UpdateOrganizationDefaultRegion' }) export class UpdateOrganizationDefaultRegionDto { @ApiProperty({ description: 'The ID of the default region for the organization', example: 'us', required: true, }) @IsString() @IsNotEmpty() defaultRegionId: string } ================================================ FILE: apps/api/src/organization/dto/update-organization-invitation.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' import { Type } from 'class-transformer' import { IsDate, IsEnum, IsOptional, IsString } from 'class-validator' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' @ApiSchema({ name: 'UpdateOrganizationInvitation' }) export class UpdateOrganizationInvitationDto { @ApiProperty({ description: 'Organization member role', enum: OrganizationMemberRole, }) @IsEnum(OrganizationMemberRole) role: OrganizationMemberRole @ApiProperty({ description: 'Array of role IDs', type: [String], }) @IsString({ each: true }) assignedRoleIds: string[] @ApiPropertyOptional({ description: 'Expiration date of the invitation', example: '2021-12-31T23:59:59Z', }) @IsOptional() @Type(() => Date) @IsDate() expiresAt?: Date } ================================================ FILE: apps/api/src/organization/dto/update-organization-member-access.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { IsArray, IsEnum, IsString } from 'class-validator' import { GlobalOrganizationRolesIds } from '../constants/global-organization-roles.constant' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' @ApiSchema({ name: 'UpdateOrganizationMemberAccess' }) export class UpdateOrganizationMemberAccessDto { @ApiProperty({ description: 'Organization member role', enum: OrganizationMemberRole, default: OrganizationMemberRole.MEMBER, }) @IsEnum(OrganizationMemberRole) role: OrganizationMemberRole @ApiProperty({ description: 'Array of assigned role IDs', type: [String], default: [GlobalOrganizationRolesIds.DEVELOPER], }) @IsArray() @IsString({ each: true }) assignedRoleIds: string[] } ================================================ FILE: apps/api/src/organization/dto/update-organization-quota.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'UpdateOrganizationQuota' }) export class UpdateOrganizationQuotaDto { @ApiProperty({ nullable: true }) maxCpuPerSandbox?: number @ApiProperty({ nullable: true }) maxMemoryPerSandbox?: number @ApiProperty({ nullable: true }) maxDiskPerSandbox?: number @ApiProperty({ nullable: true }) snapshotQuota?: number @ApiProperty({ nullable: true }) maxSnapshotSize?: number @ApiProperty({ nullable: true }) volumeQuota?: number @ApiProperty({ nullable: true }) authenticatedRateLimit?: number @ApiProperty({ nullable: true }) sandboxCreateRateLimit?: number @ApiProperty({ nullable: true }) sandboxLifecycleRateLimit?: number @ApiProperty({ nullable: true }) authenticatedRateLimitTtlSeconds?: number @ApiProperty({ nullable: true }) sandboxCreateRateLimitTtlSeconds?: number @ApiProperty({ nullable: true }) sandboxLifecycleRateLimitTtlSeconds?: number @ApiProperty({ nullable: true, description: 'Time in minutes before an unused snapshot is deactivated' }) snapshotDeactivationTimeoutMinutes?: number } ================================================ FILE: apps/api/src/organization/dto/update-organization-region-quota.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'UpdateOrganizationRegionQuota' }) export class UpdateOrganizationRegionQuotaDto { @ApiProperty({ nullable: true }) totalCpuQuota?: number @ApiProperty({ nullable: true }) totalMemoryQuota?: number @ApiProperty({ nullable: true }) totalDiskQuota?: number } ================================================ FILE: apps/api/src/organization/dto/update-organization-role.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { ArrayNotEmpty, IsArray, IsEnum, IsString } from 'class-validator' import { OrganizationResourcePermission } from '../enums/organization-resource-permission.enum' @ApiSchema({ name: 'UpdateOrganizationRole' }) export class UpdateOrganizationRoleDto { @ApiProperty({ description: 'The name of the role', example: 'Maintainer', required: true, }) @IsString() name: string @ApiProperty({ description: 'The description of the role', example: 'Can manage all resources', }) @IsString() description: string @ApiProperty({ description: 'The list of permissions assigned to the role', enum: OrganizationResourcePermission, isArray: true, required: true, }) @IsArray() @ArrayNotEmpty() @IsEnum(OrganizationResourcePermission, { each: true }) permissions: OrganizationResourcePermission[] } ================================================ FILE: apps/api/src/organization/dto/volume-usage-overview-internal.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export type VolumeUsageOverviewInternalDto = { currentVolumeUsage: number } export type PendingVolumeUsageOverviewInternalDto = { pendingVolumeUsage: number | null } export type VolumeUsageOverviewWithPendingInternalDto = VolumeUsageOverviewInternalDto & PendingVolumeUsageOverviewInternalDto ================================================ FILE: apps/api/src/organization/entities/organization-invitation.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Column, CreateDateColumn, Entity, JoinColumn, JoinTable, ManyToMany, ManyToOne, PrimaryGeneratedColumn, } from 'typeorm' import { Organization } from './organization.entity' import { OrganizationInvitationStatus } from '../enums/organization-invitation-status.enum' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' import { OrganizationRole } from './organization-role.entity' @Entity() export class OrganizationInvitation { @PrimaryGeneratedColumn('uuid') id: string @Column() organizationId: string @Column() email: string @Column({ default: '', }) invitedBy: string @Column({ type: 'enum', enum: OrganizationMemberRole, default: OrganizationMemberRole.MEMBER, }) role: OrganizationMemberRole @ManyToMany(() => OrganizationRole, (role) => role.invitations, { cascade: true, onDelete: 'CASCADE', }) @JoinTable({ name: 'organization_role_assignment_invitation', joinColumn: { name: 'invitationId', referencedColumnName: 'id', }, inverseJoinColumn: { name: 'roleId', referencedColumnName: 'id', }, }) assignedRoles: OrganizationRole[] @Column({ type: 'timestamp with time zone', }) expiresAt: Date @Column({ type: 'enum', enum: OrganizationInvitationStatus, default: OrganizationInvitationStatus.PENDING, }) status: OrganizationInvitationStatus @ManyToOne(() => Organization, { onDelete: 'CASCADE' }) @JoinColumn({ name: 'organizationId' }) organization: Organization @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @CreateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date } ================================================ FILE: apps/api/src/organization/entities/organization-role.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Column, CreateDateColumn, Entity, JoinColumn, ManyToMany, ManyToOne, PrimaryGeneratedColumn } from 'typeorm' import { Organization } from './organization.entity' import { OrganizationUser } from './organization-user.entity' import { OrganizationResourcePermission } from '../enums/organization-resource-permission.enum' import { OrganizationInvitation } from './organization-invitation.entity' @Entity() export class OrganizationRole { @PrimaryGeneratedColumn('uuid') id: string @Column() name: string @Column() description: string @Column({ type: 'enum', enum: OrganizationResourcePermission, array: true, }) permissions: OrganizationResourcePermission[] @Column({ default: false }) isGlobal: boolean @Column({ nullable: true, }) organizationId?: string @ManyToOne(() => Organization, { onDelete: 'CASCADE' }) @JoinColumn({ name: 'organizationId' }) organization: Organization @ManyToMany(() => OrganizationUser, (user) => user.assignedRoles) users: OrganizationUser[] @ManyToMany(() => OrganizationInvitation, (invitation) => invitation.assignedRoles) invitations: OrganizationInvitation[] @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @CreateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date } ================================================ FILE: apps/api/src/organization/entities/organization-user.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Column, CreateDateColumn, Entity, JoinColumn, JoinTable, ManyToMany, ManyToOne, PrimaryColumn } from 'typeorm' import { Organization } from './organization.entity' import { OrganizationRole } from './organization-role.entity' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' @Entity() export class OrganizationUser { @PrimaryColumn() organizationId: string @PrimaryColumn() userId: string @Column({ type: 'enum', enum: OrganizationMemberRole, default: OrganizationMemberRole.MEMBER, }) role: OrganizationMemberRole @ManyToOne(() => Organization, (organization) => organization.users, { onDelete: 'CASCADE', }) @JoinColumn({ name: 'organizationId' }) organization: Organization @ManyToMany(() => OrganizationRole, (role) => role.users, { cascade: true, onDelete: 'CASCADE', }) @JoinTable({ name: 'organization_role_assignment', joinColumns: [ { name: 'organizationId', referencedColumnName: 'organizationId' }, { name: 'userId', referencedColumnName: 'userId' }, ], inverseJoinColumns: [{ name: 'roleId', referencedColumnName: 'id' }], }) assignedRoles: OrganizationRole[] @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @CreateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date } ================================================ FILE: apps/api/src/organization/entities/organization.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Column, CreateDateColumn, Entity, OneToMany, PrimaryGeneratedColumn, UpdateDateColumn } from 'typeorm' import { OrganizationUser } from './organization-user.entity' import { OrganizationRole } from './organization-role.entity' import { OrganizationInvitation } from './organization-invitation.entity' import { RegionQuota } from './region-quota.entity' @Entity() export class Organization { @PrimaryGeneratedColumn('uuid') id: string @Column() name: string @Column() createdBy: string @Column({ default: false, }) personal: boolean @Column({ default: true, }) telemetryEnabled: boolean @Column({ nullable: true }) defaultRegionId?: string @Column({ type: 'int', default: 4, name: 'max_cpu_per_sandbox', }) maxCpuPerSandbox: number @Column({ type: 'int', default: 8, name: 'max_memory_per_sandbox', }) maxMemoryPerSandbox: number @Column({ type: 'int', default: 10, name: 'max_disk_per_sandbox', }) maxDiskPerSandbox: number @Column({ type: 'int', default: 20, name: 'max_snapshot_size', }) maxSnapshotSize: number @Column({ type: 'int', default: 100, name: 'snapshot_quota', }) snapshotQuota: number @Column({ type: 'int', default: 100, name: 'volume_quota', }) volumeQuota: number @Column({ type: 'int', nullable: true, name: 'authenticated_rate_limit', }) authenticatedRateLimit: number | null @Column({ type: 'int', nullable: true, name: 'sandbox_create_rate_limit', }) sandboxCreateRateLimit: number | null @Column({ type: 'int', nullable: true, name: 'sandbox_lifecycle_rate_limit', }) sandboxLifecycleRateLimit: number | null @Column({ type: 'int', nullable: true, name: 'authenticated_rate_limit_ttl_seconds', }) authenticatedRateLimitTtlSeconds: number | null @Column({ type: 'int', nullable: true, name: 'sandbox_create_rate_limit_ttl_seconds', }) sandboxCreateRateLimitTtlSeconds: number | null @Column({ type: 'int', nullable: true, name: 'sandbox_lifecycle_rate_limit_ttl_seconds', }) sandboxLifecycleRateLimitTtlSeconds: number | null 
@OneToMany(() => RegionQuota, (quota) => quota.organization, { cascade: true, onDelete: 'CASCADE', }) regionQuotas: RegionQuota[] @OneToMany(() => OrganizationRole, (organizationRole) => organizationRole.organization, { cascade: true, onDelete: 'CASCADE', }) roles: OrganizationRole[] @OneToMany(() => OrganizationUser, (user) => user.organization, { cascade: true, onDelete: 'CASCADE', }) users: OrganizationUser[] @OneToMany(() => OrganizationInvitation, (invitation) => invitation.organization, { cascade: true, onDelete: 'CASCADE', }) invitations: OrganizationInvitation[] @Column({ default: false, }) suspended: boolean @Column({ nullable: true, type: 'timestamp with time zone', }) suspendedAt?: Date @Column({ nullable: true, }) suspensionReason?: string @Column({ type: 'int', default: 24, }) suspensionCleanupGracePeriodHours: number @Column({ nullable: true, type: 'timestamp with time zone', }) suspendedUntil?: Date @Column({ type: 'int', default: 20160, name: 'snapshot_deactivation_timeout_minutes', }) snapshotDeactivationTimeoutMinutes: number @Column({ default: false, }) sandboxLimitedNetworkEgress: boolean @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @UpdateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date @Column({ type: 'jsonb', nullable: true, name: 'experimentalConfig', }) // configuration for experimental features _experimentalConfig: Record | null get sandboxMetadata(): Record { return { organizationId: this.id, organizationName: this.name, limitNetworkEgress: String(this.sandboxLimitedNetworkEgress), } } constructor(defaultRegionId?: string) { if (defaultRegionId) { this.defaultRegionId = defaultRegionId } } } ================================================ FILE: apps/api/src/organization/entities/region-quota.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Column, CreateDateColumn, Entity, JoinColumn, ManyToOne, PrimaryColumn, UpdateDateColumn } from 'typeorm' import { Organization } from './organization.entity' @Entity() export class RegionQuota { @PrimaryColumn() organizationId: string @PrimaryColumn() regionId: string @ManyToOne(() => Organization, (organization) => organization.regionQuotas, { onDelete: 'CASCADE', }) @JoinColumn({ name: 'organizationId' }) organization: Organization @Column({ type: 'int', default: 10, name: 'total_cpu_quota', }) totalCpuQuota: number @Column({ type: 'int', default: 10, name: 'total_memory_quota', }) totalMemoryQuota: number @Column({ type: 'int', default: 30, name: 'total_disk_quota', }) totalDiskQuota: number @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @UpdateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date constructor( organizationId: string, regionId: string, totalCpuQuota: number, totalMemoryQuota: number, totalDiskQuota: number, ) { this.organizationId = organizationId this.regionId = regionId this.totalCpuQuota = totalCpuQuota this.totalMemoryQuota = totalMemoryQuota this.totalDiskQuota = totalDiskQuota } } ================================================ FILE: apps/api/src/organization/enums/organization-invitation-status.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export enum OrganizationInvitationStatus { PENDING = 'pending', ACCEPTED = 'accepted', DECLINED = 'declined', CANCELLED = 'cancelled', } ================================================ FILE: apps/api/src/organization/enums/organization-member-role.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export enum OrganizationMemberRole { OWNER = 'owner', MEMBER = 'member', } ================================================ FILE: apps/api/src/organization/enums/organization-resource-permission.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ /* IMPORTANT: When adding a new permission, make sure to update apps/dashboard/src/constants/CreateApiKeyPermissionsGroups.ts accordingly */ export enum OrganizationResourcePermission { // docker registries WRITE_REGISTRIES = 'write:registries', DELETE_REGISTRIES = 'delete:registries', // snapshots WRITE_SNAPSHOTS = 'write:snapshots', DELETE_SNAPSHOTS = 'delete:snapshots', // sandboxes WRITE_SANDBOXES = 'write:sandboxes', DELETE_SANDBOXES = 'delete:sandboxes', // volumes READ_VOLUMES = 'read:volumes', WRITE_VOLUMES = 'write:volumes', DELETE_VOLUMES = 'delete:volumes', // regions WRITE_REGIONS = 'write:regions', DELETE_REGIONS = 'delete:regions', // runners READ_RUNNERS = 'read:runners', WRITE_RUNNERS = 'write:runners', DELETE_RUNNERS = 'delete:runners', // audit READ_AUDIT_LOGS = 'read:audit_logs', } ================================================ FILE: apps/api/src/organization/events/organization-invitation-accepted.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { EntityManager } from 'typeorm' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' import { OrganizationRole } from '../entities/organization-role.entity' export class OrganizationInvitationAcceptedEvent { constructor( public readonly entityManager: EntityManager, public readonly organizationId: string, public readonly userId: string, public readonly role: OrganizationMemberRole, public readonly assignedRoles: OrganizationRole[], ) {} } ================================================ FILE: apps/api/src/organization/events/organization-invitation-created.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export class OrganizationInvitationCreatedEvent { constructor( public readonly organizationName: string, public readonly invitedBy: string, public readonly inviteeEmail: string, public readonly invitationId: string, public readonly expiresAt: Date, ) {} } ================================================ FILE: apps/api/src/organization/events/organization-resource-permissions-unassigned.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { EntityManager } from 'typeorm' import { OrganizationResourcePermission } from '../enums/organization-resource-permission.enum' export class OrganizationResourcePermissionsUnassignedEvent { constructor( public readonly entityManager: EntityManager, public readonly organizationId: string, public readonly userId: string, public readonly unassignedPermissions: OrganizationResourcePermission[], ) {} } ================================================ FILE: apps/api/src/organization/events/organization-suspended-sandbox-stopped.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export class OrganizationSuspendedSandboxStoppedEvent { constructor(public readonly sandboxId: string) {} } ================================================ FILE: apps/api/src/organization/events/organization-suspended-snapshot-deactivated.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export class OrganizationSuspendedSnapshotDeactivatedEvent { constructor(public readonly snapshotId: string) {} } ================================================ FILE: apps/api/src/organization/exceptions/DefaultRegionRequiredException.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { HttpException, HttpStatus } from '@nestjs/common' export class DefaultRegionRequiredException extends HttpException { constructor( message = 'This organization does not have a default region. Please open the Daytona Dashboard to set a default region.', ) { super(message, HttpStatus.PRECONDITION_REQUIRED) } } ================================================ FILE: apps/api/src/organization/guards/organization-access.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { CanActivate, ExecutionContext, Injectable, Logger } from '@nestjs/common' import { OrganizationService } from '../services/organization.service' import { OrganizationUserService } from '../services/organization-user.service' import { AuthContext, OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { SystemRole } from '../../user/enums/system-role.enum' import { InjectRedis } from '@nestjs-modules/ioredis' import Redis from 'ioredis' import { Organization } from '../entities/organization.entity' import { OrganizationUser } from '../entities/organization-user.entity' @Injectable() export class OrganizationAccessGuard implements CanActivate { protected readonly logger = new Logger(OrganizationAccessGuard.name) @InjectRedis() private readonly redis: Redis constructor( private readonly organizationService: OrganizationService, private readonly organizationUserService: OrganizationUserService, ) {} async canActivate(context: ExecutionContext): Promise { const request = context.switchToHttp().getRequest() // TODO: initialize authContext safely const authContext: AuthContext = request.user if (!authContext) { this.logger.warn('User object is undefined. 
Authentication may not be set up correctly.') return false } // note: semantic parameter names must be used (avoid :id) const organizationIdParam = request.params.organizationId || request.params.orgId if ( authContext.role !== 'ssh-gateway' && authContext.role !== 'runner' && authContext.role !== 'proxy' && authContext.role !== 'region-proxy' && authContext.role !== 'region-ssh-gateway' && !organizationIdParam && !authContext.organizationId ) { this.logger.warn('Organization ID missing from the request context.') return false } if ( organizationIdParam && authContext.apiKey && authContext.apiKey.organizationId !== organizationIdParam && authContext.role !== SystemRole.ADMIN && authContext.role !== 'ssh-gateway' ) { this.logger.warn( `Organization ID mismatch in the request context. Expected: ${organizationIdParam}, Actual: ${authContext.apiKey.organizationId}`, ) return false } const organizationId = organizationIdParam || authContext.organizationId const organization = await this.getCachedOrganization(organizationId) if (!organization) { this.logger.warn(`Organization not found. Organization ID: ${organizationId}`) return false } const organizationAuthContext: OrganizationAuthContext = { ...authContext, organizationId, organization, } request.user = organizationAuthContext if (authContext.role === SystemRole.ADMIN) { return true } const organizationUser = await this.getCachedOrganizationUser(organizationId, authContext.userId) if (!organizationUser) { this.logger.warn( `Organization user not found. 
User ID: ${authContext.userId}, Organization ID: ${organizationId}`, ) return false } organizationAuthContext.organizationUser = organizationUser request.user = organizationAuthContext return true } private async getCachedOrganization(organizationId: string): Promise { try { const cachedOrganization = await this.redis.get(`organization:${organizationId}`) if (cachedOrganization) { return JSON.parse(cachedOrganization) } const organization = await this.organizationService.findOne(organizationId) if (organization) { await this.redis.set(`organization:${organizationId}`, JSON.stringify(organization), 'EX', 10) return organization } return null } catch (error) { this.logger.error('Error getting cached organization:', error) return null } } private async getCachedOrganizationUser(organizationId: string, userId: string): Promise { try { const cachedOrganizationUser = await this.redis.get(`organization-user:${organizationId}:${userId}`) if (cachedOrganizationUser) { return JSON.parse(cachedOrganizationUser) } const organizationUser = await this.organizationUserService.findOne(organizationId, userId) if (organizationUser) { await this.redis.set( `organization-user:${organizationId}:${userId}`, JSON.stringify(organizationUser), 'EX', 10, ) return organizationUser } return null } catch (ex) { this.logger.error('Error getting cached organization user:', ex) return null } } } ================================================ FILE: apps/api/src/organization/guards/organization-action.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, ExecutionContext, Logger } from '@nestjs/common' import { Reflector } from '@nestjs/core' import { OrganizationAccessGuard } from './organization-access.guard' import { RequiredOrganizationMemberRole } from '../decorators/required-organization-member-role.decorator' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' import { OrganizationService } from '../services/organization.service' import { OrganizationUserService } from '../services/organization-user.service' import { OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { SystemRole } from '../../user/enums/system-role.enum' @Injectable() export class OrganizationActionGuard extends OrganizationAccessGuard { protected readonly logger = new Logger(OrganizationActionGuard.name) constructor( organizationService: OrganizationService, organizationUserService: OrganizationUserService, private readonly reflector: Reflector, ) { super(organizationService, organizationUserService) } async canActivate(context: ExecutionContext): Promise { if (!(await super.canActivate(context))) { return false } const request = context.switchToHttp().getRequest() // TODO: initialize authContext safely const authContext: OrganizationAuthContext = request.user if (authContext.role === SystemRole.ADMIN) { return true } if (!authContext.organizationUser) { return false } const requiredRole = this.reflector.get(RequiredOrganizationMemberRole, context.getHandler()) if (!requiredRole) { return true } if (requiredRole === OrganizationMemberRole.OWNER) { return authContext.organizationUser.role === OrganizationMemberRole.OWNER } return true } } ================================================ FILE: apps/api/src/organization/guards/organization-resource-action.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, ExecutionContext, Logger } from '@nestjs/common' import { Reflector } from '@nestjs/core' import { OrganizationAccessGuard } from './organization-access.guard' import { RequiredOrganizationResourcePermissions } from '../decorators/required-organization-resource-permissions.decorator' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' import { OrganizationService } from '../services/organization.service' import { OrganizationUserService } from '../services/organization-user.service' import { OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { SystemRole } from '../../user/enums/system-role.enum' @Injectable() export class OrganizationResourceActionGuard extends OrganizationAccessGuard { protected readonly logger = new Logger(OrganizationResourceActionGuard.name) constructor( organizationService: OrganizationService, organizationUserService: OrganizationUserService, private readonly reflector: Reflector, ) { super(organizationService, organizationUserService) } async canActivate(context: ExecutionContext): Promise { const canActivate = await super.canActivate(context) const request = context.switchToHttp().getRequest() // TODO: initialize authContext safely const authContext: OrganizationAuthContext = request.user if (authContext.role === SystemRole.ADMIN) { return true } if (!canActivate) { return false } if (!authContext.organizationUser) { return false } if (authContext.organizationUser.role === OrganizationMemberRole.OWNER && !authContext.apiKey) { return true } const requiredPermissions = this.reflector.get(RequiredOrganizationResourcePermissions, context.getHandler()) || this.reflector.get(RequiredOrganizationResourcePermissions, context.getClass()) if (!requiredPermissions) { return true } const assignedPermissions = authContext.apiKey ? 
new Set(authContext.apiKey.permissions) : new Set(authContext.organizationUser.assignedRoles.flatMap((role) => role.permissions)) return requiredPermissions.every((permission) => assignedPermissions.has(permission)) } } ================================================ FILE: apps/api/src/organization/helpers/organization-usage.helper.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export type OrganizationUsageQuotaType = 'cpu' | 'memory' | 'disk' | 'snapshot_count' | 'volume_count' export type OrganizationUsageResourceType = 'sandbox' | 'snapshot' | 'volume' const QUOTA_TO_RESOURCE_MAP: Record = { cpu: 'sandbox', memory: 'sandbox', disk: 'sandbox', snapshot_count: 'snapshot', volume_count: 'volume', } as const export function getResourceTypeFromQuota(quotaType: OrganizationUsageQuotaType): OrganizationUsageResourceType { return QUOTA_TO_RESOURCE_MAP[quotaType] } ================================================ FILE: apps/api/src/organization/organization.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { TypeOrmModule } from '@nestjs/typeorm' import { OrganizationController } from './controllers/organization.controller' import { OrganizationRoleController } from './controllers/organization-role.controller' import { OrganizationUserController } from './controllers/organization-user.controller' import { OrganizationInvitationController } from './controllers/organization-invitation.controller' import { Organization } from './entities/organization.entity' import { OrganizationRole } from './entities/organization-role.entity' import { OrganizationUser } from './entities/organization-user.entity' import { OrganizationInvitation } from './entities/organization-invitation.entity' import { OrganizationService } from './services/organization.service' import { OrganizationRoleService } from './services/organization-role.service' import { OrganizationUserService } from './services/organization-user.service' import { OrganizationInvitationService } from './services/organization-invitation.service' import { UserModule } from '../user/user.module' import { Sandbox } from '../sandbox/entities/sandbox.entity' import { Snapshot } from '../sandbox/entities/snapshot.entity' import { Volume } from '../sandbox/entities/volume.entity' import { RedisLockProvider } from '../sandbox/common/redis-lock.provider' import { SnapshotRunner } from '../sandbox/entities/snapshot-runner.entity' import { OrganizationUsageService } from './services/organization-usage.service' import { DataSource } from 'typeorm' import { EventEmitter2 } from '@nestjs/event-emitter' import { SandboxRepository } from '../sandbox/repositories/sandbox.repository' import { SandboxLookupCacheInvalidationService } from '../sandbox/services/sandbox-lookup-cache-invalidation.service' import { RegionQuota } from './entities/region-quota.entity' import { RegionModule } from '../region/region.module' import { OrganizationRegionController } from 
'./controllers/organization-region.controller' import { Region } from '../region/entities/region.entity' import { EncryptionModule } from '../encryption/encryption.module' @Module({ imports: [ UserModule, RegionModule, TypeOrmModule.forFeature([ Organization, OrganizationRole, OrganizationUser, OrganizationInvitation, Sandbox, Snapshot, Volume, SnapshotRunner, RegionQuota, Region, ]), EncryptionModule, ], controllers: [ OrganizationController, OrganizationRoleController, OrganizationUserController, OrganizationInvitationController, OrganizationRegionController, ], providers: [ OrganizationService, OrganizationRoleService, OrganizationUserService, OrganizationInvitationService, OrganizationUsageService, RedisLockProvider, SandboxLookupCacheInvalidationService, { provide: SandboxRepository, inject: [DataSource, EventEmitter2, SandboxLookupCacheInvalidationService], useFactory: ( dataSource: DataSource, eventEmitter: EventEmitter2, sandboxLookupCacheInvalidationService: SandboxLookupCacheInvalidationService, ) => new SandboxRepository(dataSource, eventEmitter, sandboxLookupCacheInvalidationService), }, ], exports: [ OrganizationService, OrganizationRoleService, OrganizationUserService, OrganizationInvitationService, OrganizationUsageService, ], }) export class OrganizationModule {} ================================================ FILE: apps/api/src/organization/services/organization-invitation.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { BadRequestException, ConflictException, ForbiddenException, Injectable, NotFoundException, } from '@nestjs/common' import { ConfigService } from '@nestjs/config' import { EventEmitter2 } from '@nestjs/event-emitter' import { InjectRepository } from '@nestjs/typeorm' import { DataSource, MoreThan, Repository } from 'typeorm' import { OrganizationRoleService } from './organization-role.service' import { OrganizationUserService } from './organization-user.service' import { OrganizationService } from './organization.service' import { OrganizationEvents } from '../constants/organization-events.constant' import { CreateOrganizationInvitationDto } from '../dto/create-organization-invitation.dto' import { UpdateOrganizationInvitationDto } from '../dto/update-organization-invitation.dto' import { OrganizationInvitation } from '../entities/organization-invitation.entity' import { OrganizationInvitationStatus } from '../enums/organization-invitation-status.enum' import { OrganizationInvitationAcceptedEvent } from '../events/organization-invitation-accepted.event' import { OrganizationInvitationCreatedEvent } from '../events/organization-invitation-created.event' import { UserService } from '../../user/user.service' import { EmailUtils } from '../../common/utils/email.util' @Injectable() export class OrganizationInvitationService { constructor( @InjectRepository(OrganizationInvitation) private readonly organizationInvitationRepository: Repository, private readonly organizationService: OrganizationService, private readonly organizationUserService: OrganizationUserService, private readonly organizationRoleService: OrganizationRoleService, private readonly userService: UserService, private readonly eventEmitter: EventEmitter2, private readonly dataSource: DataSource, private readonly configService: ConfigService, ) {} async create( organizationId: string, createOrganizationInvitationDto: CreateOrganizationInvitationDto, invitedBy: 
string, ): Promise { const organization = await this.organizationService.findOne(organizationId) if (!organization) { throw new NotFoundException(`Organization with ID ${organizationId} not found`) } if (organization.personal) { throw new ForbiddenException('Cannot invite users to personal organization') } const normalizedEmail = EmailUtils.normalize(createOrganizationInvitationDto.email) const existingUser = await this.userService.findOneByEmail(normalizedEmail, true) if (existingUser) { const organizationUser = await this.organizationUserService.findOne(organizationId, existingUser.id) if (organizationUser) { throw new ConflictException(`User with email ${normalizedEmail} is already associated with this organization`) } } const existingInvitation = await this.organizationInvitationRepository.findOne({ where: { organizationId, email: normalizedEmail, status: OrganizationInvitationStatus.PENDING, expiresAt: MoreThan(new Date()), }, }) if (existingInvitation) { throw new ConflictException(`User with email "${normalizedEmail}" already invited to this organization`) } let invitation = new OrganizationInvitation() invitation.organizationId = organizationId invitation.organization = organization invitation.email = normalizedEmail invitation.expiresAt = createOrganizationInvitationDto.expiresAt || new Date(Date.now() + 7 * 24 * 60 * 60 * 1000) invitation.role = createOrganizationInvitationDto.role invitation.invitedBy = invitedBy const assignedRoles = await this.organizationRoleService.findByIds(createOrganizationInvitationDto.assignedRoleIds) if (assignedRoles.length !== createOrganizationInvitationDto.assignedRoleIds.length) { throw new BadRequestException('One or more role IDs are invalid') } invitation.assignedRoles = assignedRoles invitation = await this.organizationInvitationRepository.save(invitation) this.eventEmitter.emit( OrganizationEvents.INVITATION_CREATED, new OrganizationInvitationCreatedEvent( invitation.organization.name, invitation.invitedBy, 
invitation.email, invitation.id, invitation.expiresAt, ), ) return invitation } async update( invitationId: string, updateOrganizationInvitationDto: UpdateOrganizationInvitationDto, ): Promise { const invitation = await this.organizationInvitationRepository.findOne({ where: { id: invitationId }, relations: { organization: true, assignedRoles: true, }, }) if (!invitation) { throw new NotFoundException(`Invitation with ID ${invitationId} not found`) } if (invitation.expiresAt && invitation.expiresAt < new Date()) { throw new ForbiddenException(`Invitation with ID ${invitationId} is expired`) } if (invitation.status !== OrganizationInvitationStatus.PENDING) { throw new ForbiddenException(`Invitation with ID ${invitationId} is already ${invitation.status}`) } if (updateOrganizationInvitationDto.expiresAt) { invitation.expiresAt = updateOrganizationInvitationDto.expiresAt } invitation.role = updateOrganizationInvitationDto.role const assignedRoles = await this.organizationRoleService.findByIds(updateOrganizationInvitationDto.assignedRoleIds) if (assignedRoles.length !== updateOrganizationInvitationDto.assignedRoleIds.length) { throw new BadRequestException('One or more role IDs are invalid') } invitation.assignedRoles = assignedRoles return this.organizationInvitationRepository.save(invitation) } async findPending(organizationId: string): Promise { return this.organizationInvitationRepository.find({ where: { organizationId, status: OrganizationInvitationStatus.PENDING, expiresAt: MoreThan(new Date()), }, relations: { organization: true, assignedRoles: true, }, }) } async findByUser(userId: string): Promise { const user = await this.userService.findOne(userId) if (!user) { throw new NotFoundException(`User with ID ${userId} not found`) } return this.organizationInvitationRepository.find({ where: { email: EmailUtils.normalize(user.email), status: OrganizationInvitationStatus.PENDING, expiresAt: MoreThan(new Date()), }, relations: { organization: true, assignedRoles: true, 
}, }) } async getCountByUser(userId: string): Promise { const user = await this.userService.findOne(userId) if (!user) { throw new NotFoundException(`User with ID ${userId} not found`) } return this.organizationInvitationRepository.count({ where: { email: EmailUtils.normalize(user.email), status: OrganizationInvitationStatus.PENDING, expiresAt: MoreThan(new Date()), }, }) } async findOneOrFail(invitationId: string): Promise { return this.organizationInvitationRepository.findOneOrFail({ where: { id: invitationId }, relations: { organization: true, assignedRoles: true, }, }) } async accept(invitationId: string, userId: string): Promise { const invitation = await this.prepareStatusUpdate(invitationId, OrganizationInvitationStatus.ACCEPTED) await this.dataSource.transaction(async (em) => { await em.save(invitation) await this.eventEmitter.emitAsync( OrganizationEvents.INVITATION_ACCEPTED, new OrganizationInvitationAcceptedEvent( em, invitation.organizationId, userId, invitation.role, invitation.assignedRoles, ), ) }) return invitation } async decline(invitationId: string): Promise { const invitation = await this.prepareStatusUpdate(invitationId, OrganizationInvitationStatus.DECLINED) await this.organizationInvitationRepository.save(invitation) } async cancel(invitationId: string): Promise { const invitation = await this.prepareStatusUpdate(invitationId, OrganizationInvitationStatus.CANCELLED) await this.organizationInvitationRepository.save(invitation) } private async prepareStatusUpdate( invitationId: string, newStatus: OrganizationInvitationStatus, ): Promise { const invitation = await this.organizationInvitationRepository.findOne({ where: { id: invitationId }, relations: { organization: true, assignedRoles: true, }, }) if (!invitation) { throw new NotFoundException(`Invitation with ID ${invitationId} not found`) } if (invitation.expiresAt && invitation.expiresAt < new Date()) { throw new ForbiddenException(`Invitation with ID ${invitationId} is expired`) } if 
(invitation.status !== OrganizationInvitationStatus.PENDING) { throw new ForbiddenException(`Invitation with ID ${invitationId} is already ${invitation.status}`) } invitation.status = newStatus return invitation } } ================================================ FILE: apps/api/src/organization/services/organization-role.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ForbiddenException, Injectable, NotFoundException } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { In, Repository } from 'typeorm' import { CreateOrganizationRoleDto } from '../dto/create-organization-role.dto' import { UpdateOrganizationRoleDto } from '../dto/update-organization-role.dto' import { OrganizationRole } from '../entities/organization-role.entity' @Injectable() export class OrganizationRoleService { constructor( @InjectRepository(OrganizationRole) private readonly organizationRoleRepository: Repository, ) {} async create( organizationId: string, createOrganizationRoleDto: CreateOrganizationRoleDto, ): Promise { const role = new OrganizationRole() role.organizationId = organizationId role.name = createOrganizationRoleDto.name role.description = createOrganizationRoleDto.description role.permissions = createOrganizationRoleDto.permissions return this.organizationRoleRepository.save(role) } async findAll(organizationId: string): Promise { return this.organizationRoleRepository.find({ where: [{ organizationId }, { isGlobal: true }], order: { id: 'ASC', }, }) } async findByIds(roleIds: string[]): Promise { if (roleIds.length === 0) { return [] } return this.organizationRoleRepository.find({ where: { id: In(roleIds), }, }) } async update(roleId: string, updateOrganizationRoleDto: UpdateOrganizationRoleDto): Promise { const role = await this.organizationRoleRepository.findOne({ where: { id: roleId }, }) if (!role) { throw new NotFoundException(`Organization 
role with ID ${roleId} not found`) } if (role.isGlobal) { throw new ForbiddenException('Global roles cannot be updated') } role.name = updateOrganizationRoleDto.name role.description = updateOrganizationRoleDto.description role.permissions = updateOrganizationRoleDto.permissions return this.organizationRoleRepository.save(role) } async delete(roleId: string): Promise { const role = await this.organizationRoleRepository.findOne({ where: { id: roleId }, }) if (!role) { throw new NotFoundException(`Organization role with ID ${roleId} not found`) } if (role.isGlobal) { throw new ForbiddenException('Global roles cannot be deleted') } await this.organizationRoleRepository.remove(role) } } ================================================ FILE: apps/api/src/organization/services/organization-usage.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { BadRequestException, Injectable, Logger, NotFoundException } from '@nestjs/common' import { OnEvent } from '@nestjs/event-emitter' import { InjectRepository } from '@nestjs/typeorm' import { InjectRedis } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { In, Repository } from 'typeorm' import { SANDBOX_STATES_CONSUMING_COMPUTE } from '../constants/sandbox-states-consuming-compute.constant' import { SANDBOX_STATES_CONSUMING_DISK } from '../constants/sandbox-states-consuming-disk.constant' import { SNAPSHOT_STATES_CONSUMING_RESOURCES } from '../constants/snapshot-states-consuming-resources.constant' import { VOLUME_STATES_CONSUMING_RESOURCES } from '../constants/volume-states-consuming-resources.constant' import { OrganizationUsageOverviewDto, RegionUsageOverviewDto } from '../dto/organization-usage-overview.dto' import { PendingSandboxUsageOverviewInternalDto, SandboxUsageOverviewInternalDto, SandboxUsageOverviewWithPendingInternalDto, } from '../dto/sandbox-usage-overview-internal.dto' import { 
PendingSnapshotUsageOverviewInternalDto, SnapshotUsageOverviewInternalDto, SnapshotUsageOverviewWithPendingInternalDto, } from '../dto/snapshot-usage-overview-internal.dto' import { PendingVolumeUsageOverviewInternalDto, VolumeUsageOverviewInternalDto, VolumeUsageOverviewWithPendingInternalDto, } from '../dto/volume-usage-overview-internal.dto' import { Organization } from '../entities/organization.entity' import { OrganizationUsageQuotaType, OrganizationUsageResourceType } from '../helpers/organization-usage.helper' import { RedisLockProvider } from '../../sandbox/common/redis-lock.provider' import { SandboxEvents } from '../../sandbox/constants/sandbox-events.constants' import { SnapshotEvents } from '../../sandbox/constants/snapshot-events' import { VolumeEvents } from '../../sandbox/constants/volume-events' import { Snapshot } from '../../sandbox/entities/snapshot.entity' import { Volume } from '../../sandbox/entities/volume.entity' import { SandboxCreatedEvent } from '../../sandbox/events/sandbox-create.event' import { SandboxStateUpdatedEvent } from '../../sandbox/events/sandbox-state-updated.event' import { SnapshotCreatedEvent } from '../../sandbox/events/snapshot-created.event' import { SnapshotStateUpdatedEvent } from '../../sandbox/events/snapshot-state-updated.event' import { VolumeCreatedEvent } from '../../sandbox/events/volume-created.event' import { VolumeStateUpdatedEvent } from '../../sandbox/events/volume-state-updated.event' import { SandboxDesiredState } from '../../sandbox/enums/sandbox-desired-state.enum' import { SandboxState } from '../../sandbox/enums/sandbox-state.enum' import { OrganizationService } from './organization.service' import { SandboxRepository } from '../../sandbox/repositories/sandbox.repository' @Injectable() export class OrganizationUsageService { private readonly logger = new Logger(OrganizationUsageService.name) /** * Time-to-live for cached quota usage values */ private readonly CACHE_TTL_SECONDS = 60 /** * Cache is 
considered stale if it was last populated from db more than `CACHE_MAX_AGE_MS` ago */ private readonly CACHE_MAX_AGE_MS = 60 * 60 * 1000 constructor( @InjectRedis() private readonly redis: Redis, @InjectRepository(Organization) private readonly organizationRepository: Repository, private readonly sandboxRepository: SandboxRepository, @InjectRepository(Snapshot) private readonly snapshotRepository: Repository, @InjectRepository(Volume) private readonly volumeRepository: Repository, private readonly redisLockProvider: RedisLockProvider, private readonly organizationService: OrganizationService, ) {} /** * Get the current usage overview for all organization quotas. * * @param organizationId * @param organization - Provide the organization entity to avoid fetching it from the database (optional) */ async getUsageOverview(organizationId: string, organization?: Organization): Promise { if (organization && organization.id !== organizationId) { throw new BadRequestException('Organization ID mismatch') } if (!organization) { organization = await this.organizationRepository.findOne({ where: { id: organizationId } }) } if (!organization) { throw new NotFoundException(`Organization with ID ${organizationId} not found`) } const regionQuotas = await this.organizationService.getRegionQuotas(organizationId) const regionUsage: RegionUsageOverviewDto[] = await Promise.all( regionQuotas.map(async (rq) => { const sandboxUsage = await this.getSandboxUsageOverview(organizationId, rq.regionId) const usage: RegionUsageOverviewDto = { regionId: rq.regionId, totalCpuQuota: rq.totalCpuQuota, totalMemoryQuota: rq.totalMemoryQuota, totalDiskQuota: rq.totalDiskQuota, currentCpuUsage: sandboxUsage.currentCpuUsage, currentMemoryUsage: sandboxUsage.currentMemoryUsage, currentDiskUsage: sandboxUsage.currentDiskUsage, } return usage }), ) const snapshotUsage = await this.getSnapshotUsageOverview(organizationId) const volumeUsage = await this.getVolumeUsageOverview(organizationId) return { 
regionUsage, totalSnapshotQuota: organization.snapshotQuota, totalVolumeQuota: organization.volumeQuota, currentSnapshotUsage: snapshotUsage.currentSnapshotUsage, currentVolumeUsage: volumeUsage.currentVolumeUsage, } } /** * Get the current and pending usage overview for sandbox-related organization quotas in a specific region. * * @param organizationId * @param regionId * @param excludeSandboxId - If provided, the usage overview will exclude the current usage of the sandbox with the given ID */ async getSandboxUsageOverview( organizationId: string, regionId: string, excludeSandboxId?: string, ): Promise { let cachedUsageOverview = await this.getCachedSandboxUsageOverview(organizationId, regionId) // cache hit if (cachedUsageOverview) { if (excludeSandboxId) { return await this.excludeSandboxFromUsageOverview(cachedUsageOverview, excludeSandboxId) } return cachedUsageOverview } // cache miss, wait for lock const lockKey = `org:${organizationId}:fetch-sandbox-usage-from-db` await this.redisLockProvider.waitForLock(lockKey, 60) try { // check if cache was updated while waiting for lock cachedUsageOverview = await this.getCachedSandboxUsageOverview(organizationId, regionId) // cache hit if (cachedUsageOverview) { if (excludeSandboxId) { return await this.excludeSandboxFromUsageOverview(cachedUsageOverview, excludeSandboxId) } return cachedUsageOverview } // cache miss, fetch from db const usageOverview = await this.fetchSandboxUsageFromDb(organizationId, regionId) // get pending usage separately since it's not stored in DB const pendingUsageOverview = await this.getCachedPendingSandboxUsageOverview(organizationId, regionId) const combinedUsageOverview: SandboxUsageOverviewWithPendingInternalDto = { ...usageOverview, ...pendingUsageOverview, } if (excludeSandboxId) { return await this.excludeSandboxFromUsageOverview(combinedUsageOverview, excludeSandboxId) } return combinedUsageOverview } finally { await this.redisLockProvider.unlock(lockKey) } } /** * Get the current 
and pending usage overview for snapshot-related organization quotas. * * @param organizationId */ async getSnapshotUsageOverview(organizationId: string): Promise { let cachedUsageOverview = await this.getCachedSnapshotUsageOverview(organizationId) // cache hit if (cachedUsageOverview) { return cachedUsageOverview } // cache miss, wait for lock const lockKey = `org:${organizationId}:fetch-snapshot-usage-from-db` await this.redisLockProvider.waitForLock(lockKey, 60) try { // check if cache was updated while waiting for lock cachedUsageOverview = await this.getCachedSnapshotUsageOverview(organizationId) // cache hit if (cachedUsageOverview) { return cachedUsageOverview } // cache miss, fetch from db const usageOverview = await this.fetchSnapshotUsageFromDb(organizationId) // get pending usage separately since it's not stored in DB const pendingUsageOverview = await this.getCachedPendingSnapshotUsageOverview(organizationId) return { ...usageOverview, ...pendingUsageOverview, } } finally { await this.redisLockProvider.unlock(lockKey) } } /** * Get the current and pending usage overview for volume-related organization quotas. 
* * @param organizationId */ async getVolumeUsageOverview(organizationId: string): Promise { let cachedUsageOverview = await this.getCachedVolumeUsageOverview(organizationId) // cache hit if (cachedUsageOverview) { return cachedUsageOverview } // cache miss, wait for lock const lockKey = `org:${organizationId}:fetch-volume-usage-from-db` await this.redisLockProvider.waitForLock(lockKey, 60) try { // check if cache was updated while waiting for lock cachedUsageOverview = await this.getCachedVolumeUsageOverview(organizationId) // cache hit if (cachedUsageOverview) { return cachedUsageOverview } // cache miss, fetch from db const usageOverview = await this.fetchVolumeUsageFromDb(organizationId) // get pending usage separately since it's not stored in DB const pendingUsageOverview = await this.getCachedPendingVolumeUsageOverview(organizationId) return { ...usageOverview, ...pendingUsageOverview, } } finally { await this.redisLockProvider.unlock(lockKey) } } /** * Exclude the current usage of a specific sandbox from the usage overview. 
* * @param usageOverview * @param excludeSandboxId */ private async excludeSandboxFromUsageOverview( usageOverview: T, excludeSandboxId: string, ): Promise { const excludedSandbox = await this.sandboxRepository.findOne({ where: { id: excludeSandboxId }, }) if (!excludedSandbox) { return usageOverview } let cpuToSubtract = 0 let memToSubtract = 0 let diskToSubtract = 0 if (SANDBOX_STATES_CONSUMING_COMPUTE.includes(excludedSandbox.state)) { cpuToSubtract = excludedSandbox.cpu memToSubtract = excludedSandbox.mem } if (SANDBOX_STATES_CONSUMING_DISK.includes(excludedSandbox.state)) { diskToSubtract = excludedSandbox.disk } return { ...usageOverview, currentCpuUsage: Math.max(0, usageOverview.currentCpuUsage - cpuToSubtract), currentMemoryUsage: Math.max(0, usageOverview.currentMemoryUsage - memToSubtract), currentDiskUsage: Math.max(0, usageOverview.currentDiskUsage - diskToSubtract), } } /** * Get the cached current and pending usage overview for sandbox-related organization quotas in a specific region. 
* * @param organizationId * @param regionId */ private async getCachedSandboxUsageOverview( organizationId: string, regionId: string, ): Promise { const script = ` return { redis.call("GET", KEYS[1]), redis.call("GET", KEYS[2]), redis.call("GET", KEYS[3]), redis.call("GET", KEYS[4]), redis.call("GET", KEYS[5]), redis.call("GET", KEYS[6]) } ` const result = (await this.redis.eval( script, 6, this.getCurrentQuotaUsageCacheKey(organizationId, 'cpu', regionId), this.getCurrentQuotaUsageCacheKey(organizationId, 'memory', regionId), this.getCurrentQuotaUsageCacheKey(organizationId, 'disk', regionId), this.getPendingQuotaUsageCacheKey(organizationId, 'cpu', regionId), this.getPendingQuotaUsageCacheKey(organizationId, 'memory', regionId), this.getPendingQuotaUsageCacheKey(organizationId, 'disk', regionId), )) as (string | null)[] const [cpuUsage, memoryUsage, diskUsage, pendingCpuUsage, pendingMemoryUsage, pendingDiskUsage] = result // Cache miss if (cpuUsage === null || memoryUsage === null || diskUsage === null) { return null } // Check cache staleness for current usage const isStale = await this.isCacheStale(organizationId, 'sandbox', regionId) if (isStale) { return null } // Validate current usage values are non-negative numbers const parsedCpuUsage = this.parseNonNegativeCachedValue(cpuUsage) const parsedMemoryUsage = this.parseNonNegativeCachedValue(memoryUsage) const parsedDiskUsage = this.parseNonNegativeCachedValue(diskUsage) if (parsedCpuUsage === null || parsedMemoryUsage === null || parsedDiskUsage === null) { return null } // Parse pending usage values (null is acceptable) const parsedPendingCpuUsage = this.parseNonNegativeCachedValue(pendingCpuUsage) const parsedPendingMemoryUsage = this.parseNonNegativeCachedValue(pendingMemoryUsage) const parsedPendingDiskUsage = this.parseNonNegativeCachedValue(pendingDiskUsage) return { currentCpuUsage: parsedCpuUsage, currentMemoryUsage: parsedMemoryUsage, currentDiskUsage: parsedDiskUsage, pendingCpuUsage: 
parsedPendingCpuUsage, pendingMemoryUsage: parsedPendingMemoryUsage, pendingDiskUsage: parsedPendingDiskUsage, } } /** * Get the cached pending usage overview for sandbox-related organization quotas in a specific region. * * @param organizationId * @param regionId */ private async getCachedPendingSandboxUsageOverview( organizationId: string, regionId: string, ): Promise { const script = ` return { redis.call("GET", KEYS[1]), redis.call("GET", KEYS[2]), redis.call("GET", KEYS[3]) } ` const result = (await this.redis.eval( script, 3, this.getPendingQuotaUsageCacheKey(organizationId, 'cpu', regionId), this.getPendingQuotaUsageCacheKey(organizationId, 'memory', regionId), this.getPendingQuotaUsageCacheKey(organizationId, 'disk', regionId), )) as (string | null)[] const [pendingCpuUsage, pendingMemoryUsage, pendingDiskUsage] = result const parsedPendingCpuUsage = this.parseNonNegativeCachedValue(pendingCpuUsage) const parsedPendingMemoryUsage = this.parseNonNegativeCachedValue(pendingMemoryUsage) const parsedPendingDiskUsage = this.parseNonNegativeCachedValue(pendingDiskUsage) return { pendingCpuUsage: parsedPendingCpuUsage, pendingMemoryUsage: parsedPendingMemoryUsage, pendingDiskUsage: parsedPendingDiskUsage, } } /** * Get the cached overview for current and pending usage for snapshot-related organization quotas. 
* * @param organizationId */ private async getCachedSnapshotUsageOverview( organizationId: string, ): Promise { const script = ` return { redis.call("GET", KEYS[1]), redis.call("GET", KEYS[2]) } ` const result = (await this.redis.eval( script, 2, this.getCurrentQuotaUsageCacheKey(organizationId, 'snapshot_count'), this.getPendingQuotaUsageCacheKey(organizationId, 'snapshot_count'), )) as (string | null)[] const [currentSnapshotUsage, pendingSnapshotUsage] = result // Cache miss if (currentSnapshotUsage === null) { return null } // Check cache staleness for current usage const isStale = await this.isCacheStale(organizationId, 'snapshot') if (isStale) { return null } // Validate current usage values are non-negative numbers const parsedCurrentSnapshotUsage = this.parseNonNegativeCachedValue(currentSnapshotUsage) if (parsedCurrentSnapshotUsage === null) { return null } // Parse pending usage values (null is acceptable) const parsedPendingSnapshotUsage = this.parseNonNegativeCachedValue(pendingSnapshotUsage) return { currentSnapshotUsage: parsedCurrentSnapshotUsage, pendingSnapshotUsage: parsedPendingSnapshotUsage, } } /** * Get the cached pending usage overview for snapshot-related organization quotas. * * @param organizationId */ private async getCachedPendingSnapshotUsageOverview( organizationId: string, ): Promise { const script = ` return { redis.call("GET", KEYS[1]) } ` const result = (await this.redis.eval( script, 1, this.getPendingQuotaUsageCacheKey(organizationId, 'snapshot_count'), )) as (string | null)[] const [pendingSnapshotUsage] = result // Parse pending usage values (null is acceptable) const parsedPendingSnapshotUsage = this.parseNonNegativeCachedValue(pendingSnapshotUsage) return { pendingSnapshotUsage: parsedPendingSnapshotUsage, } } /** * Get the cached overview for current and pending usage for volume-related organization quotas. 
* * @param organizationId */ private async getCachedVolumeUsageOverview( organizationId: string, ): Promise { const script = ` return { redis.call("GET", KEYS[1]), redis.call("GET", KEYS[2]) } ` const result = (await this.redis.eval( script, 2, this.getCurrentQuotaUsageCacheKey(organizationId, 'volume_count'), this.getPendingQuotaUsageCacheKey(organizationId, 'volume_count'), )) as (string | null)[] const [currentVolumeUsage, pendingVolumeUsage] = result if (currentVolumeUsage === null) { return null } // Check cache staleness for current usage const isStale = await this.isCacheStale(organizationId, 'volume') if (isStale) { return null } // Validate current usage values are non-negative numbers const parsedCurrentVolumeUsage = this.parseNonNegativeCachedValue(currentVolumeUsage) if (parsedCurrentVolumeUsage === null) { return null } // Parse pending usage values (null is acceptable) const parsedPendingVolumeUsage = this.parseNonNegativeCachedValue(pendingVolumeUsage) return { currentVolumeUsage: parsedCurrentVolumeUsage, pendingVolumeUsage: parsedPendingVolumeUsage, } } /** * Get the cached pending usage overview for volume-related organization quotas. * * @param organizationId */ private async getCachedPendingVolumeUsageOverview( organizationId: string, ): Promise { const script = ` return { redis.call("GET", KEYS[1]) } ` const result = (await this.redis.eval( script, 1, this.getPendingQuotaUsageCacheKey(organizationId, 'volume_count'), )) as (string | null)[] const [pendingVolumeUsage] = result // Parse pending usage values (null is acceptable) const parsedPendingVolumeUsage = this.parseNonNegativeCachedValue(pendingVolumeUsage) return { pendingVolumeUsage: parsedPendingVolumeUsage, } } /** * Attempts to parse a given value to a non-negative number. * * @param value - The value to parse. * @returns The parsed non-negative number or `null` if the given value is null or not a non-negative number. 
*/ private parseNonNegativeCachedValue(value: string | null): number | null { if (value === null) { return null } const parsedValue = Number(value) if (isNaN(parsedValue) || parsedValue < 0) { return null } return parsedValue } /** * Fetch the current usage overview for sandbox-related organization quotas in a specific region from the database and cache the results. * * @param organizationId * @param regionId */ async fetchSandboxUsageFromDb(organizationId: string, regionId: string): Promise { // fetch from db // For CPU/memory, we need to also count RESIZING sandboxes that were hot resizing (desiredState = 'started') // since they are still running and consuming compute resources const sandboxUsageMetrics: { used_cpu: number used_mem: number used_disk: number } = await this.sandboxRepository .createQueryBuilder('sandbox') .select([ `SUM(CASE WHEN sandbox.state IN (:...statesConsumingCompute) OR (sandbox.state = :resizingState AND sandbox."desiredState" = :startedDesiredState) THEN sandbox.cpu ELSE 0 END) as used_cpu`, `SUM(CASE WHEN sandbox.state IN (:...statesConsumingCompute) OR (sandbox.state = :resizingState AND sandbox."desiredState" = :startedDesiredState) THEN sandbox.mem ELSE 0 END) as used_mem`, 'SUM(CASE WHEN sandbox.state IN (:...statesConsumingDisk) THEN sandbox.disk ELSE 0 END) as used_disk', ]) .where('sandbox.organizationId = :organizationId', { organizationId }) .andWhere('sandbox.region = :regionId', { regionId }) .setParameter('statesConsumingCompute', SANDBOX_STATES_CONSUMING_COMPUTE) .setParameter('statesConsumingDisk', SANDBOX_STATES_CONSUMING_DISK) .setParameter('resizingState', SandboxState.RESIZING) .setParameter('startedDesiredState', SandboxDesiredState.STARTED) .getRawOne() const cpuUsage = Number(sandboxUsageMetrics.used_cpu) || 0 const memoryUsage = Number(sandboxUsageMetrics.used_mem) || 0 const diskUsage = Number(sandboxUsageMetrics.used_disk) || 0 // cache the results const cpuCacheKey = 
this.getCurrentQuotaUsageCacheKey(organizationId, 'cpu', regionId) const memoryCacheKey = this.getCurrentQuotaUsageCacheKey(organizationId, 'memory', regionId) const diskCacheKey = this.getCurrentQuotaUsageCacheKey(organizationId, 'disk', regionId) await this.redis .multi() .setex(cpuCacheKey, this.CACHE_TTL_SECONDS, cpuUsage) .setex(memoryCacheKey, this.CACHE_TTL_SECONDS, memoryUsage) .setex(diskCacheKey, this.CACHE_TTL_SECONDS, diskUsage) .exec() await this.resetCacheStaleness(organizationId, 'sandbox', regionId) return { currentCpuUsage: cpuUsage, currentMemoryUsage: memoryUsage, currentDiskUsage: diskUsage, } } /** * Fetch the current usage overview for snapshot-related organization quotas from the database and cache the results. * * @param organizationId */ private async fetchSnapshotUsageFromDb(organizationId: string): Promise { // fetch from db const snapshotUsage = await this.snapshotRepository.count({ where: { organizationId, state: In(SNAPSHOT_STATES_CONSUMING_RESOURCES), }, }) // cache the result const cacheKey = this.getCurrentQuotaUsageCacheKey(organizationId, 'snapshot_count') await this.redis.setex(cacheKey, this.CACHE_TTL_SECONDS, snapshotUsage) await this.resetCacheStaleness(organizationId, 'snapshot') return { currentSnapshotUsage: snapshotUsage, } } /** * Fetch the current usage overview for volume-related organization quotas from the database and cache the results. 
* * @param organizationId */ private async fetchVolumeUsageFromDb(organizationId: string): Promise { // fetch from db const volumeUsage = await this.volumeRepository.count({ where: { organizationId, state: In(VOLUME_STATES_CONSUMING_RESOURCES), }, }) // cache the result const cacheKey = this.getCurrentQuotaUsageCacheKey(organizationId, 'volume_count') await this.redis.setex(cacheKey, this.CACHE_TTL_SECONDS, volumeUsage) await this.resetCacheStaleness(organizationId, 'volume') return { currentVolumeUsage: volumeUsage, } } /** * Get the cache key for the current usage of a given organization quota. */ private getCurrentQuotaUsageCacheKey( organizationId: string, quotaType: 'cpu' | 'memory' | 'disk', regionId: string, ): string private getCurrentQuotaUsageCacheKey(organizationId: string, quotaType: 'snapshot_count' | 'volume_count'): string private getCurrentQuotaUsageCacheKey( organizationId: string, quotaType: OrganizationUsageQuotaType, regionId?: string, ): string { return `org:${organizationId}${regionId ? `:region:${regionId}` : ''}:quota:${quotaType}:usage` } /** * Get the cache key for the pending usage of a given organization quota. */ private getPendingQuotaUsageCacheKey( organizationId: string, quotaType: 'cpu' | 'memory' | 'disk', regionId: string, ): string private getPendingQuotaUsageCacheKey(organizationId: string, quotaType: 'snapshot_count' | 'volume_count'): string private getPendingQuotaUsageCacheKey( organizationId: string, quotaType: OrganizationUsageQuotaType, regionId?: string, ): string { return `org:${organizationId}${regionId ? `:region:${regionId}` : ''}:quota:${quotaType}:pending` } /** * Updates the current usage of a given organization quota in the cache. If cache is not present, this method is a no-op. * * If the corresponding quota type has pending usage in the cache and the delta is positive, the pending usage is decremented accordingly. 
*/ private async updateCurrentQuotaUsage( organizationId: string, quotaType: 'cpu' | 'memory' | 'disk', delta: number, regionId: string, ): Promise private async updateCurrentQuotaUsage( organizationId: string, quotaType: 'snapshot_count' | 'volume_count', delta: number, ): Promise private async updateCurrentQuotaUsage( organizationId: string, quotaType: OrganizationUsageQuotaType, delta: number, regionId?: string, ): Promise { const script = ` local cacheKey = KEYS[1] local pendingCacheKey = KEYS[2] local delta = tonumber(ARGV[1]) local ttl = tonumber(ARGV[2]) if redis.call("EXISTS", cacheKey) == 1 then redis.call("INCRBY", cacheKey, delta) redis.call("EXPIRE", cacheKey, ttl) end local pending = tonumber(redis.call("GET", pendingCacheKey)) if pending and pending > 0 and delta > 0 then redis.call("DECRBY", pendingCacheKey, delta) end ` let currentQuotaUsageCacheKey = '' let pendingQuotaUsageCacheKey = '' switch (quotaType) { case 'cpu': case 'memory': case 'disk': currentQuotaUsageCacheKey = this.getCurrentQuotaUsageCacheKey(organizationId, quotaType, regionId) pendingQuotaUsageCacheKey = this.getPendingQuotaUsageCacheKey(organizationId, quotaType, regionId) break case 'snapshot_count': case 'volume_count': currentQuotaUsageCacheKey = this.getCurrentQuotaUsageCacheKey(organizationId, quotaType) pendingQuotaUsageCacheKey = this.getPendingQuotaUsageCacheKey(organizationId, quotaType) break } await this.redis.eval( script, 2, currentQuotaUsageCacheKey, pendingQuotaUsageCacheKey, delta.toString(), this.CACHE_TTL_SECONDS.toString(), ) } /** * Increments the pending usage for sandbox-related organization quotas in a specific region. * * Pending usage is used to protect against race conditions to prevent quota abuse. * * If a user action will result in increased quota usage, we will first increment the pending usage. * * When the user action is complete, this pending usage will be transfered to the actual usage. 
* * As a safeguard, an expiration time is set on the pending usage cache to prevent lockout for new operations. * * @param organizationId * @param regionId * @param cpu - The amount of CPU to increment. * @param memory - The amount of memory to increment. * @param disk - The amount of disk to increment. * @param excludeSandboxId - If provided, pending usage will be incremented only for quotas that are not consumed by the sandbox in its current state. * @returns an object with the boolean values indicating if the pending usage was incremented for each quota type */ async incrementPendingSandboxUsage( organizationId: string, regionId: string, cpu: number, memory: number, disk: number, excludeSandboxId?: string, ): Promise<{ cpuIncremented: boolean memoryIncremented: boolean diskIncremented: boolean }> { // determine for which quota types we should increment the pending usage let shouldIncrementCpu = true let shouldIncrementMemory = true let shouldIncrementDisk = true if (excludeSandboxId) { const excludedSandbox = await this.sandboxRepository.findOne({ where: { id: excludeSandboxId }, }) if (excludedSandbox) { if (SANDBOX_STATES_CONSUMING_COMPUTE.includes(excludedSandbox.state)) { shouldIncrementCpu = false shouldIncrementMemory = false } if (SANDBOX_STATES_CONSUMING_DISK.includes(excludedSandbox.state)) { shouldIncrementDisk = false } } } // increment the pending usage for necessary quota types const script = ` local cpuKey = KEYS[1] local memoryKey = KEYS[2] local diskKey = KEYS[3] local shouldIncrementCpu = ARGV[1] == "true" local shouldIncrementMemory = ARGV[2] == "true" local shouldIncrementDisk = ARGV[3] == "true" local cpuIncrement = tonumber(ARGV[4]) local memoryIncrement = tonumber(ARGV[5]) local diskIncrement = tonumber(ARGV[6]) local ttl = tonumber(ARGV[7]) if shouldIncrementCpu then redis.call("INCRBY", cpuKey, cpuIncrement) redis.call("EXPIRE", cpuKey, ttl) end if shouldIncrementMemory then redis.call("INCRBY", memoryKey, memoryIncrement) 
redis.call("EXPIRE", memoryKey, ttl) end if shouldIncrementDisk then redis.call("INCRBY", diskKey, diskIncrement) redis.call("EXPIRE", diskKey, ttl) end ` await this.redis.eval( script, 3, this.getPendingQuotaUsageCacheKey(organizationId, 'cpu', regionId), this.getPendingQuotaUsageCacheKey(organizationId, 'memory', regionId), this.getPendingQuotaUsageCacheKey(organizationId, 'disk', regionId), shouldIncrementCpu.toString(), shouldIncrementMemory.toString(), shouldIncrementDisk.toString(), cpu.toString(), memory.toString(), disk.toString(), this.CACHE_TTL_SECONDS.toString(), ) return { cpuIncremented: shouldIncrementCpu, memoryIncremented: shouldIncrementMemory, diskIncremented: shouldIncrementDisk, } } /** * Decrements the pending usage for sandbox-related organization quotas in a specific region. * * Use this method to roll back pending usage after incrementing it for an action that was subsequently rejected. * * Pending usage is used to protect against race conditions to prevent quota abuse. * * If a user action will result in increased quota usage, we will first increment the pending usage. * * When the user action is complete, this pending usage will be transfered to the actual usage. * * @param organizationId * @param regionId * @param cpu - If provided, the amount of CPU to decrement. * @param memory - If provided, the amount of memory to decrement. * @param disk - If provided, the amount of disk to decrement. 
*/ async decrementPendingSandboxUsage( organizationId: string, regionId: string, cpu?: number, memory?: number, disk?: number, ): Promise { // decrement the pending usage for necessary quota types const script = ` local cpuKey = KEYS[1] local memoryKey = KEYS[2] local diskKey = KEYS[3] local cpuDecrement = tonumber(ARGV[1]) local memoryDecrement = tonumber(ARGV[2]) local diskDecrement = tonumber(ARGV[3]) if cpuDecrement then redis.call("DECRBY", cpuKey, cpuDecrement) end if memoryDecrement then redis.call("DECRBY", memoryKey, memoryDecrement) end if diskDecrement then redis.call("DECRBY", diskKey, diskDecrement) end ` await this.redis.eval( script, 3, this.getPendingQuotaUsageCacheKey(organizationId, 'cpu', regionId), this.getPendingQuotaUsageCacheKey(organizationId, 'memory', regionId), this.getPendingQuotaUsageCacheKey(organizationId, 'disk', regionId), cpu?.toString() ?? '0', memory?.toString() ?? '0', disk?.toString() ?? '0', ) } /** * Increments the pending usage for snapshot-related organization quotas. * * Pending usage is used to protect against race conditions to prevent quota abuse. * * If a user action will result in increased quota usage, we will first increment the pending usage. * * When the user action is complete, this pending usage will be transfered to the actual usage. * * As a safeguard, an expiration time is set on the pending usage cache to prevent lockout for new operations. * * @param organizationId * @param snapshotCount - The count of snapshots to increment. 
*/ async incrementPendingSnapshotUsage(organizationId: string, snapshotCount: number): Promise { const script = ` local snapshotCountKey = KEYS[1] local snapshotCountIncrement = tonumber(ARGV[1]) local ttl = tonumber(ARGV[2]) redis.call("INCRBY", snapshotCountKey, snapshotCountIncrement) redis.call("EXPIRE", snapshotCountKey, ttl) ` await this.redis.eval( script, 1, this.getPendingQuotaUsageCacheKey(organizationId, 'snapshot_count'), snapshotCount.toString(), this.CACHE_TTL_SECONDS.toString(), ) } /** * Decrements the pending usage for snapshot-related organization quotas. * * Use this method to roll back pending usage after incrementing it for an action that was subsequently rejected. * * Pending usage is used to protect against race conditions to prevent quota abuse. * * If a user action will result in increased quota usage, we will first increment the pending usage. * * When the user action is complete, this pending usage will be transfered to the actual usage. * * @param organizationId * @param snapshotCount - If provided, the count of snapshots to decrement. */ async decrementPendingSnapshotUsage(organizationId: string, snapshotCount?: number): Promise { // decrement the pending usage for necessary quota types const script = ` local snapshotCountKey = KEYS[1] local snapshotCountDecrement = tonumber(ARGV[1]) if snapshotCountDecrement then redis.call("DECRBY", snapshotCountKey, snapshotCountDecrement) end ` await this.redis.eval( script, 1, this.getPendingQuotaUsageCacheKey(organizationId, 'snapshot_count'), snapshotCount?.toString() ?? '0', ) } /** * Increments the pending usage for volume-related organization quotas. * * Pending usage is used to protect against race conditions to prevent quota abuse. * * If a user action will result in increased quota usage, we will first increment the pending usage. * * When the user action is complete, this pending usage will be transfered to the actual usage. 
* * As a safeguard, an expiration time is set on the pending usage cache to prevent lockout for new operations. * * @param organizationId * @param volumeCount - The count of volumes to increment. */ async incrementPendingVolumeUsage(organizationId: string, volumeCount: number): Promise { const script = ` local volumeCountKey = KEYS[1] local volumeCountIncrement = tonumber(ARGV[1]) local ttl = tonumber(ARGV[2]) redis.call("INCRBY", volumeCountKey, volumeCountIncrement) redis.call("EXPIRE", volumeCountKey, ttl) ` await this.redis.eval( script, 1, this.getPendingQuotaUsageCacheKey(organizationId, 'volume_count'), volumeCount.toString(), this.CACHE_TTL_SECONDS.toString(), ) } /** * Decrements the pending usage for volume-related organization quotas. * * Use this method to roll back pending usage after incrementing it for an action that was subsequently rejected. * * Pending usage is used to protect against race conditions to prevent quota abuse. * * If a user action will result in increased quota usage, we will first increment the pending usage. * * When the user action is complete, this pending usage will be transfered to the actual usage. * * @param organizationId * @param volumeCount - If provided, the count of volumes to decrement. */ async decrementPendingVolumeUsage(organizationId: string, volumeCount?: number): Promise { // decrement the pending usage for necessary quota types const script = ` local volumeCountKey = KEYS[1] local volumeCountDecrement = tonumber(ARGV[1]) if volumeCountDecrement then redis.call("DECRBY", volumeCountKey, volumeCountDecrement) end ` await this.redis.eval( script, 1, this.getPendingQuotaUsageCacheKey(organizationId, 'volume_count'), volumeCount?.toString() ?? '0', ) } /** * Apply usage change after resize completes successfully. * Updates current usage and clears pending by the deltas. * Supports both positive (increase) and negative (decrease) deltas. 
* @param organizationId * @param regionId * @param cpuDelta * @param memDelta * @param diskDelta */ async applyResizeUsageChange( organizationId: string, regionId: string, cpuDelta: number, memDelta: number, diskDelta: number, ): Promise { if (cpuDelta !== 0) { await this.updateCurrentQuotaUsage(organizationId, 'cpu', cpuDelta, regionId) } if (memDelta !== 0) { await this.updateCurrentQuotaUsage(organizationId, 'memory', memDelta, regionId) } if (diskDelta !== 0) { await this.updateCurrentQuotaUsage(organizationId, 'disk', diskDelta, regionId) } } /** * Get the cache key for the timestamp of the last time the cached usage of organization quotas for a given resource type was populated from the database. * * @param organizationId * @param resourceType * @param regionId */ private getCacheStalenessKey( organizationId: string, resourceType: OrganizationUsageResourceType, regionId?: string, ): string { return `org:${organizationId}${regionId ? `:region:${regionId}` : ''}:resource:${resourceType}:usage:fetched_at` } /** * Reset the timestamp of the last time the cached usage of organization quotas for a given resource type was populated from the database. */ private resetCacheStaleness(organizationId: string, resourceType: 'sandbox', regionId: string): Promise private resetCacheStaleness(organizationId: string, resourceType: 'snapshot' | 'volume'): Promise private async resetCacheStaleness( organizationId: string, resourceType: OrganizationUsageResourceType, regionId?: string, ): Promise { const cacheKey = this.getCacheStalenessKey(organizationId, resourceType, regionId) await this.redis.set(cacheKey, Date.now()) } /** * Check if the cached usage of organization quotas for a given resource type was last populated from the database more than CACHE_MAX_AGE_MS ago. 
* * @returns `true` if the cached usage is stale, `false` otherwise */ private async isCacheStale(organizationId: string, resourceType: 'sandbox', regionId: string): Promise private async isCacheStale(organizationId: string, resourceType: 'snapshot' | 'volume'): Promise private async isCacheStale( organizationId: string, resourceType: OrganizationUsageResourceType, regionId?: string, ): Promise { const cacheKey = this.getCacheStalenessKey(organizationId, resourceType, regionId) const cachedData = await this.redis.get(cacheKey) if (!cachedData) { return true } const lastFetchedAtTimestamp = Number(cachedData) if (isNaN(lastFetchedAtTimestamp)) { return true } return Date.now() - lastFetchedAtTimestamp > this.CACHE_MAX_AGE_MS } @OnEvent(SandboxEvents.CREATED) async handleSandboxCreated(event: SandboxCreatedEvent) { const lockKey = `sandbox:${event.sandbox.id}:quota-usage-update` await this.redisLockProvider.waitForLock(lockKey, 60) try { await this.updateCurrentQuotaUsage(event.sandbox.organizationId, 'cpu', event.sandbox.cpu, event.sandbox.region) await this.updateCurrentQuotaUsage( event.sandbox.organizationId, 'memory', event.sandbox.mem, event.sandbox.region, ) await this.updateCurrentQuotaUsage(event.sandbox.organizationId, 'disk', event.sandbox.disk, event.sandbox.region) } catch (error) { this.logger.warn( `Error updating cached sandbox quota usage for organization ${event.sandbox.organizationId} in region ${event.sandbox.region}`, error, ) } finally { await this.redisLockProvider.unlock(lockKey) } } @OnEvent(SandboxEvents.STATE_UPDATED) async handleSandboxStateUpdated(event: SandboxStateUpdatedEvent) { if (event.oldState === SandboxState.RESIZING || event.newState === SandboxState.RESIZING) { return } const lockKey = `sandbox:${event.sandbox.id}:quota-usage-update` await this.redisLockProvider.waitForLock(lockKey, 60) // Special case for warm pool sandboxes (otherwise the quota usage deltas would be 0 due to the "unchanged" state) if (event.oldState === 
event.newState && event.newState === SandboxState.STARTED) { try { await this.updateCurrentQuotaUsage(event.sandbox.organizationId, 'cpu', event.sandbox.cpu, event.sandbox.region) await this.updateCurrentQuotaUsage( event.sandbox.organizationId, 'memory', event.sandbox.mem, event.sandbox.region, ) await this.updateCurrentQuotaUsage( event.sandbox.organizationId, 'disk', event.sandbox.disk, event.sandbox.region, ) return } catch (error) { this.logger.warn( `Error updating cached sandbox quota usage for organization ${event.sandbox.organizationId} in region ${event.sandbox.region}`, error, ) return } finally { await this.redisLockProvider.unlock(lockKey) } } try { const cpuDelta = this.calculateQuotaUsageDelta( event.sandbox.cpu, event.oldState, event.newState, SANDBOX_STATES_CONSUMING_COMPUTE, ) const memDelta = this.calculateQuotaUsageDelta( event.sandbox.mem, event.oldState, event.newState, SANDBOX_STATES_CONSUMING_COMPUTE, ) const diskDelta = this.calculateQuotaUsageDelta( event.sandbox.disk, event.oldState, event.newState, SANDBOX_STATES_CONSUMING_DISK, ) if (cpuDelta !== 0) { await this.updateCurrentQuotaUsage(event.sandbox.organizationId, 'cpu', cpuDelta, event.sandbox.region) } if (memDelta !== 0) { await this.updateCurrentQuotaUsage(event.sandbox.organizationId, 'memory', memDelta, event.sandbox.region) } if (diskDelta !== 0) { await this.updateCurrentQuotaUsage(event.sandbox.organizationId, 'disk', diskDelta, event.sandbox.region) } } catch (error) { this.logger.warn( `Error updating cached sandbox quota usage for organization ${event.sandbox.organizationId} in region ${event.sandbox.region}`, error, ) } finally { await this.redisLockProvider.unlock(lockKey) } } @OnEvent(SnapshotEvents.CREATED) async handleSnapshotCreated(event: SnapshotCreatedEvent) { const lockKey = `snapshot:${event.snapshot.id}:quota-usage-update` await this.redisLockProvider.waitForLock(lockKey, 60) try { await this.updateCurrentQuotaUsage(event.snapshot.organizationId, 
'snapshot_count', 1) } catch (error) { this.logger.warn( `Error updating cached snapshot quota usage for organization ${event.snapshot.organizationId}`, error, ) } finally { await this.redisLockProvider.unlock(lockKey) } } @OnEvent(SnapshotEvents.STATE_UPDATED) async handleSnapshotStateUpdated(event: SnapshotStateUpdatedEvent) { const lockKey = `snapshot:${event.snapshot.id}:quota-usage-update` await this.redisLockProvider.waitForLock(lockKey, 60) try { const countDelta = this.calculateQuotaUsageDelta( 1, event.oldState, event.newState, SNAPSHOT_STATES_CONSUMING_RESOURCES, ) if (countDelta !== 0) { await this.updateCurrentQuotaUsage(event.snapshot.organizationId, 'snapshot_count', countDelta) } } catch (error) { this.logger.warn( `Error updating cached snapshot quota usage for organization ${event.snapshot.organizationId}`, error, ) } finally { await this.redisLockProvider.unlock(lockKey) } } @OnEvent(VolumeEvents.CREATED) async handleVolumeCreated(event: VolumeCreatedEvent) { const lockKey = `volume:${event.volume.id}:quota-usage-update` await this.redisLockProvider.waitForLock(lockKey, 60) try { await this.updateCurrentQuotaUsage(event.volume.organizationId, 'volume_count', 1) } catch (error) { this.logger.warn( `Error updating cached volume quota usage for organization ${event.volume.organizationId}`, error, ) } finally { await this.redisLockProvider.unlock(lockKey) } } @OnEvent(VolumeEvents.STATE_UPDATED) async handleVolumeStateUpdated(event: VolumeStateUpdatedEvent) { const lockKey = `volume:${event.volume.id}:quota-usage-update` await this.redisLockProvider.waitForLock(lockKey, 60) try { const countDelta = this.calculateQuotaUsageDelta( 1, event.oldState, event.newState, VOLUME_STATES_CONSUMING_RESOURCES, ) if (countDelta !== 0) { await this.updateCurrentQuotaUsage(event.volume.organizationId, 'volume_count', countDelta) } } catch (error) { this.logger.warn( `Error updating cached volume quota usage for organization ${event.volume.organizationId}`, error, ) } 
finally { await this.redisLockProvider.unlock(lockKey) } } private calculateQuotaUsageDelta( resourceAmount: number, oldState: TState, newState: TState, statesConsumingResource: TState[], ): number { const wasConsumingResource = statesConsumingResource.includes(oldState) const isConsumingResource = statesConsumingResource.includes(newState) if (!wasConsumingResource && isConsumingResource) { return resourceAmount } if (wasConsumingResource && !isConsumingResource) { return -resourceAmount } return 0 } } ================================================ FILE: apps/api/src/organization/services/organization-user.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { BadRequestException, ForbiddenException, Injectable, NotFoundException } from '@nestjs/common' import { EventEmitter2 } from '@nestjs/event-emitter' import { InjectRepository } from '@nestjs/typeorm' import { DataSource, EntityManager, Repository } from 'typeorm' import { OrganizationRoleService } from './organization-role.service' import { OrganizationEvents } from '../constants/organization-events.constant' import { OrganizationUserDto } from '../dto/organization-user.dto' import { OrganizationUser } from '../entities/organization-user.entity' import { OrganizationRole } from '../entities/organization-role.entity' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' import { OrganizationResourcePermission } from '../enums/organization-resource-permission.enum' import { OrganizationInvitationAcceptedEvent } from '../events/organization-invitation-accepted.event' import { OrganizationResourcePermissionsUnassignedEvent } from '../events/organization-resource-permissions-unassigned.event' import { OnAsyncEvent } from '../../common/decorators/on-async-event.decorator' import { UserService } from '../../user/user.service' import { UserEvents } from 
'../../user/constants/user-events.constant' import { UserDeletedEvent } from '../../user/events/user-deleted.event' @Injectable() export class OrganizationUserService { constructor( @InjectRepository(OrganizationUser) private readonly organizationUserRepository: Repository, private readonly organizationRoleService: OrganizationRoleService, private readonly userService: UserService, private readonly eventEmitter: EventEmitter2, private readonly dataSource: DataSource, ) {} async findAll(organizationId: string): Promise { const organizationUsers = await this.organizationUserRepository.find({ where: { organizationId }, relations: { assignedRoles: true, }, }) const userIds = organizationUsers.map((orgUser) => orgUser.userId) const users = await this.userService.findByIds(userIds) const userMap = new Map(users.map((user) => [user.id, user])) const dtos: OrganizationUserDto[] = organizationUsers.map((orgUser) => { const user = userMap.get(orgUser.userId) return OrganizationUserDto.fromEntities(orgUser, user) }) return dtos } async findOne(organizationId: string, userId: string): Promise { return this.organizationUserRepository.findOne({ where: { organizationId, userId }, relations: { assignedRoles: true, }, }) } async exists(organizationId: string, userId: string): Promise { return this.organizationUserRepository.exists({ where: { organizationId, userId }, }) } async updateAccess( organizationId: string, userId: string, role: OrganizationMemberRole, assignedRoleIds: string[], ): Promise { let organizationUser = await this.organizationUserRepository.findOne({ where: { organizationId, userId, }, relations: { assignedRoles: true, }, }) if (!organizationUser) { throw new NotFoundException(`User with ID ${userId} not found in organization with ID ${organizationId}`) } // validate role if (organizationUser.role === OrganizationMemberRole.OWNER && role !== OrganizationMemberRole.OWNER) { const ownersCount = await this.organizationUserRepository.count({ where: { organizationId, 
role: OrganizationMemberRole.OWNER, }, }) if (ownersCount === 1) { throw new ForbiddenException('The organization must have at least one owner') } } // validate assignments const assignedRoles = await this.organizationRoleService.findByIds(assignedRoleIds) if (assignedRoles.length !== assignedRoleIds.length) { throw new BadRequestException('One or more role IDs are invalid') } // check if any previous permissions are not present in the new assignments, api keys with those permissions will be revoked let permissionsToRevoke: OrganizationResourcePermission[] = [] if (role !== OrganizationMemberRole.OWNER) { const prevPermissions = this.getAssignedPermissions(organizationUser.role, organizationUser.assignedRoles) const newPermissions = this.getAssignedPermissions(role, assignedRoles) permissionsToRevoke = Array.from(prevPermissions).filter((permission) => !newPermissions.has(permission)) } organizationUser.role = role organizationUser.assignedRoles = assignedRoles if (permissionsToRevoke.length > 0) { await this.dataSource.transaction(async (em) => { organizationUser = await em.save(organizationUser) await this.eventEmitter.emitAsync( OrganizationEvents.PERMISSIONS_UNASSIGNED, new OrganizationResourcePermissionsUnassignedEvent(em, organizationId, userId, permissionsToRevoke), ) }) } else { organizationUser = await this.organizationUserRepository.save(organizationUser) } const user = await this.userService.findOne(userId) return OrganizationUserDto.fromEntities(organizationUser, user) } async delete(organizationId: string, userId: string): Promise { const organizationUser = await this.organizationUserRepository.findOne({ where: { organizationId, userId, }, }) if (!organizationUser) { throw new NotFoundException(`User with ID ${userId} not found in organization with ID ${organizationId}`) } await this.removeWithEntityManager(this.organizationUserRepository.manager, organizationUser) } private async removeWithEntityManager( entityManager: EntityManager, organizationUser: 
OrganizationUser, force = false, ): Promise { if (!force) { if (organizationUser.role === OrganizationMemberRole.OWNER) { const ownersCount = await entityManager.count(OrganizationUser, { where: { organizationId: organizationUser.organizationId, role: OrganizationMemberRole.OWNER, }, }) if (ownersCount === 1) { throw new ForbiddenException( `Organization with ID ${organizationUser.organizationId} must have at least one owner`, ) } } } await entityManager.remove(organizationUser) } private async createWithEntityManager( entityManager: EntityManager, organizationId: string, userId: string, role: OrganizationMemberRole, assignedRoles: OrganizationRole[], ): Promise { const organizationUser = new OrganizationUser() organizationUser.organizationId = organizationId organizationUser.userId = userId organizationUser.role = role organizationUser.assignedRoles = assignedRoles return entityManager.save(organizationUser) } @OnAsyncEvent({ event: OrganizationEvents.INVITATION_ACCEPTED, }) async handleOrganizationInvitationAcceptedEvent( payload: OrganizationInvitationAcceptedEvent, ): Promise { return this.createWithEntityManager( payload.entityManager, payload.organizationId, payload.userId, payload.role, payload.assignedRoles, ) } @OnAsyncEvent({ event: UserEvents.DELETED, }) async handleUserDeletedEvent(payload: UserDeletedEvent): Promise { const memberships = await payload.entityManager.find(OrganizationUser, { where: { userId: payload.userId, organization: { personal: false, }, }, relations: { organization: true, }, }) /* // TODO // user deletion will fail if the user is the only owner of some non-personal organization // potential improvements: // - auto-delete the organization if there are no other members // - auto-promote a new owner if there are other members */ await Promise.all(memberships.map((membership) => this.removeWithEntityManager(payload.entityManager, membership))) } private getAssignedPermissions( role: OrganizationMemberRole, assignedRoles: 
OrganizationRole[], ): Set { if (role === OrganizationMemberRole.OWNER) { return new Set(Object.values(OrganizationResourcePermission)) } return new Set(assignedRoles.flatMap((role) => role.permissions)) } } ================================================ FILE: apps/api/src/organization/services/organization.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ForbiddenException, Injectable, NotFoundException, Logger, OnModuleInit, OnApplicationShutdown, ConflictException, } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { EntityManager, In, Not, Repository } from 'typeorm' import { CreateOrganizationInternalDto } from '../dto/create-organization.internal.dto' import { UpdateOrganizationQuotaDto } from '../dto/update-organization-quota.dto' import { Organization } from '../entities/organization.entity' import { OrganizationUser } from '../entities/organization-user.entity' import { OrganizationMemberRole } from '../enums/organization-member-role.enum' import { OnAsyncEvent } from '../../common/decorators/on-async-event.decorator' import { UserEvents } from '../../user/constants/user-events.constant' import { UserCreatedEvent } from '../../user/events/user-created.event' import { UserDeletedEvent } from '../../user/events/user-deleted.event' import { Snapshot } from '../../sandbox/entities/snapshot.entity' import { SandboxState } from '../../sandbox/enums/sandbox-state.enum' import { EventEmitter2 } from '@nestjs/event-emitter' import { OrganizationEvents } from '../constants/organization-events.constant' import { CreateOrganizationQuotaDto } from '../dto/create-organization-quota.dto' import { UserEmailVerifiedEvent } from '../../user/events/user-email-verified.event' import { Cron, CronExpression } from '@nestjs/schedule' import { RedisLockProvider } from '../../sandbox/common/redis-lock.provider' import { 
OrganizationSuspendedSandboxStoppedEvent } from '../events/organization-suspended-sandbox-stopped.event' import { SandboxDesiredState } from '../../sandbox/enums/sandbox-desired-state.enum' import { SystemRole } from '../../user/enums/system-role.enum' import { SnapshotState } from '../../sandbox/enums/snapshot-state.enum' import { OrganizationSuspendedSnapshotDeactivatedEvent } from '../events/organization-suspended-snapshot-deactivated.event' import { TrackJobExecution } from '../../common/decorators/track-job-execution.decorator' import { TrackableJobExecutions } from '../../common/interfaces/trackable-job-executions' import { setTimeout } from 'timers/promises' import { TypedConfigService } from '../../config/typed-config.service' import { LogExecution } from '../../common/decorators/log-execution.decorator' import { WithInstrumentation } from '../../common/decorators/otel.decorator' import { RegionQuota } from '../entities/region-quota.entity' import { UpdateOrganizationRegionQuotaDto } from '../dto/update-organization-region-quota.dto' import { RegionService } from '../../region/services/region.service' import { Region } from '../../region/entities/region.entity' import { RegionQuotaDto } from '../dto/region-quota.dto' import { RegionType } from '../../region/enums/region-type.enum' import { RegionDto } from '../../region/dto/region.dto' import { EncryptionService } from '../../encryption/encryption.service' import { OtelConfigDto } from '../dto/otel-config.dto' import { sandboxLookupCacheKeyByAuthToken } from '../../sandbox/utils/sandbox-lookup-cache.util' import { SandboxRepository } from '../../sandbox/repositories/sandbox.repository' @Injectable() export class OrganizationService implements OnModuleInit, TrackableJobExecutions, OnApplicationShutdown { activeJobs = new Set() private readonly logger = new Logger(OrganizationService.name) private defaultOrganizationQuota: CreateOrganizationQuotaDto private defaultSandboxLimitedNetworkEgress: boolean 
constructor( @InjectRepository(Organization) private readonly organizationRepository: Repository, private readonly sandboxRepository: SandboxRepository, @InjectRepository(Snapshot) private readonly snapshotRepository: Repository, private readonly eventEmitter: EventEmitter2, private readonly configService: TypedConfigService, private readonly redisLockProvider: RedisLockProvider, @InjectRepository(RegionQuota) private readonly regionQuotaRepository: Repository, @InjectRepository(Region) private readonly regionRepository: Repository, private readonly regionService: RegionService, private readonly encryptionService: EncryptionService, ) { this.defaultOrganizationQuota = this.configService.getOrThrow('defaultOrganizationQuota') this.defaultSandboxLimitedNetworkEgress = this.configService.getOrThrow( 'organizationSandboxDefaultLimitedNetworkEgress', ) } async onApplicationShutdown() { // wait for all active jobs to finish while (this.activeJobs.size > 0) { this.logger.log(`Waiting for ${this.activeJobs.size} active jobs to finish`) await setTimeout(1000) } } async onModuleInit(): Promise { await this.stopSuspendedOrganizationSandboxes() } async create( createOrganizationDto: CreateOrganizationInternalDto, createdBy: string, personal = false, creatorEmailVerified = false, ): Promise { return this.createWithEntityManager( this.organizationRepository.manager, createOrganizationDto, createdBy, creatorEmailVerified, personal, ) } async findByUser(userId: string): Promise { return this.organizationRepository.find({ where: { users: { userId, }, }, }) } async findOne(organizationId: string): Promise { return this.organizationRepository.findOne({ where: { id: organizationId }, }) } async findBySandboxId(sandboxId: string): Promise { const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId }, }) if (!sandbox) { return null } return this.organizationRepository.findOne({ where: { id: sandbox.organizationId } }) } async findBySandboxAuthToken(authToken: 
string): Promise { const sandbox = await this.sandboxRepository.findOne({ where: { authToken }, cache: { id: sandboxLookupCacheKeyByAuthToken({ authToken }), milliseconds: 10_000, }, }) if (!sandbox) { return null } return this.organizationRepository.findOne({ where: { id: sandbox.organizationId } }) } async findPersonal(userId: string): Promise { return this.findPersonalWithEntityManager(this.organizationRepository.manager, userId) } async delete(organizationId: string): Promise { const organization = await this.organizationRepository.findOne({ where: { id: organizationId } }) if (!organization) { throw new NotFoundException(`Organization with ID ${organizationId} not found`) } return this.removeWithEntityManager(this.organizationRepository.manager, organization) } async updateQuota(organizationId: string, updateDto: UpdateOrganizationQuotaDto): Promise { const organization = await this.organizationRepository.findOne({ where: { id: organizationId } }) if (!organization) { throw new NotFoundException(`Organization with ID ${organizationId} not found`) } organization.maxCpuPerSandbox = updateDto.maxCpuPerSandbox ?? organization.maxCpuPerSandbox organization.maxMemoryPerSandbox = updateDto.maxMemoryPerSandbox ?? organization.maxMemoryPerSandbox organization.maxDiskPerSandbox = updateDto.maxDiskPerSandbox ?? organization.maxDiskPerSandbox organization.maxSnapshotSize = updateDto.maxSnapshotSize ?? organization.maxSnapshotSize organization.volumeQuota = updateDto.volumeQuota ?? organization.volumeQuota organization.snapshotQuota = updateDto.snapshotQuota ?? organization.snapshotQuota organization.authenticatedRateLimit = updateDto.authenticatedRateLimit ?? organization.authenticatedRateLimit organization.sandboxCreateRateLimit = updateDto.sandboxCreateRateLimit ?? organization.sandboxCreateRateLimit organization.sandboxLifecycleRateLimit = updateDto.sandboxLifecycleRateLimit ?? 
organization.sandboxLifecycleRateLimit organization.authenticatedRateLimitTtlSeconds = updateDto.authenticatedRateLimitTtlSeconds ?? organization.authenticatedRateLimitTtlSeconds organization.sandboxCreateRateLimitTtlSeconds = updateDto.sandboxCreateRateLimitTtlSeconds ?? organization.sandboxCreateRateLimitTtlSeconds organization.sandboxLifecycleRateLimitTtlSeconds = updateDto.sandboxLifecycleRateLimitTtlSeconds ?? organization.sandboxLifecycleRateLimitTtlSeconds organization.snapshotDeactivationTimeoutMinutes = updateDto.snapshotDeactivationTimeoutMinutes ?? organization.snapshotDeactivationTimeoutMinutes await this.organizationRepository.save(organization) } async updateRegionQuota( organizationId: string, regionId: string, updateDto: UpdateOrganizationRegionQuotaDto, ): Promise { const regionQuota = await this.regionQuotaRepository.findOne({ where: { organizationId, regionId } }) if (!regionQuota) { throw new NotFoundException('Region not found') } regionQuota.totalCpuQuota = updateDto.totalCpuQuota ?? regionQuota.totalCpuQuota regionQuota.totalMemoryQuota = updateDto.totalMemoryQuota ?? regionQuota.totalMemoryQuota regionQuota.totalDiskQuota = updateDto.totalDiskQuota ?? 
regionQuota.totalDiskQuota await this.regionQuotaRepository.save(regionQuota) } async getRegionQuotas(organizationId: string): Promise { const regionQuotas = await this.regionQuotaRepository.find({ where: { organizationId } }) return regionQuotas.map((regionQuota) => new RegionQuotaDto(regionQuota)) } async getRegionQuota(organizationId: string, regionId: string): Promise { const regionQuota = await this.regionQuotaRepository.findOne({ where: { organizationId, regionId } }) if (!regionQuota) { return null } return new RegionQuotaDto(regionQuota) } async getRegionQuotaBySandboxId(sandboxId: string): Promise { const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId }, }) if (!sandbox) { return null } return this.getRegionQuota(sandbox.organizationId, sandbox.region) } /** * Lists all available regions for the organization. * * A region is available for the organization if either: * - It is directly associated with the organization, or * - It is not associated with any organization, but the organization has quotas allocated for the region or quotas are not enforced for the region * * @param organizationId - The organization ID. 
* @returns The available regions */ async listAvailableRegions(organizationId: string): Promise { const regions = await this.regionRepository .createQueryBuilder('region') .where('region."regionType" = :customRegionType AND region."organizationId" = :organizationId', { customRegionType: RegionType.CUSTOM, organizationId, }) .orWhere('region."regionType" IN (:...otherRegionTypes) AND region."enforceQuotas" = false', { otherRegionTypes: [RegionType.DEDICATED, RegionType.SHARED], }) .orWhere( 'region."regionType" IN (:...otherRegionTypes) AND region."enforceQuotas" = true AND EXISTS (SELECT 1 FROM region_quota rq WHERE rq."regionId" = region."id" AND rq."organizationId" = :organizationId)', { otherRegionTypes: [RegionType.DEDICATED, RegionType.SHARED], organizationId, }, ) .orderBy( `CASE region."regionType" WHEN '${RegionType.CUSTOM}' THEN 1 WHEN '${RegionType.DEDICATED}' THEN 2 WHEN '${RegionType.SHARED}' THEN 3 ELSE 4 END`, ) .getMany() return regions.map(RegionDto.fromRegion) } async suspend( organizationId: string, suspensionReason?: string, suspendedUntil?: Date, suspensionCleanupGracePeriodHours?: number, ): Promise { const organization = await this.organizationRepository.findOne({ where: { id: organizationId } }) if (!organization) { throw new NotFoundException(`Organization with ID ${organizationId} not found`) } organization.suspended = true organization.suspensionReason = suspensionReason || null organization.suspendedUntil = suspendedUntil || null organization.suspendedAt = new Date() if (suspensionCleanupGracePeriodHours) { organization.suspensionCleanupGracePeriodHours = suspensionCleanupGracePeriodHours } await this.organizationRepository.save(organization) } async unsuspend(organizationId: string): Promise { const organization = await this.organizationRepository.findOne({ where: { id: organizationId } }) if (!organization) { throw new NotFoundException(`Organization with ID ${organizationId} not found`) } organization.suspended = false 
organization.suspensionReason = null organization.suspendedUntil = null organization.suspendedAt = null await this.organizationRepository.save(organization) } async updateSandboxDefaultLimitedNetworkEgress( organizationId: string, sandboxDefaultLimitedNetworkEgress: boolean, ): Promise { const organization = await this.organizationRepository.findOne({ where: { id: organizationId } }) if (!organization) { throw new NotFoundException(`Organization with ID ${organizationId} not found`) } organization.sandboxLimitedNetworkEgress = sandboxDefaultLimitedNetworkEgress await this.organizationRepository.save(organization) } /** * @param organizationId - The ID of the organization. * @param defaultRegionId - The ID of the region to set as the default region. * @throws {NotFoundException} If the organization is not found. * @throws {ConflictException} If the organization already has a default region set. */ async setDefaultRegion(organizationId: string, defaultRegionId: string): Promise { const organization = await this.organizationRepository.findOne({ where: { id: organizationId } }) if (!organization) { throw new NotFoundException(`Organization with ID ${organizationId} not found`) } if (organization.defaultRegionId) { throw new ConflictException('Organization already has a default region set') } const defaultRegion = await this.validateOrganizationDefaultRegion(defaultRegionId) organization.defaultRegionId = defaultRegionId if (defaultRegion.enforceQuotas) { const regionQuota = new RegionQuota( organization.id, defaultRegionId, this.defaultOrganizationQuota.totalCpuQuota, this.defaultOrganizationQuota.totalMemoryQuota, this.defaultOrganizationQuota.totalDiskQuota, ) if (organization.regionQuotas) { organization.regionQuotas = [...organization.regionQuotas, regionQuota] } else { organization.regionQuotas = [regionQuota] } } await this.organizationRepository.save(organization) } async updateExperimentalConfig( organizationId: string, experimentalConfig: Record | null, ): 
Promise { const organization = await this.organizationRepository.findOne({ where: { id: organizationId } }) if (!organization) { throw new NotFoundException(`Organization with ID ${organizationId} not found`) } const existingConfig = organization._experimentalConfig organization._experimentalConfig = await this.validatedExperimentalConfig(experimentalConfig) // If experimentalConfig contains redacted fields, we need to preserve the existing encrypted values if (experimentalConfig && experimentalConfig.otel && experimentalConfig.otel.headers) { if (existingConfig && existingConfig.otel && existingConfig.otel.headers) { for (const [key, value] of Object.entries(experimentalConfig.otel.headers)) { if ( typeof value === 'string' && value.match(/\*/g)?.length === value.length && existingConfig.otel.headers[key] ) { organization._experimentalConfig.otel.headers[key] = existingConfig.otel.headers[key] } } } } await this.organizationRepository.save(organization) } async getOtelConfigBySandboxAuthToken(sandboxAuthToken: string): Promise { const organization = await this.findBySandboxAuthToken(sandboxAuthToken) if (!organization) { return null } if (!organization._experimentalConfig || !organization._experimentalConfig.otel) { return null } const otelConfig = organization._experimentalConfig.otel const decryptedHeaders: Record = {} if (otelConfig.headers && typeof otelConfig.headers === 'object') { for (const [key, value] of Object.entries(otelConfig.headers)) { if (typeof key === 'string' && key.trim() && typeof value === 'string' && value.trim()) { decryptedHeaders[key] = await this.encryptionService.decrypt(value) } } } return { endpoint: otelConfig.endpoint, headers: Object.keys(decryptedHeaders).length > 0 ? 
decryptedHeaders : undefined, } } private async validatedExperimentalConfig( experimentalConfig: Record | null, ): Promise | null> { if (!experimentalConfig) { return null } if (!experimentalConfig.otel) { return experimentalConfig } const otelConfig = { ...experimentalConfig.otel } if (typeof otelConfig.endpoint !== 'string' || !otelConfig.endpoint.trim()) { throw new ForbiddenException('Invalid OpenTelemetry endpoint') } if (otelConfig.headers && typeof otelConfig.headers === 'object') { const headers: Record = {} for (const [key, value] of Object.entries(otelConfig.headers)) { if (typeof key === 'string' && key.trim() && typeof value === 'string' && value.trim()) { headers[key] = await this.encryptionService.encrypt(value) } } otelConfig.headers = headers } else { otelConfig.headers = {} } return { ...experimentalConfig, otel: otelConfig, } } private async createWithEntityManager( entityManager: EntityManager, createOrganizationDto: CreateOrganizationInternalDto, createdBy: string, creatorEmailVerified: boolean, personal = false, quota: CreateOrganizationQuotaDto = this.defaultOrganizationQuota, sandboxLimitedNetworkEgress: boolean = this.defaultSandboxLimitedNetworkEgress, ): Promise { if (personal) { const count = await entityManager.count(Organization, { where: { createdBy, personal: true }, }) if (count > 0) { throw new ForbiddenException('Personal organization already exists') } } // set some limit to the number of created organizations const createdCount = await entityManager.count(Organization, { where: { createdBy }, }) if (createdCount >= 10) { throw new ForbiddenException('You have reached the maximum number of created organizations') } let organization = new Organization(createOrganizationDto.defaultRegionId) organization.name = createOrganizationDto.name organization.createdBy = createdBy organization.personal = personal organization.maxCpuPerSandbox = quota.maxCpuPerSandbox organization.maxMemoryPerSandbox = quota.maxMemoryPerSandbox 
organization.maxDiskPerSandbox = quota.maxDiskPerSandbox organization.snapshotQuota = quota.snapshotQuota organization.maxSnapshotSize = quota.maxSnapshotSize organization.volumeQuota = quota.volumeQuota if (!creatorEmailVerified && !this.configService.get('skipUserEmailVerification')) { organization.suspended = true organization.suspendedAt = new Date() organization.suspensionReason = 'Please verify your email address' } else if (this.configService.get('billingApiUrl') && !personal) { organization.suspended = true organization.suspendedAt = new Date() organization.suspensionReason = 'Payment method required' } organization.sandboxLimitedNetworkEgress = sandboxLimitedNetworkEgress const owner = new OrganizationUser() owner.userId = createdBy owner.role = OrganizationMemberRole.OWNER organization.users = [owner] if (createOrganizationDto.defaultRegionId) { const defaultRegion = await this.validateOrganizationDefaultRegion(createOrganizationDto.defaultRegionId) if (defaultRegion.enforceQuotas) { const regionQuota = new RegionQuota( organization.id, createOrganizationDto.defaultRegionId, quota.totalCpuQuota, quota.totalMemoryQuota, quota.totalDiskQuota, ) organization.regionQuotas = [regionQuota] } } await entityManager.transaction(async (em) => { organization = await em.save(organization) await this.eventEmitter.emitAsync(OrganizationEvents.CREATED, organization) }) return organization } private async removeWithEntityManager( entityManager: EntityManager, organization: Organization, force = false, ): Promise { if (!force) { if (organization.personal) { throw new ForbiddenException('Cannot delete personal organization') } } await entityManager.remove(organization) } private async unsuspendPersonalWithEntityManager(entityManager: EntityManager, userId: string): Promise { const organization = await this.findPersonalWithEntityManager(entityManager, userId) organization.suspended = false organization.suspendedAt = null organization.suspensionReason = null 
organization.suspendedUntil = null await entityManager.save(organization) } private async findPersonalWithEntityManager(entityManager: EntityManager, userId: string): Promise { const organization = await entityManager.findOne(Organization, { where: { createdBy: userId, personal: true }, }) if (!organization) { throw new NotFoundException(`Personal organization for user ${userId} not found`) } return organization } /** * @throws NotFoundException - If the region is not found or not available to the organization */ async validateOrganizationDefaultRegion(defaultRegionId: string): Promise { const region = await this.regionService.findOne(defaultRegionId) if (!region || region.regionType !== RegionType.SHARED) { throw new NotFoundException('Region not found') } return region } @Cron(CronExpression.EVERY_MINUTE, { name: 'stop-suspended-organization-sandboxes' }) @TrackJobExecution() @LogExecution('stop-suspended-organization-sandboxes') @WithInstrumentation() async stopSuspendedOrganizationSandboxes(): Promise { // lock the sync to only run one instance at a time const lockKey = 'stop-suspended-organization-sandboxes' if (!(await this.redisLockProvider.lock(lockKey, 60))) { return } const queryResult = await this.organizationRepository .createQueryBuilder('organization') .select('id') .where('suspended = true') .andWhere(`"suspendedAt" < NOW() - INTERVAL '1 hour' * "suspensionCleanupGracePeriodHours"`) .andWhere(`"suspendedAt" > NOW() - INTERVAL '7 day'`) .andWhereExists( this.sandboxRepository .createQueryBuilder('sandbox') .select('1') .where( `"sandbox"."organizationId" = "organization"."id" AND "sandbox"."desiredState" = '${SandboxDesiredState.STARTED}' and "sandbox"."state" NOT IN ('${SandboxState.ERROR}', '${SandboxState.BUILD_FAILED}')`, ), ) .take(100) .getRawMany() const suspendedOrganizationIds = queryResult.map((result) => result.id) // Skip if no suspended organizations found to avoid empty IN clause if (suspendedOrganizationIds.length === 0) { await 
this.redisLockProvider.unlock(lockKey) return } const sandboxes = await this.sandboxRepository.find({ where: { organizationId: In(suspendedOrganizationIds), desiredState: SandboxDesiredState.STARTED, state: Not(In([SandboxState.ERROR, SandboxState.BUILD_FAILED])), }, }) sandboxes.map((sandbox) => this.eventEmitter.emitAsync( OrganizationEvents.SUSPENDED_SANDBOX_STOPPED, new OrganizationSuspendedSandboxStoppedEvent(sandbox.id), ), ) await this.redisLockProvider.unlock(lockKey) } @Cron(CronExpression.EVERY_MINUTE, { name: 'deactivate-suspended-organization-snapshots' }) @TrackJobExecution() @LogExecution('deactivate-suspended-organization-snapshots') @WithInstrumentation() async deactivateSuspendedOrganizationSnapshots(): Promise { // lock the sync to only run one instance at a time const lockKey = 'deactivate-suspended-organization-snapshots' if (!(await this.redisLockProvider.lock(lockKey, 60))) { return } const queryResult = await this.organizationRepository .createQueryBuilder('organization') .select('id') .where('suspended = true') .andWhere(`"suspendedAt" < NOW() - INTERVAL '1 hour' * "suspensionCleanupGracePeriodHours"`) .andWhere(`"suspendedAt" > NOW() - INTERVAL '7 day'`) .andWhereExists( this.snapshotRepository .createQueryBuilder('snapshot') .select('1') .where('snapshot.organizationId = organization.id') .andWhere(`snapshot.state = '${SnapshotState.ACTIVE}'`) .andWhere(`snapshot.general = false`), ) .take(100) .getRawMany() const suspendedOrganizationIds = queryResult.map((result) => result.id) // Skip if no suspended organizations found to avoid empty IN clause if (suspendedOrganizationIds.length === 0) { await this.redisLockProvider.unlock(lockKey) return } const snapshotQueryResult = await this.snapshotRepository .createQueryBuilder('snapshot') .select('id') .where('snapshot.organizationId IN (:...suspendedOrgIds)', { suspendedOrgIds: suspendedOrganizationIds }) .andWhere(`snapshot.state = '${SnapshotState.ACTIVE}'`) .andWhere(`snapshot.general = 
false`) .take(100) .getRawMany() const snapshotIds = snapshotQueryResult.map((result) => result.id) snapshotIds.map((id) => this.eventEmitter.emitAsync( OrganizationEvents.SUSPENDED_SNAPSHOT_DEACTIVATED, new OrganizationSuspendedSnapshotDeactivatedEvent(id), ), ) await this.redisLockProvider.unlock(lockKey) } @OnAsyncEvent({ event: UserEvents.CREATED, }) @TrackJobExecution() async handleUserCreatedEvent(payload: UserCreatedEvent): Promise { return this.createWithEntityManager( payload.entityManager, { name: 'Personal', defaultRegionId: payload.personalOrganizationDefaultRegionId, }, payload.user.id, payload.user.role === SystemRole.ADMIN ? true : payload.user.emailVerified, true, payload.personalOrganizationQuota, payload.user.role === SystemRole.ADMIN ? false : undefined, ) } @OnAsyncEvent({ event: UserEvents.EMAIL_VERIFIED, }) @TrackJobExecution() async handleUserEmailVerifiedEvent(payload: UserEmailVerifiedEvent): Promise { await this.unsuspendPersonalWithEntityManager(payload.entityManager, payload.userId) } @OnAsyncEvent({ event: UserEvents.DELETED, }) @TrackJobExecution() async handleUserDeletedEvent(payload: UserDeletedEvent): Promise { const organization = await this.findPersonalWithEntityManager(payload.entityManager, payload.userId) await this.removeWithEntityManager(payload.entityManager, organization, true) } assertOrganizationIsNotSuspended(organization: Organization): void { if (!organization.suspended) { return } if (organization.suspendedUntil ? organization.suspendedUntil > new Date() : true) { if (organization.suspensionReason) { throw new ForbiddenException(`Organization is suspended: ${organization.suspensionReason}`) } else { throw new ForbiddenException('Organization is suspended') } } } } ================================================ FILE: apps/api/src/region/constants/region-events.constant.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export const RegionEvents = { CREATED: 'region.created', DELETED: 'region.deleted', SNAPSHOT_MANAGER_CREDENTIALS_REGENERATED: 'region.snapshot-manager-credentials-regenerated', SNAPSHOT_MANAGER_UPDATED: 'region.snapshot-manager-updated', } as const ================================================ FILE: apps/api/src/region/constants/region-name-regex.constant.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const REGION_NAME_REGEX = /^[a-zA-Z0-9_.-]+$/ ================================================ FILE: apps/api/src/region/controllers/region.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get, Logger, HttpCode } from '@nestjs/common' import { ApiOAuth2, ApiResponse, ApiOperation, ApiTags, ApiBearerAuth } from '@nestjs/swagger' import { RegionDto } from '../dto/region.dto' import { RegionType } from '../enums/region-type.enum' import { RegionService } from '../services/region.service' @ApiTags('regions') @Controller('shared-regions') @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class RegionController { private readonly logger = new Logger(RegionController.name) constructor(private readonly regionService: RegionService) {} @Get() @HttpCode(200) @ApiOperation({ summary: 'List all shared regions', operationId: 'listSharedRegions', }) @ApiResponse({ status: 200, description: 'List of all shared regions', type: [RegionDto], }) async listRegions(): Promise { return this.regionService.findAllByRegionType(RegionType.SHARED) } } ================================================ FILE: apps/api/src/region/dto/create-region-internal.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { RegionType } from '../enums/region-type.enum' export class CreateRegionInternalDto { id?: string name: string enforceQuotas: boolean regionType: RegionType proxyUrl?: string | null sshGatewayUrl?: string | null snapshotManagerUrl?: string | null } ================================================ FILE: apps/api/src/region/dto/create-region.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { IsString, IsNotEmpty } from 'class-validator' @ApiSchema({ name: 'CreateRegion' }) export class CreateRegionDto { @ApiProperty({ description: 'Region name', example: 'us-east-1', }) @IsString() @IsNotEmpty() name: string @ApiProperty({ description: 'Proxy URL for the region', example: 'https://proxy.example.com', nullable: true, required: false, }) proxyUrl?: string @ApiProperty({ description: 'SSH Gateway URL for the region', example: 'ssh://ssh-gateway.example.com', nullable: true, required: false, }) sshGatewayUrl?: string @ApiProperty({ description: 'Snapshot Manager URL for the region', example: 'https://snapshot-manager.example.com', nullable: true, required: false, }) snapshotManagerUrl?: string } @ApiSchema({ name: 'CreateRegionResponse' }) export class CreateRegionResponseDto { @ApiProperty({ description: 'ID of the created region', example: 'region_12345', }) @IsString() @IsNotEmpty() id: string @ApiProperty({ description: 'Proxy API key for the region', example: 'proxy-api-key-xyz', nullable: true, required: false, }) proxyApiKey?: string @ApiProperty({ description: 'SSH Gateway API key for the region', example: 'ssh-gateway-api-key-abc', nullable: true, required: false, }) sshGatewayApiKey?: string @ApiProperty({ description: 'Snapshot Manager username for the region', example: 'daytona', nullable: true, required: false, }) snapshotManagerUsername?: string @ApiProperty({ 
description: 'Snapshot Manager password for the region', nullable: true, required: false, }) snapshotManagerPassword?: string constructor(params: { id: string proxyApiKey?: string sshGatewayApiKey?: string snapshotManagerUsername?: string snapshotManagerPassword?: string }) { this.id = params.id this.proxyApiKey = params.proxyApiKey this.sshGatewayApiKey = params.sshGatewayApiKey this.snapshotManagerUsername = params.snapshotManagerUsername this.snapshotManagerPassword = params.snapshotManagerPassword } } ================================================ FILE: apps/api/src/region/dto/create-region.internal.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export interface CreateRegionInternalDto { id?: string name: string enforceQuotas: boolean } ================================================ FILE: apps/api/src/region/dto/regenerate-api-key.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { IsString, IsNotEmpty } from 'class-validator' @ApiSchema({ name: 'RegenerateApiKeyResponse' }) export class RegenerateApiKeyResponseDto { @ApiProperty({ description: 'The newly generated API key', example: 'api-key-xyz123', }) @IsString() @IsNotEmpty() apiKey: string constructor(apiKey: string) { this.apiKey = apiKey } } ================================================ FILE: apps/api/src/region/dto/region.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { IsEnum } from 'class-validator' import { Region } from '../entities/region.entity' import { RegionType } from '../enums/region-type.enum' @ApiSchema({ name: 'Region' }) export class RegionDto { @ApiProperty({ description: 'Region ID', example: '123456789012', }) id: string @ApiProperty({ description: 'Region name', example: 'us-east-1', }) name: string @ApiProperty({ description: 'Organization ID', example: '123e4567-e89b-12d3-a456-426614174000', nullable: true, required: false, }) organizationId: string | null @ApiProperty({ description: 'The type of the region', enum: RegionType, enumName: 'RegionType', example: Object.values(RegionType)[0], }) @IsEnum(RegionType) regionType: RegionType @ApiProperty({ description: 'Creation timestamp', example: '2023-01-01T00:00:00.000Z', }) createdAt: string @ApiProperty({ description: 'Last update timestamp', example: '2023-01-01T00:00:00.000Z', }) updatedAt: string @ApiProperty({ description: 'Proxy URL for the region', example: 'https://proxy.example.com', nullable: true, required: false, }) proxyUrl?: string | null @ApiProperty({ description: 'SSH Gateway URL for the region', example: 'http://ssh-gateway.example.com', nullable: true, required: false, }) sshGatewayUrl?: string | null @ApiProperty({ description: 'Snapshot Manager URL for the region', example: 'http://snapshot-manager.example.com', nullable: true, required: false, }) snapshotManagerUrl?: string | null static fromRegion(region: Region): RegionDto { return { id: region.id, name: region.name, organizationId: region.organizationId, regionType: region.regionType, createdAt: region.createdAt?.toISOString(), updatedAt: region.updatedAt?.toISOString(), proxyUrl: region.proxyUrl, sshGatewayUrl: region.sshGatewayUrl, snapshotManagerUrl: region.snapshotManagerUrl, } } } ================================================ FILE: 
apps/api/src/region/dto/snapshot-manager-credentials.dto.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiSchema, ApiProperty } from '@nestjs/swagger' import { IsString, IsNotEmpty } from 'class-validator' @ApiSchema({ name: 'SnapshotManagerCredentials' }) export class SnapshotManagerCredentialsDto { @ApiProperty({ description: 'Snapshot Manager username for the region', example: 'daytona', }) @IsString() @IsNotEmpty() username: string @ApiProperty({ description: 'Snapshot Manager password for the region', }) @IsString() @IsNotEmpty() password: string constructor(params: { username: string; password: string }) { this.username = params.username this.password = params.password } } ================================================ FILE: apps/api/src/region/dto/update-region.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'UpdateRegion' }) export class UpdateRegionDto { @ApiProperty({ description: 'Proxy URL for the region', example: 'https://proxy.example.com', nullable: true, required: false, }) proxyUrl?: string @ApiProperty({ description: 'SSH Gateway URL for the region', example: 'ssh://ssh-gateway.example.com', nullable: true, required: false, }) sshGatewayUrl?: string @ApiProperty({ description: 'Snapshot Manager URL for the region', example: 'https://snapshot-manager.example.com', nullable: true, required: false, }) snapshotManagerUrl?: string } ================================================ FILE: apps/api/src/region/entities/region.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { BeforeInsert, BeforeUpdate, Check, Column, CreateDateColumn, Entity, Index, PrimaryColumn, UpdateDateColumn, } from 'typeorm' import { nanoid } from 'nanoid' import { RegionType } from '../enums/region-type.enum' @Entity() @Index('region_organizationId_name_unique', ['organizationId', 'name'], { unique: true, where: '"organizationId" IS NOT NULL', }) @Index('region_organizationId_null_name_unique', ['name'], { unique: true, where: '"organizationId" IS NULL', }) @Index('region_proxyApiKeyHash_unique', ['proxyApiKeyHash'], { unique: true, where: '"proxyApiKeyHash" IS NOT NULL', }) @Index('region_sshGatewayApiKeyHash_unique', ['sshGatewayApiKeyHash'], { unique: true, where: '"sshGatewayApiKeyHash" IS NOT NULL', }) @Index('idx_region_custom', ['organizationId'], { where: '"regionType" = \'custom\'', }) @Check('region_not_shared', '"organizationId" IS NULL OR "regionType" != \'shared\'') @Check('region_not_custom', '"organizationId" IS NOT NULL OR "regionType" != \'custom\'') export class Region { @PrimaryColumn() id: string @Column() name: string @Column({ type: 'uuid', nullable: true, }) organizationId: string | null @Column({ type: 'enum', enum: RegionType, }) regionType: RegionType @Column({ type: 'boolean', default: true, }) enforceQuotas: boolean @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @UpdateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date @Column({ nullable: true }) proxyUrl: string | null @Column({ nullable: true }) toolboxProxyUrl: string | null @Column({ nullable: true }) proxyApiKeyHash: string | null @Column({ nullable: true }) sshGatewayUrl: string | null @Column({ nullable: true }) sshGatewayApiKeyHash: string | null @Column({ nullable: true }) snapshotManagerUrl: string | null constructor(params: { name: string enforceQuotas: boolean regionType: RegionType id?: string organizationId?: string | null proxyUrl?: string | null toolboxProxyUrl?: string | null 
sshGatewayUrl?: string | null proxyApiKeyHash?: string | null sshGatewayApiKeyHash?: string | null snapshotManagerUrl?: string | null }) { this.name = params.name this.enforceQuotas = params.enforceQuotas this.regionType = params.regionType if (params.id) { this.id = params.id } else { this.id = this.name.toLowerCase() + '_' + nanoid(4) } if (params.organizationId) { this.organizationId = params.organizationId } this.proxyUrl = params.proxyUrl ?? null this.toolboxProxyUrl = params.toolboxProxyUrl ?? params.proxyUrl ?? null this.sshGatewayUrl = params.sshGatewayUrl ?? null this.proxyApiKeyHash = params.proxyApiKeyHash ?? null this.sshGatewayApiKeyHash = params.sshGatewayApiKeyHash ?? null this.snapshotManagerUrl = params.snapshotManagerUrl ?? null } @BeforeInsert() @BeforeUpdate() validateRegionType() { if (this.regionType === RegionType.SHARED) { if (this.organizationId) { throw new Error('Shared regions cannot be associated with an organization.') } } if (this.regionType === RegionType.CUSTOM) { if (!this.organizationId) { throw new Error('Custom regions must be associated with an organization.') } } } } ================================================ FILE: apps/api/src/region/enums/region-type.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export enum RegionType { /** * Shared by all organizations. */ SHARED = 'shared', /** * Dedicated to specific organizations. */ DEDICATED = 'dedicated', /** * Created by and owned by a specific organization. */ CUSTOM = 'custom', } ================================================ FILE: apps/api/src/region/events/region-created.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { EntityManager } from 'typeorm' import { Region } from '../entities/region.entity' export class RegionCreatedEvent { constructor( public readonly entityManager: EntityManager, public readonly region: Region, public readonly organizationId: string | null, public readonly snapshotManagerUsername?: string, public readonly snapshotManagerPassword?: string, ) {} } ================================================ FILE: apps/api/src/region/events/region-deleted.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { EntityManager } from 'typeorm' import { Region } from '../entities/region.entity' export class RegionDeletedEvent { constructor( public readonly entityManager: EntityManager, public readonly region: Region, ) {} } ================================================ FILE: apps/api/src/region/events/region-snapshot-manager-creds.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { EntityManager } from 'typeorm' import { Region } from '../entities/region.entity' export class RegionSnapshotManagerCredsRegeneratedEvent { constructor( public readonly regionId: string, public readonly snapshotManagerUrl: string, public readonly username: string, public readonly password: string, public readonly entityManager?: EntityManager, ) {} } export class RegionSnapshotManagerUpdatedEvent { constructor( public readonly region: Region, public readonly organizationId: string, public readonly snapshotManagerUrl: string | null, public readonly prevSnapshotManagerUrl: string | null, public readonly newUsername?: string, public readonly newPassword?: string, public readonly entityManager?: EntityManager, ) {} } ================================================ FILE: apps/api/src/region/guards/region-access.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, CanActivate, ExecutionContext, NotFoundException, ForbiddenException, Logger, } from '@nestjs/common' import { RegionService } from '../services/region.service' import { OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { SystemRole } from '../../user/enums/system-role.enum' import { RegionType } from '../enums/region-type.enum' @Injectable() export class RegionAccessGuard implements CanActivate { private readonly logger = new Logger(RegionAccessGuard.name) constructor(private readonly regionService: RegionService) {} async canActivate(context: ExecutionContext): Promise { const request = context.switchToHttp().getRequest() const regionId: string = request.params.regionId || request.params.id // TODO: initialize authContext safely const authContext: OrganizationAuthContext = request.user try { const region = await this.regionService.findOne(regionId) if (!region) { throw new NotFoundException('Region not found') } if 
(authContext.role !== SystemRole.ADMIN && region.organizationId !== authContext.organizationId) { throw new ForbiddenException('Request organization ID does not match resource organization ID') } if (authContext.role !== SystemRole.ADMIN && region.regionType !== RegionType.CUSTOM) { throw new ForbiddenException('Region is not a custom region') } return true } catch (error) { if (!(error instanceof NotFoundException)) { this.logger.error(error) } throw new NotFoundException('Region not found') } } } ================================================ FILE: apps/api/src/region/region.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { TypeOrmModule } from '@nestjs/typeorm' import { Region } from './entities/region.entity' import { RegionService } from './services/region.service' import { Runner } from '../sandbox/entities/runner.entity' import { RegionController } from './controllers/region.controller' import { Snapshot } from '../sandbox/entities/snapshot.entity' @Module({ imports: [TypeOrmModule.forFeature([Region, Runner, Snapshot])], controllers: [RegionController], providers: [RegionService], exports: [RegionService], }) export class RegionModule {} ================================================ FILE: apps/api/src/region/services/region.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, NotFoundException, ConflictException, BadRequestException, HttpException, HttpStatus, } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { DataSource, In, IsNull, Like, Repository } from 'typeorm' import { REGION_NAME_REGEX } from '../constants/region-name-regex.constant' import { CreateRegionInternalDto } from '../dto/create-region-internal.dto' import { Region } from '../entities/region.entity' import { Runner } from '../../sandbox/entities/runner.entity' import { RegionType } from '../enums/region-type.enum' import { CreateRegionResponseDto } from '../dto/create-region.dto' import { generateApiKeyHash, generateApiKeyValue, generateRandomString } from '../../common/utils/api-key' import { RegionDto } from '../dto/region.dto' import { EventEmitter2 } from '@nestjs/event-emitter' import { RegionEvents } from '../constants/region-events.constant' import { RegionCreatedEvent } from '../events/region-created.event' import { RegionDeletedEvent } from '../events/region-deleted.event' import { SnapshotManagerCredentialsDto } from '../dto/snapshot-manager-credentials.dto' import { RegionSnapshotManagerCredsRegeneratedEvent, RegionSnapshotManagerUpdatedEvent, } from '../events/region-snapshot-manager-creds.event' import { UpdateRegionDto } from '../dto/update-region.dto' import { Snapshot } from '../../sandbox/entities/snapshot.entity' import { InjectRedis } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { toolboxProxyUrlCacheKey } from '../../sandbox/utils/sandbox-lookup-cache.util' @Injectable() export class RegionService { private readonly logger = new Logger(RegionService.name) constructor( @InjectRepository(Region) private readonly regionRepository: Repository, @InjectRepository(Runner) private readonly runnerRepository: Repository, private readonly dataSource: DataSource, private readonly eventEmitter: EventEmitter2, @InjectRepository(Snapshot) 
private readonly snapshotRepository: Repository, @InjectRedis() private readonly redis: Redis, ) {} /** * @param createRegionDto - The region details. * @param organizationId - The ID of the organization, or null for regions not associated with an organization. * @throws {BadRequestException} If the region name is invalid. * @throws {ConflictException} If the region with the same ID already exists or region with the same name already exists in the organization. */ async create( createRegionDto: CreateRegionInternalDto, organizationId: string | null, ): Promise { if (!REGION_NAME_REGEX.test(createRegionDto.name)) { throw new BadRequestException('Region name must contain only letters, numbers, underscores, periods, and hyphens') } if (createRegionDto.name.length < 2 || createRegionDto.name.length > 255) { throw new BadRequestException('Region name must be between 3 and 255 characters') } if (createRegionDto.id) { const existingRegion = await this.findOne(createRegionDto.id) if (existingRegion) { throw new ConflictException(`Region with id ${createRegionDto.id} already exists`) } } try { const proxyApiKey = createRegionDto.proxyUrl ? generateApiKeyValue() : undefined const sshGatewayApiKey = createRegionDto.sshGatewayUrl ? generateApiKeyValue() : undefined const snapshotManagerUsername = createRegionDto.snapshotManagerUrl ? 'daytona' : undefined const snapshotManagerPassword = createRegionDto.snapshotManagerUrl ? generateRandomString(16) : undefined const region = new Region({ name: createRegionDto.name, enforceQuotas: createRegionDto.enforceQuotas, regionType: createRegionDto.regionType, id: createRegionDto.id, organizationId, proxyUrl: createRegionDto.proxyUrl, sshGatewayUrl: createRegionDto.sshGatewayUrl, proxyApiKeyHash: proxyApiKey ? generateApiKeyHash(proxyApiKey) : null, sshGatewayApiKeyHash: sshGatewayApiKey ? 
generateApiKeyHash(sshGatewayApiKey) : null, snapshotManagerUrl: createRegionDto.snapshotManagerUrl, }) await this.dataSource.transaction(async (em) => { await em.save(region) await this.eventEmitter.emitAsync( RegionEvents.CREATED, new RegionCreatedEvent(em, region, organizationId, snapshotManagerUsername, snapshotManagerPassword), ) }) return new CreateRegionResponseDto({ id: region.id, proxyApiKey, sshGatewayApiKey, snapshotManagerUsername, snapshotManagerPassword, }) } catch (error) { if (error.code === '23505') { throw new ConflictException(`Region with name ${createRegionDto.name} already exists`) } throw error } } /** * @param id - The ID of the region. * @returns The region if found, or null otherwise. */ async findOne(id: string, cache = false): Promise { return await this.regionRepository.findOne({ where: { id, }, cache: cache ? { id: `region:${id}`, milliseconds: 30000, } : undefined, }) } /** * @param name - The name of the region. * @param organizationId - The organization ID, or null for regions not associated with an organization. * @returns The region if found, or null otherwise. */ async findOneByName(name: string, organizationId: string | null): Promise { return await this.regionRepository.findOne({ where: [{ name, organizationId: organizationId ?? IsNull() }], }) } /** * @param proxyApiKey - The proxy API key. * @returns The region if found, or null otherwise. */ async findOneByProxyApiKey(proxyApiKey: string): Promise { return await this.regionRepository.findOne({ where: { proxyApiKeyHash: generateApiKeyHash(proxyApiKey) }, }) } /** * @param sshGatewayApiKey - The SSH gateway API key. * @returns The region if found, or null otherwise. */ async findOneBySshGatewayApiKey(sshGatewayApiKey: string): Promise { return await this.regionRepository.findOne({ where: { sshGatewayApiKeyHash: generateApiKeyHash(sshGatewayApiKey) }, }) } /** * @param regionId - The ID of the region. 
* @returns The organization ID or null for for regions not associated with an organization if the region is found, or undefined if the region is not found. */ async getOrganizationId(regionId: string): Promise { const region = await this.regionRepository.findOne({ where: { id: regionId, }, select: ['organizationId'], loadEagerRelations: false, }) if (!region) { return undefined } return region.organizationId ?? null } /** * @param organizationId - The organization ID of the regions to find. * @param regionType - If provided, only return regions of the specified type. * @returns The regions found ordered by name ascending. */ async findAllByOrganization(organizationId: string, regionType?: RegionType): Promise { return this.regionRepository.find({ where: { organizationId, ...(regionType ? { regionType } : {}), }, order: { name: 'ASC', }, }) } /** * @param type - The type of the regions to find. * @returns The regions found ordered by name ascending. */ async findAllByRegionType(regionType: RegionType): Promise { const regions = await this.regionRepository.find({ where: { regionType, }, order: { name: 'ASC', }, }) return regions.map(RegionDto.fromRegion) } /** * @param ids - The IDs of the regions to find. * @returns The regions found. */ async findByIds(ids: string[]): Promise { if (ids.length === 0) { return [] } return this.regionRepository.find({ where: { id: In(ids), }, }) } /** * @param id - The ID of the region to delete. * @throws {NotFoundException} If the region is not found. 
*/ async delete(id: string): Promise { const region = await this.findOne(id) if (!region) { throw new NotFoundException('Region not found') } const runnerCount = await this.runnerRepository.count({ where: { region: id, }, }) if (runnerCount > 0) { throw new HttpException( 'Cannot delete region which has runners associated with it', HttpStatus.PRECONDITION_REQUIRED, ) } await this.dataSource.transaction(async (em) => { await this.eventEmitter.emitAsync(RegionEvents.DELETED, new RegionDeletedEvent(em, region)) await em.remove(region) }) this.redis.del(toolboxProxyUrlCacheKey(id)).catch((err) => { this.logger.warn(`Failed to invalidate toolbox proxy URL cache for region ${id}: ${err.message}`) }) } async update(regionId: string, updateRegion: UpdateRegionDto): Promise { const region = await this.findOne(regionId) if (!region) { throw new NotFoundException('Region not found') } await this.dataSource.transaction(async (em) => { if (updateRegion.proxyUrl !== undefined) { region.proxyUrl = updateRegion.proxyUrl ?? null region.toolboxProxyUrl = updateRegion.proxyUrl ?? null } if (updateRegion.sshGatewayUrl !== undefined) { region.sshGatewayUrl = updateRegion.sshGatewayUrl ?? null } if (updateRegion.snapshotManagerUrl !== undefined) { if (region.snapshotManagerUrl) { // If snapshots already exist, prevent changing the snapshot manager URL const exists = await this.snapshotRepository.exists({ where: { ref: Like(`${region.snapshotManagerUrl.replace(/^https?:\/\//, '')}%`), }, }) if (exists) { throw new BadRequestException( 'Cannot change snapshot manager URL for region with existing snapshots. Please delete existing snapshots first.', ) } } const prevSnapshotManagerUrl = region.snapshotManagerUrl region.snapshotManagerUrl = updateRegion.snapshotManagerUrl ?? 
null let newUsername: string | undefined = undefined let newPassword: string | undefined = undefined // If the region did not have a snapshot manager, create new credentials if (!prevSnapshotManagerUrl) { newUsername = 'daytona' newPassword = generateRandomString(16) } await this.eventEmitter.emitAsync( RegionEvents.SNAPSHOT_MANAGER_UPDATED, new RegionSnapshotManagerUpdatedEvent( region, region.organizationId, region.snapshotManagerUrl, prevSnapshotManagerUrl, newUsername, newPassword, em, ), ) } await em.save(region) }) if (updateRegion.proxyUrl !== undefined) { this.redis.del(toolboxProxyUrlCacheKey(regionId)).catch((err) => { this.logger.warn(`Failed to invalidate toolbox proxy URL cache for region ${regionId}: ${err.message}`) }) } } /** * @param regionId - The ID of the region. * @throws {NotFoundException} If the region is not found. * @throws {BadRequestException} If the region does not have a proxy URL configured. * @returns The newly generated proxy API key. */ async regenerateProxyApiKey(regionId: string): Promise { const region = await this.findOne(regionId) if (!region) { throw new NotFoundException('Region not found') } if (!region.proxyUrl) { throw new BadRequestException('Region does not have a proxy URL configured') } const newApiKey = generateApiKeyValue() region.proxyApiKeyHash = generateApiKeyHash(newApiKey) await this.regionRepository.save(region) return newApiKey } /** * @param regionId - The ID of the region. * @throws {NotFoundException} If the region is not found. * @throws {BadRequestException} If the region does not have an SSH gateway URL configured. * @returns The newly generated SSH gateway API key. 
*/ async regenerateSshGatewayApiKey(regionId: string): Promise { const region = await this.findOne(regionId) if (!region) { throw new NotFoundException('Region not found') } if (!region.sshGatewayUrl) { throw new BadRequestException('Region does not have an SSH gateway URL configured') } const newApiKey = generateApiKeyValue() region.sshGatewayApiKeyHash = generateApiKeyHash(newApiKey) await this.regionRepository.save(region) return newApiKey } /** * @param regionId - The ID of the region. * @throws {NotFoundException} If the region is not found. * @throws {BadRequestException} If the region does not have a snapshot manager URL configured. * @returns The newly generated snapshot manager credentials. */ async regenerateSnapshotManagerCredentials(regionId: string): Promise { const region = await this.findOne(regionId) if (!region) { throw new NotFoundException('Region not found') } if (!region.snapshotManagerUrl) { throw new BadRequestException('Region does not have a snapshot manager URL configured') } const newUsername = 'daytona' const newPassword = generateRandomString(16) await this.eventEmitter.emitAsync( RegionEvents.SNAPSHOT_MANAGER_CREDENTIALS_REGENERATED, new RegionSnapshotManagerCredsRegeneratedEvent(regionId, region.snapshotManagerUrl, newUsername, newPassword), ) return new SnapshotManagerCredentialsDto({ username: newUsername, password: newPassword, }) } } ================================================ FILE: apps/api/src/sandbox/common/redis-lock.provider.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { InjectRedis } from '@nestjs-modules/ioredis' import { Injectable } from '@nestjs/common' import { Redis } from 'ioredis' type Acquired = boolean export class LockCode { constructor(private readonly code: string) {} public getCode(): string { return this.code } } @Injectable() export class RedisLockProvider { constructor(@InjectRedis() private readonly redis: Redis) {} async lock(key: string, ttl: number, code?: LockCode | null): Promise { const keyValue = code ? code.getCode() : '1' const acquired = await this.redis.set(key, keyValue, 'EX', ttl, 'NX') return !!acquired } async getCode(key: string): Promise { const keyValue = await this.redis.get(key) return keyValue ? new LockCode(keyValue) : null } async unlock(key: string): Promise { await this.redis.del(key) } async isLocked(key: string): Promise { const exists = await this.redis.exists(key) return exists === 1 } async waitForLock(key: string, ttl: number): Promise { while (true) { const acquired = await this.lock(key, ttl) if (acquired) break await new Promise((resolve) => setTimeout(resolve, 50)) } } } ================================================ FILE: apps/api/src/sandbox/common/runner-service-info.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export type RunnerServiceInfo = { serviceName: string healthy: boolean errorReason?: string } ================================================ FILE: apps/api/src/sandbox/constants/errors-for-recovery.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ // Substrings in an error message that should trigger an automatic restore export const RECOVERY_ERROR_SUBSTRINGS: string[] = ['Can not connect to the Docker daemon'] ================================================ FILE: apps/api/src/sandbox/constants/runner-events.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const RunnerEvents = { CREATED: 'runner.created', STATE_UPDATED: 'runner.state.updated', UNSCHEDULABLE_UPDATED: 'runner.unschedulable.updated', DELETED: 'runner.deleted', } as const ================================================ FILE: apps/api/src/sandbox/constants/runner-name-regex.constant.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const RUNNER_NAME_REGEX = /^[a-zA-Z0-9_.-]+$/ ================================================ FILE: apps/api/src/sandbox/constants/sandbox-events.constants.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const SandboxEvents = { ARCHIVED: 'sandbox.archived', STATE_UPDATED: 'sandbox.state.updated', DESIRED_STATE_UPDATED: 'sandbox.desired-state.updated', CREATED: 'sandbox.created', STARTED: 'sandbox.started', STOPPED: 'sandbox.stopped', DESTROYED: 'sandbox.destroyed', PUBLIC_STATUS_UPDATED: 'sandbox.public-status.updated', ORGANIZATION_UPDATED: 'sandbox.organization.updated', BACKUP_CREATED: 'sandbox.backup.created', } as const ================================================ FILE: apps/api/src/sandbox/constants/sandbox.constants.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export const SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION = '00000000-0000-0000-0000-000000000000' ================================================ FILE: apps/api/src/sandbox/constants/snapshot-events.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const SnapshotEvents = { CREATED: 'snapshot.created', ACTIVATED: 'snapshot.activated', STATE_UPDATED: 'snapshot.state.updated', REMOVED: 'snapshot.removed', } as const ================================================ FILE: apps/api/src/sandbox/constants/volume-events.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const VolumeEvents = { CREATED: 'volume.created', STATE_UPDATED: 'volume.state.updated', LAST_USED_AT_UPDATED: 'volume.lastUsedAt.updated', } as const ================================================ FILE: apps/api/src/sandbox/constants/warmpool-events.constants.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const WarmPoolEvents = { TOPUP_REQUESTED: 'warmpool.topup-requested', } as const ================================================ FILE: apps/api/src/sandbox/controllers/job.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get, Post, Body, Param, Query, UseGuards, Logger, Req, NotFoundException } from '@nestjs/common' import { Request } from 'express' import { ApiOAuth2, ApiTags, ApiOperation, ApiBearerAuth, ApiResponse, ApiParam, ApiQuery } from '@nestjs/swagger' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { RunnerAuthGuard } from '../../auth/runner-auth.guard' import { RunnerContextDecorator } from '../../common/decorators/runner-context.decorator' import { RunnerContext } from '../../common/interfaces/runner-context.interface' import { JobDto, JobStatus, ListJobsQueryDto, PaginatedJobsDto, PollJobsResponseDto, UpdateJobStatusDto, } from '../dto/job.dto' import { JobService } from '../services/job.service' import { JobAccessGuard } from '../guards/job-access.guard' @ApiTags('jobs') @Controller('jobs') @UseGuards(CombinedAuthGuard, RunnerAuthGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class JobController { private readonly logger = new Logger(JobController.name) constructor(private readonly jobService: JobService) {} @Get() @ApiOperation({ summary: 'List jobs for the runner', operationId: 'listJobs', description: 'Returns a paginated list of jobs for the runner, optionally filtered by status.', }) @ApiQuery({ name: 'status', required: false, enum: JobStatus, enumName: 'JobStatus', example: JobStatus.PENDING, description: 'Filter jobs by status', }) @ApiQuery({ name: 'limit', required: false, type: Number, description: 'Maximum number of jobs to return (default: 100, max: 500)', }) @ApiQuery({ name: 'offset', required: false, type: Number, description: 'Number of jobs to skip for pagination (default: 0)', }) @ApiResponse({ status: 200, description: 'List of jobs for the runner', type: PaginatedJobsDto, }) async listJobs( @RunnerContextDecorator() runnerContext: RunnerContext, @Query() query: ListJobsQueryDto, ): Promise { return await 
this.jobService.findJobsForRunner(runnerContext.runnerId, query.status, query.page, query.limit) } @Get('poll') @ApiOperation({ summary: 'Long poll for jobs', operationId: 'pollJobs', description: 'Long poll endpoint for runners to fetch pending jobs. Returns immediately if jobs are available, otherwise waits up to timeout seconds.', }) @ApiQuery({ name: 'timeout', required: false, type: Number, description: 'Timeout in seconds for long polling (default: 30, max: 60)', }) @ApiQuery({ name: 'limit', required: false, type: Number, description: 'Maximum number of jobs to return (default: 10, max: 100)', }) @ApiResponse({ status: 200, description: 'List of jobs for the runner', type: PollJobsResponseDto, }) async pollJobs( @Req() req: Request, @RunnerContextDecorator() runnerContext: RunnerContext, @Query('timeout') timeout?: number, @Query('limit') limit?: number, ): Promise { this.logger.debug(`Runner ${runnerContext.runnerId} polling for jobs (timeout: ${timeout}s, limit: ${limit})`) const timeoutSeconds = timeout ? Math.min(Number(timeout), 60) : 30 const limitNumber = limit ? 
Math.min(Number(limit), 100) : 10 // Create AbortSignal from request's 'close' event const abortController = new AbortController() const onClose = () => { this.logger.debug(`Runner ${runnerContext.runnerId} disconnected during polling, aborting`) abortController.abort() } req.on('close', onClose) try { const jobs = await this.jobService.pollJobs( runnerContext.runnerId, limitNumber, timeoutSeconds, abortController.signal, ) this.logger.debug(`Returning ${jobs.length} jobs to runner ${runnerContext.runnerId}`) return { jobs } } catch (error) { if (abortController.signal.aborted) { this.logger.debug(`Polling aborted for disconnected runner ${runnerContext.runnerId}`) return { jobs: [] } // Return empty array on disconnect } this.logger.error(`Error polling jobs for runner ${runnerContext.runnerId}: ${error.message}`, error.stack) throw error } finally { req.off('close', onClose) } } @Get(':jobId') @ApiOperation({ summary: 'Get job details', operationId: 'getJob', }) @ApiParam({ name: 'jobId', description: 'ID of the job', type: 'string', }) @ApiResponse({ status: 200, description: 'Job details', type: JobDto, }) @UseGuards(JobAccessGuard) async getJob(@RunnerContextDecorator() runnerContext: RunnerContext, @Param('jobId') jobId: string): Promise { this.logger.log(`Runner ${runnerContext.runnerId} fetching job ${jobId}`) const job = await this.jobService.findOne(jobId) if (!job) { throw new NotFoundException(`Job ${jobId} not found`) } return new JobDto(job) } @Post(':jobId/status') @ApiOperation({ summary: 'Update job status', operationId: 'updateJobStatus', }) @ApiParam({ name: 'jobId', description: 'ID of the job', type: 'string', }) @ApiResponse({ status: 200, description: 'Job status updated successfully', type: JobDto, }) @UseGuards(JobAccessGuard) async updateJobStatus( @RunnerContextDecorator() runnerContext: RunnerContext, @Param('jobId') jobId: string, @Body() updateJobStatusDto: UpdateJobStatusDto, ): Promise { this.logger.debug(`Runner 
${runnerContext.runnerId} updating job ${jobId} status to ${updateJobStatusDto.status}`) const job = await this.jobService.updateJobStatus( jobId, updateJobStatusDto.status, updateJobStatusDto.errorMessage, updateJobStatusDto.resultMetadata, ) return new JobDto(job) } } ================================================ FILE: apps/api/src/sandbox/controllers/preview.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import Redis from 'ioredis' import { Controller, Get, Param, Logger, NotFoundException, UseGuards, Req } from '@nestjs/common' import { SandboxService } from '../services/sandbox.service' import { ApiResponse, ApiOperation, ApiParam, ApiTags, ApiOAuth2, ApiBearerAuth } from '@nestjs/swagger' import { InjectRedis } from '@nestjs-modules/ioredis' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { OrganizationUserService } from '../../organization/services/organization-user.service' @ApiTags('preview') @Controller('preview') export class PreviewController { private readonly logger = new Logger(PreviewController.name) constructor( @InjectRedis() private readonly redis: Redis, private readonly sandboxService: SandboxService, private readonly organizationUserService: OrganizationUserService, ) {} @Get(':sandboxId/public') @ApiOperation({ summary: 'Check if sandbox is public', operationId: 'isSandboxPublic', }) @ApiParam({ name: 'sandboxId', description: 'ID of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Public status of the sandbox', type: Boolean, }) async isSandboxPublic(@Param('sandboxId') sandboxId: string): Promise { const cached = await this.redis.get(`preview:public:${sandboxId}`) if (cached) { if (cached === '1') { return true } throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`) } try { const isPublic = await this.sandboxService.isSandboxPublic(sandboxId) // for private sandboxes, throw 
404 as well // to prevent using the method to check if a sandbox exists if (!isPublic) { // cache the result for 3 seconds to avoid unnecessary requests to the database await this.redis.setex(`preview:public:${sandboxId}`, 3, '0') throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`) } // cache the result for 3 seconds to avoid unnecessary requests to the database await this.redis.setex(`preview:public:${sandboxId}`, 3, '1') return true } catch (ex) { if (ex instanceof NotFoundException) { // cache the not found sandbox as well // as it is the same case as for the private sandboxes await this.redis.setex(`preview:public:${sandboxId}`, 3, '0') throw ex } throw ex } } @Get(':sandboxId/validate/:authToken') @ApiOperation({ summary: 'Check if sandbox auth token is valid', operationId: 'isValidAuthToken', }) @ApiParam({ name: 'sandboxId', description: 'ID of the sandbox', type: 'string', }) @ApiParam({ name: 'authToken', description: 'Auth token of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Sandbox auth token validation status', type: Boolean, }) async isValidAuthToken( @Param('sandboxId') sandboxId: string, @Param('authToken') authToken: string, ): Promise { const cached = await this.redis.get(`preview:token:${sandboxId}:${authToken}`) if (cached) { if (cached === '1') { return true } throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`) } const sandbox = await this.sandboxService.findOne(sandboxId) if (!sandbox) { await this.redis.setex(`preview:token:${sandboxId}:${authToken}`, 3, '0') throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`) } if (sandbox.authToken === authToken) { await this.redis.setex(`preview:token:${sandboxId}:${authToken}`, 3, '1') return true } await this.redis.setex(`preview:token:${sandboxId}:${authToken}`, 3, '0') throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`) } @Get(':sandboxId/access') @ApiOperation({ summary: 'Check if user has access to 
the sandbox', operationId: 'hasSandboxAccess', }) @ApiResponse({ status: 200, description: 'User access status to the sandbox', type: Boolean, }) @UseGuards(CombinedAuthGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() async hasSandboxAccess(@Req() req: Request, @Param('sandboxId') sandboxId: string): Promise { // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore const userId = req.user?.userId const cached = await this.redis.get(`preview:access:${sandboxId}:${userId}`) if (cached) { if (cached === '1') { return true } throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`) } const sandbox = await this.sandboxService.findOne(sandboxId) const hasAccess = await this.organizationUserService.exists(sandbox.organizationId, userId) if (!hasAccess) { await this.redis.setex(`preview:token:${sandboxId}:${userId}`, 3, '0') throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`) } // if user has access, keep it in cache longer await this.redis.setex(`preview:access:${sandboxId}:${userId}`, 30, '1') return true } @Get(':signedPreviewToken/:port/sandbox-id') @ApiOperation({ summary: 'Get sandbox ID from signed preview URL token', operationId: 'getSandboxIdFromSignedPreviewUrlToken', }) @ApiParam({ name: 'signedPreviewToken', description: 'Signed preview URL token', type: 'string', }) @ApiParam({ name: 'port', description: 'Port number to get sandbox ID from signed preview URL token', type: 'number', }) @ApiResponse({ status: 200, description: 'Sandbox ID from signed preview URL token', type: String, }) async getSandboxIdFromSignedPreviewUrlToken( @Param('signedPreviewToken') signedPreviewToken: string, @Param('port') port: number, ): Promise { return this.sandboxService.getSandboxIdFromSignedPreviewUrlToken(signedPreviewToken, port) } } ================================================ FILE: apps/api/src/sandbox/controllers/runner.controller.ts ================================================ /* * Copyright 2025 
Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Body, Controller, Get, Post, Param, Patch, UseGuards, Query, Delete, HttpCode, NotFoundException, ForbiddenException, ParseUUIDPipe, } from '@nestjs/common' import { CreateRunnerDto } from '../dto/create-runner.dto' import { RunnerService } from '../services/runner.service' import { ApiOAuth2, ApiTags, ApiOperation, ApiBearerAuth, ApiResponse, ApiQuery, ApiParam, ApiHeader, } from '@nestjs/swagger' import { SystemActionGuard } from '../../auth/system-action.guard' import { RequiredApiRole } from '../../common/decorators/required-role.decorator' import { SystemRole } from '../../user/enums/system-role.enum' import { ProxyGuard } from '../guards/proxy.guard' import { RunnerDto } from '../dto/runner.dto' import { RunnerSnapshotDto } from '../dto/runner-snapshot.dto' import { Audit, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } from '../../audit/enums/audit-target.enum' import { SshGatewayGuard } from '../guards/ssh-gateway.guard' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { OrGuard } from '../../auth/or.guard' import { RunnerAuthGuard } from '../../auth/runner-auth.guard' import { RunnerContextDecorator } from '../../common/decorators/runner-context.decorator' import { RunnerContext } from '../../common/interfaces/runner-context.interface' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' import { RunnerAccessGuard } from '../guards/runner-access.guard' import { RegionRunnerAccessGuard } from '../guards/region-runner-access.guard' import { CustomHeaders } from '../../common/constants/header.constants' import { AuthContext } from '../../common/decorators/auth-context.decorator' import { OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { RequiredOrganizationResourcePermissions } from 
'../../organization/decorators/required-organization-resource-permissions.decorator' import { OrganizationResourcePermission } from '../../organization/enums/organization-resource-permission.enum' import { OrganizationResourceActionGuard } from '../../organization/guards/organization-resource-action.guard' import { CreateRunnerResponseDto } from '../dto/create-runner-response.dto' import { RegionSandboxAccessGuard } from '../guards/region-sandbox-access.guard' import { RunnerFullDto } from '../dto/runner-full.dto' import { RegionType } from '../../region/enums/region-type.enum' import { RegionService } from '../../region/services/region.service' import { RequireFlagsEnabled } from '@openfeature/nestjs-sdk' import { FeatureFlags } from '../../common/constants/feature-flags' import { RunnerHealthcheckDto } from '../dto/runner-health.dto' @ApiTags('runners') @Controller('runners') @UseGuards(CombinedAuthGuard, AuthenticatedRateLimitGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class RunnerController { constructor( private readonly runnerService: RunnerService, private readonly regionService: RegionService, ) {} @Post() @HttpCode(201) @ApiOperation({ summary: 'Create runner', operationId: 'createRunner', }) @ApiResponse({ status: 201, type: CreateRunnerResponseDto, }) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.RUNNER, targetIdFromResult: (result: CreateRunnerResponseDto) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ regionId: req.body?.regionId, name: req.body?.name, }), }, }) @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(OrganizationResourceActionGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_RUNNERS]) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.ORGANIZATION_INFRASTRUCTURE, defaultValue: false }] }) async create( @Body() createRunnerDto: CreateRunnerDto, @AuthContext() authContext: OrganizationAuthContext, ): Promise { // validate that the runner 
region is a custom region owned by the organization const region = await this.regionService.findOne(createRunnerDto.regionId) if (!region || region.organizationId !== authContext.organizationId) { throw new NotFoundException('Region not found') } if (region.regionType !== RegionType.CUSTOM) { throw new ForbiddenException('Runner can only be created in a custom region') } // create the runner const { runner, apiKey } = await this.runnerService.create({ regionId: createRunnerDto.regionId, name: createRunnerDto.name, apiVersion: '2', }) return CreateRunnerResponseDto.fromRunner(runner, apiKey) } @Get('/me') @UseGuards(RunnerAuthGuard) @ApiOperation({ summary: 'Get info for authenticated runner', operationId: 'getInfoForAuthenticatedRunner', }) @ApiResponse({ status: 200, description: 'Runner info', type: RunnerFullDto, }) async getInfoForAuthenticatedRunner(@RunnerContextDecorator() runnerContext: RunnerContext): Promise { return this.runnerService.findOneFullOrFail(runnerContext.runnerId) } @Get(':id') @HttpCode(200) @ApiOperation({ summary: 'Get runner by ID', operationId: 'getRunnerById', }) @ApiResponse({ status: 200, type: RunnerDto, }) @ApiParam({ name: 'id', description: 'Runner ID', type: String, }) @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(OrganizationResourceActionGuard, RunnerAccessGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.READ_RUNNERS]) async getRunnerById(@Param('id', ParseUUIDPipe) id: string): Promise { const runner = await this.runnerService.findOneOrFail(id) return RunnerDto.fromRunner(runner) } @Get(':id/full') @HttpCode(200) @ApiOperation({ summary: 'Get runner by ID', operationId: 'getRunnerFullById', }) @ApiResponse({ status: 200, type: RunnerFullDto, }) @ApiParam({ name: 'id', description: 'Runner ID', type: String, }) @UseGuards(OrGuard([SystemActionGuard, ProxyGuard, SshGatewayGuard, RegionRunnerAccessGuard])) @RequiredApiRole([SystemRole.ADMIN]) async getRunnerByIdFull(@Param('id', 
ParseUUIDPipe) id: string): Promise { const runner = await this.runnerService.findOneOrFail(id) return RunnerFullDto.fromRunner(runner) } @Get() @HttpCode(200) @ApiOperation({ summary: 'List all runners', operationId: 'listRunners', }) @ApiResponse({ status: 200, type: [RunnerDto], }) @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(OrganizationResourceActionGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.READ_RUNNERS]) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.ORGANIZATION_INFRASTRUCTURE, defaultValue: false }] }) async findAll(@AuthContext() authContext: OrganizationAuthContext): Promise { return this.runnerService.findAllByOrganization(authContext.organizationId, RegionType.CUSTOM) } @Patch(':id/scheduling') @HttpCode(200) @ApiOperation({ summary: 'Update runner scheduling status', operationId: 'updateRunnerScheduling', }) @ApiResponse({ status: 200, type: RunnerDto, }) @ApiParam({ name: 'id', description: 'Runner ID', type: String, }) @Audit({ action: AuditAction.UPDATE_SCHEDULING, targetType: AuditTarget.RUNNER, targetIdFromRequest: (req) => req.params.id, requestMetadata: { body: (req: TypedRequest<{ unschedulable: boolean }>) => ({ unschedulable: req.body?.unschedulable, }), }, }) @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(OrganizationResourceActionGuard, RunnerAccessGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_RUNNERS]) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.ORGANIZATION_INFRASTRUCTURE, defaultValue: false }] }) async updateSchedulingStatus( @Param('id', ParseUUIDPipe) id: string, @Body('unschedulable') unschedulable: boolean, ): Promise { const updatedRunner = await this.runnerService.updateSchedulingStatus(id, unschedulable) return RunnerDto.fromRunner(updatedRunner) } @Patch(':id/draining') @HttpCode(200) @ApiOperation({ summary: 'Update runner draining status', operationId: 'updateRunnerDraining', }) @ApiResponse({ status: 200, type: 
RunnerDto, }) @ApiParam({ name: 'id', description: 'Runner ID', type: String, }) @Audit({ action: AuditAction.UPDATE_DRAINING, targetType: AuditTarget.RUNNER, targetIdFromRequest: (req) => req.params.id, requestMetadata: { body: (req: TypedRequest<{ draining: boolean }>) => ({ draining: req.body?.draining, }), }, }) @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(OrganizationResourceActionGuard, RunnerAccessGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_RUNNERS]) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.ORGANIZATION_INFRASTRUCTURE, defaultValue: false }] }) async updateDrainingStatus( @Param('id', ParseUUIDPipe) id: string, @Body('draining') draining: boolean, ): Promise { const updatedRunner = await this.runnerService.updateDrainingStatus(id, draining) return RunnerDto.fromRunner(updatedRunner) } @Delete(':id') @HttpCode(204) @ApiOperation({ summary: 'Delete runner', operationId: 'deleteRunner', }) @ApiResponse({ status: 204, }) @ApiParam({ name: 'id', description: 'Runner ID', type: String, }) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.RUNNER, targetIdFromRequest: (req) => req.params.id, }) @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(OrganizationResourceActionGuard, RunnerAccessGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.DELETE_RUNNERS]) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.ORGANIZATION_INFRASTRUCTURE, defaultValue: false }] }) async delete(@Param('id', ParseUUIDPipe) id: string): Promise { return this.runnerService.remove(id) } @Get('/by-sandbox/:sandboxId') @HttpCode(200) @ApiOperation({ summary: 'Get runner by sandbox ID', operationId: 'getRunnerBySandboxId', }) @ApiResponse({ status: 200, type: RunnerFullDto, }) @UseGuards(OrGuard([SystemActionGuard, ProxyGuard, SshGatewayGuard, RegionSandboxAccessGuard])) @RequiredApiRole([SystemRole.ADMIN]) async getRunnerBySandboxId(@Param('sandboxId') sandboxId: string): Promise { 
const runner = await this.runnerService.findBySandboxId(sandboxId) if (!runner) { throw new NotFoundException('Runner not found') } return RunnerFullDto.fromRunner(runner) } @Get('/by-snapshot-ref') @HttpCode(200) @ApiOperation({ summary: 'Get runners by snapshot ref', operationId: 'getRunnersBySnapshotRef', }) @ApiResponse({ status: 200, type: [RunnerSnapshotDto], }) @ApiQuery({ name: 'ref', description: 'Snapshot ref', type: String, required: true, }) @UseGuards(OrGuard([SystemActionGuard, ProxyGuard, SshGatewayGuard])) @RequiredApiRole([SystemRole.ADMIN, 'proxy', 'ssh-gateway']) async getRunnersBySnapshotRef(@Query('ref') ref: string): Promise { return this.runnerService.getRunnersBySnapshotRef(ref) } @Post('healthcheck') @ApiOperation({ summary: 'Runner healthcheck', operationId: 'runnerHealthcheck', description: 'Endpoint for version 2 runners to send healthcheck and metrics. Updates lastChecked timestamp and runner metrics.', }) @ApiResponse({ status: 200, description: 'Healthcheck received', }) async runnerHealthcheck( @RunnerContextDecorator() runnerContext: RunnerContext, @Body() healthcheck: RunnerHealthcheckDto, ): Promise { await this.runnerService.updateRunnerHealth( runnerContext.runnerId, healthcheck.domain, healthcheck.apiUrl, healthcheck.proxyUrl, healthcheck.serviceHealth, healthcheck.metrics, healthcheck.appVersion, ) } } ================================================ FILE: apps/api/src/sandbox/controllers/sandbox.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

// Framework primitives (routing, DI, pipes, guards).
import {
  Controller,
  Get,
  Post,
  Delete,
  Body,
  Param,
  Query,
  Logger,
  UseGuards,
  HttpCode,
  UseInterceptors,
  Put,
  NotFoundException,
  Res,
  Request,
  RawBodyRequest,
  Next,
  ParseBoolPipe,
} from '@nestjs/common'
import { CombinedAuthGuard } from '../../auth/combined-auth.guard'
import { SandboxService } from '../services/sandbox.service'
import { CreateSandboxDto } from '../dto/create-sandbox.dto'
// OpenAPI/Swagger metadata decorators.
import {
  ApiOAuth2,
  ApiResponse,
  ApiQuery,
  ApiOperation,
  ApiParam,
  ApiTags,
  ApiHeader,
  ApiBearerAuth,
} from '@nestjs/swagger'
import { SandboxDto, SandboxLabelsDto } from '../dto/sandbox.dto'
import { ResizeSandboxDto } from '../dto/resize-sandbox.dto'
import { UpdateSandboxStateDto } from '../dto/update-sandbox-state.dto'
import { PaginatedSandboxesDto } from '../dto/paginated-sandboxes.dto'
import { RunnerService } from '../services/runner.service'
import { RunnerAuthGuard } from '../../auth/runner-auth.guard'
import { RunnerContextDecorator } from '../../common/decorators/runner-context.decorator'
import { RunnerContext } from '../../common/interfaces/runner-context.interface'
import { SandboxState } from '../enums/sandbox-state.enum'
import { Sandbox } from '../entities/sandbox.entity'
import { ContentTypeInterceptor } from '../../common/interceptors/content-type.interceptors'
import { SandboxAccessGuard } from '../guards/sandbox-access.guard'
import { CustomHeaders } from '../../common/constants/header.constants'
import { AuthContext } from '../../common/decorators/auth-context.decorator'
import { OrganizationAuthContext } from '../../common/interfaces/auth-context.interface'
import { RequiredOrganizationResourcePermissions } from '../../organization/decorators/required-organization-resource-permissions.decorator'
import { OrganizationResourcePermission } from '../../organization/enums/organization-resource-permission.enum'
import { OrganizationResourceActionGuard } from '../../organization/guards/organization-resource-action.guard'
import { PortPreviewUrlDto, SignedPortPreviewUrlDto } from '../dto/port-preview-url.dto'
import { IncomingMessage, ServerResponse } from 'http'
import { NextFunction } from 'http-proxy-middleware/dist/types'
import { LogProxy } from '../proxy/log-proxy'
import { BadRequestError } from '../../exceptions/bad-request.exception'
import { SandboxStateUpdatedEvent } from '../events/sandbox-state-updated.event'
import { Audit, MASKED_AUDIT_VALUE, TypedRequest } from '../../audit/decorators/audit.decorator'
import { AuditAction } from '../../audit/enums/audit-action.enum'
import { AuditTarget } from '../../audit/enums/audit-target.enum'
// import { UpdateSandboxNetworkSettingsDto } from '../dto/update-sandbox-network-settings.dto'
import { SshAccessDto, SshAccessValidationDto } from '../dto/ssh-access.dto'
import { ListSandboxesQueryDto } from '../dto/list-sandboxes-query.dto'
import { ProxyGuard } from '../guards/proxy.guard'
import { OrGuard } from '../../auth/or.guard'
import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard'
import { SkipThrottle } from '@nestjs/throttler'
import { ThrottlerScope } from '../../common/decorators/throttler-scope.decorator'
import { SshGatewayGuard } from '../guards/ssh-gateway.guard'
import { ToolboxProxyUrlDto } from '../dto/toolbox-proxy-url.dto'
import { UrlDto } from '../../common/dto/url.dto'
import { InjectRedis } from '@nestjs-modules/ioredis'
import { Redis } from 'ioredis'
import { SANDBOX_EVENT_CHANNEL } from '../../common/constants/constants'
import { RequireFlagsEnabled } from '@openfeature/nestjs-sdk'
import { FeatureFlags } from '../../common/constants/feature-flags'

// Sandbox lifecycle REST API. Every route requires the organization header and
// passes the class-level auth, organization-permission, and rate-limit guards.
@ApiTags('sandbox')
@Controller('sandbox')
@ApiHeader(CustomHeaders.ORGANIZATION_ID)
@UseGuards(CombinedAuthGuard, OrganizationResourceActionGuard, AuthenticatedRateLimitGuard)
@ApiOAuth2(['openid', 'profile', 'email'])
@ApiBearerAuth()
export class SandboxController {
  private readonly logger = new
Logger(SandboxController.name) private readonly sandboxCallbacks: Map void> = new Map() private readonly redisSubscriber: Redis constructor( private readonly runnerService: RunnerService, private readonly sandboxService: SandboxService, @InjectRedis() private readonly redis: Redis, ) { this.redisSubscriber = this.redis.duplicate() this.redisSubscriber.subscribe(SANDBOX_EVENT_CHANNEL) this.redisSubscriber.on('message', (channel, message) => { if (channel !== SANDBOX_EVENT_CHANNEL) { return } try { const event = JSON.parse(message) as SandboxStateUpdatedEvent this.handleSandboxStateUpdated(event) } catch (error) { this.logger.error('Failed to parse sandbox state updated event:', error) return } }) } @Get() @ApiOperation({ summary: 'List all sandboxes', operationId: 'listSandboxes', }) @ApiResponse({ status: 200, description: 'List of all sandboxes', type: [SandboxDto], }) @ApiQuery({ name: 'verbose', required: false, type: Boolean, description: 'Include verbose output', }) @ApiQuery({ name: 'labels', type: String, required: false, example: '{"label1": "value1", "label2": "value2"}', description: 'JSON encoded labels to filter by', }) @ApiQuery({ name: 'includeErroredDeleted', required: false, type: Boolean, description: 'Include errored and deleted sandboxes', }) async listSandboxes( @AuthContext() authContext: OrganizationAuthContext, @Query('verbose') verbose?: boolean, @Query('labels') labelsQuery?: string, @Query('includeErroredDeleted') includeErroredDeleted?: boolean, ): Promise { const labels = labelsQuery ? 
JSON.parse(labelsQuery) : undefined const sandboxes = await this.sandboxService.findAllDeprecated( authContext.organizationId, labels, includeErroredDeleted, ) return this.sandboxService.toSandboxDtos(sandboxes) } @Get('paginated') @ApiOperation({ summary: 'List all sandboxes paginated', operationId: 'listSandboxesPaginated', }) @ApiResponse({ status: 200, description: 'Paginated list of all sandboxes', type: PaginatedSandboxesDto, }) async listSandboxesPaginated( @AuthContext() authContext: OrganizationAuthContext, @Query() queryParams: ListSandboxesQueryDto, ): Promise { const { page, limit, id, name, labels, includeErroredDeleted: includeErroredDestroyed, states, snapshots, regions, minCpu, maxCpu, minMemoryGiB, maxMemoryGiB, minDiskGiB, maxDiskGiB, lastEventAfter, lastEventBefore, sort: sortField, order: sortDirection, } = queryParams const result = await this.sandboxService.findAll( authContext.organizationId, page, limit, { id, name, labels: labels ? JSON.parse(labels) : undefined, includeErroredDestroyed, states, snapshots, regionIds: regions, minCpu, maxCpu, minMemoryGiB, maxMemoryGiB, minDiskGiB, maxDiskGiB, lastEventAfter, lastEventBefore, }, { field: sortField, direction: sortDirection, }, ) return { items: await this.sandboxService.toSandboxDtos(result.items), total: result.total, page: result.page, totalPages: result.totalPages, } } @Post() @HttpCode(200) // for Daytona Api compatibility @UseInterceptors(ContentTypeInterceptor) @SkipThrottle({ authenticated: true }) @ThrottlerScope('sandbox-create') @ApiOperation({ summary: 'Create a new sandbox', operationId: 'createSandbox', }) @ApiResponse({ status: 200, description: 'The sandbox has been successfully created.', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.SANDBOX, targetIdFromResult: (result: SandboxDto) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ 
name: req.body?.name, snapshot: req.body?.snapshot, user: req.body?.user, env: req.body?.env ? Object.fromEntries(Object.keys(req.body?.env).map((key) => [key, MASKED_AUDIT_VALUE])) : undefined, labels: req.body?.labels, public: req.body?.public, class: req.body?.class, target: req.body?.target, cpu: req.body?.cpu, gpu: req.body?.gpu, memory: req.body?.memory, disk: req.body?.disk, autoStopInterval: req.body?.autoStopInterval, autoArchiveInterval: req.body?.autoArchiveInterval, autoDeleteInterval: req.body?.autoDeleteInterval, volumes: req.body?.volumes, buildInfo: req.body?.buildInfo, networkBlockAll: req.body?.networkBlockAll, networkAllowList: req.body?.networkAllowList, }), }, }) async createSandbox( @AuthContext() authContext: OrganizationAuthContext, @Body() createSandboxDto: CreateSandboxDto, ): Promise { const organization = authContext.organization let sandbox: SandboxDto if (createSandboxDto.buildInfo) { if (createSandboxDto.snapshot) { throw new BadRequestError('Cannot specify a snapshot when using a build info entry') } sandbox = await this.sandboxService.createFromBuildInfo(createSandboxDto, organization) } else { if (createSandboxDto.cpu || createSandboxDto.gpu || createSandboxDto.memory || createSandboxDto.disk) { throw new BadRequestError('Cannot specify Sandbox resources when using a snapshot') } sandbox = await this.sandboxService.createFromSnapshot(createSandboxDto, organization) if (sandbox.state === SandboxState.STARTED) { return sandbox } await this.waitForSandboxStarted(sandbox, 30) } return sandbox } @Get('for-runner') @UseGuards(RunnerAuthGuard) @ApiOperation({ summary: 'Get sandboxes for the authenticated runner', operationId: 'getSandboxesForRunner', }) @ApiQuery({ name: 'states', required: false, type: String, description: 'Comma-separated list of sandbox states to filter by', }) @ApiQuery({ name: 'skipReconcilingSandboxes', required: false, type: Boolean, description: 'Skip sandboxes where state differs from desired state', }) 
@ApiResponse({ status: 200, description: 'List of sandboxes for the authenticated runner', type: [SandboxDto], }) async getSandboxesForRunner( @RunnerContextDecorator() runnerContext: RunnerContext, @Query('states') states?: string, @Query('skipReconcilingSandboxes') skipReconcilingSandboxes?: string, ): Promise { const stateArray = states ? states.split(',').map((s) => { if (!Object.values(SandboxState).includes(s as SandboxState)) { throw new BadRequestError(`Invalid sandbox state: ${s}`) } return s as SandboxState }) : undefined const skip = skipReconcilingSandboxes === 'true' const sandboxes = await this.sandboxService.findByRunnerId(runnerContext.runnerId, stateArray, skip) return this.sandboxService.toSandboxDtos(sandboxes) } @Get(':sandboxIdOrName') @ApiOperation({ summary: 'Get sandbox details', operationId: 'getSandbox', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiQuery({ name: 'verbose', required: false, type: Boolean, description: 'Include verbose output', }) @ApiResponse({ status: 200, description: 'Sandbox details', type: SandboxDto, }) @UseGuards(SandboxAccessGuard) async getSandbox( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, // eslint-disable-next-line @typescript-eslint/no-unused-vars @Query('verbose') verbose?: boolean, ): Promise { const sandbox = await this.sandboxService.findOneByIdOrName(sandboxIdOrName, authContext.organizationId) return this.sandboxService.toSandboxDto(sandbox) } @Delete(':sandboxIdOrName') @SkipThrottle({ authenticated: true }) @ThrottlerScope('sandbox-lifecycle') @ApiOperation({ summary: 'Delete sandbox', operationId: 'deleteSandbox', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Sandbox has been deleted', type: SandboxDto, }) 
@RequiredOrganizationResourcePermissions([OrganizationResourcePermission.DELETE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, }) async deleteSandbox( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, ): Promise { const sandbox = await this.sandboxService.destroy(sandboxIdOrName, authContext.organizationId) return this.sandboxService.toSandboxDto(sandbox) } @Post(':sandboxIdOrName/recover') @HttpCode(200) @SkipThrottle({ authenticated: true }) @ThrottlerScope('sandbox-lifecycle') @ApiOperation({ summary: 'Recover sandbox from error state', operationId: 'recoverSandbox', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Recovery initiated', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.RECOVER, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, }) async recoverSandbox( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, ): Promise { const recoveredSandbox = await this.sandboxService.recover(sandboxIdOrName, authContext.organization) let sandboxDto = await this.sandboxService.toSandboxDto(recoveredSandbox) if (sandboxDto.state !== SandboxState.STARTED) { sandboxDto = await this.waitForSandboxStarted(sandboxDto, 30) } return sandboxDto } @Post(':sandboxIdOrName/start') @HttpCode(200) @SkipThrottle({ authenticated: true }) @ThrottlerScope('sandbox-lifecycle') @ApiOperation({ summary: 'Start sandbox', operationId: 'startSandbox', }) @ApiParam({ name: 'sandboxIdOrName', 
description: 'ID or name of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Sandbox has been started or is being restored from archived state', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.START, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, }) async startSandbox( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, ): Promise { const sbx = await this.sandboxService.start(sandboxIdOrName, authContext.organization) let sandbox = await this.sandboxService.toSandboxDto(sbx) if (![SandboxState.ARCHIVED, SandboxState.RESTORING, SandboxState.STARTED].includes(sandbox.state)) { sandbox = await this.waitForSandboxStarted(sandbox, 30) } return sandbox } @Post(':sandboxIdOrName/stop') @HttpCode(200) // for Daytona Api compatibility @SkipThrottle({ authenticated: true }) @ThrottlerScope('sandbox-lifecycle') @ApiOperation({ summary: 'Stop sandbox', operationId: 'stopSandbox', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Sandbox has been stopped', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.STOP, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, }) async stopSandbox( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, ): Promise { const sandbox = await this.sandboxService.stop(sandboxIdOrName, authContext.organizationId) return this.sandboxService.toSandboxDto(sandbox) } @Post(':sandboxIdOrName/resize') 
@HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @SkipThrottle({ authenticated: true }) @ThrottlerScope('sandbox-lifecycle') @ApiOperation({ summary: 'Resize sandbox resources', operationId: 'resizeSandbox', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Sandbox has been resized', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @RequireFlagsEnabled({ flags: [{ flagKey: FeatureFlags.SANDBOX_RESIZE, defaultValue: false }] }) @Audit({ action: AuditAction.RESIZE, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ cpu: req.body?.cpu, memory: req.body?.memory, disk: req.body?.disk, }), }, }) async resizeSandbox( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Body() resizeSandboxDto: ResizeSandboxDto, ): Promise { const sandbox = await this.sandboxService.resize(sandboxIdOrName, resizeSandboxDto, authContext.organization) return this.sandboxService.toSandboxDto(sandbox) } @Put(':sandboxIdOrName/labels') @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: 'Replace sandbox labels', operationId: 'replaceLabels', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Labels have been successfully replaced', type: SandboxLabelsDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.REPLACE_LABELS, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, requestMetadata: { body: (req: 
TypedRequest) => ({ labels: req.body?.labels, }), }, }) async replaceLabels( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Body() labelsDto: SandboxLabelsDto, ): Promise { const sandbox = await this.sandboxService.replaceLabels( sandboxIdOrName, labelsDto.labels, authContext.organizationId, ) return this.sandboxService.toSandboxDto(sandbox) } @Put(':sandboxId/state') @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: 'Update sandbox state', operationId: 'updateSandboxState', }) @ApiParam({ name: 'sandboxId', description: 'ID of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Sandbox state has been successfully updated', }) @UseGuards(RunnerAuthGuard) @UseGuards(SandboxAccessGuard) async updateSandboxState( @Param('sandboxId') sandboxId: string, @Body() updateStateDto: UpdateSandboxStateDto, ): Promise { await this.sandboxService.updateState( sandboxId, updateStateDto.state, updateStateDto.recoverable, updateStateDto.errorReason, ) } @Post(':sandboxIdOrName/backup') @ApiOperation({ summary: 'Create sandbox backup', operationId: 'createBackup', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Sandbox backup has been initiated', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.CREATE_BACKUP, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, }) async createBackup( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, ): Promise { const sandbox = await this.sandboxService.createBackup(sandboxIdOrName, authContext.organizationId) return this.sandboxService.toSandboxDto(sandbox) } 
@Post(':sandboxIdOrName/public/:isPublic') @ApiOperation({ summary: 'Update public status', operationId: 'updatePublicStatus', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiParam({ name: 'isPublic', description: 'Public status to set', type: 'boolean', }) @ApiResponse({ status: 200, description: 'Public status has been successfully updated', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.UPDATE_PUBLIC_STATUS, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, requestMetadata: { params: (req) => ({ isPublic: req.params.isPublic, }), }, }) async updatePublicStatus( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Param('isPublic') isPublic: boolean, ): Promise { const sandbox = await this.sandboxService.updatePublicStatus(sandboxIdOrName, isPublic, authContext.organizationId) return this.sandboxService.toSandboxDto(sandbox) } @Post(':sandboxId/last-activity') @ApiOperation({ summary: 'Update sandbox last activity', operationId: 'updateLastActivity', }) @ApiParam({ name: 'sandboxId', description: 'ID of the sandbox', type: 'string', }) @ApiResponse({ status: 201, description: 'Last activity has been updated', }) @UseGuards(OrGuard([SandboxAccessGuard, ProxyGuard, SshGatewayGuard])) async updateLastActivity(@Param('sandboxId') sandboxId: string): Promise { await this.sandboxService.updateLastActivityAt(sandboxId, new Date()) } @Post(':sandboxIdOrName/autostop/:interval') @ApiOperation({ summary: 'Set sandbox auto-stop interval', operationId: 'setAutostopInterval', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiParam({ name: 'interval', description: 'Auto-stop interval in minutes (0 
to disable)', type: 'number', }) @ApiResponse({ status: 200, description: 'Auto-stop interval has been set', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.SET_AUTO_STOP_INTERVAL, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, requestMetadata: { params: (req) => ({ interval: req.params.interval, }), }, }) async setAutostopInterval( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Param('interval') interval: number, ): Promise { const sandbox = await this.sandboxService.setAutostopInterval(sandboxIdOrName, interval, authContext.organizationId) return this.sandboxService.toSandboxDto(sandbox) } @Post(':sandboxIdOrName/autoarchive/:interval') @ApiOperation({ summary: 'Set sandbox auto-archive interval', operationId: 'setAutoArchiveInterval', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiParam({ name: 'interval', description: 'Auto-archive interval in minutes (0 means the maximum interval will be used)', type: 'number', }) @ApiResponse({ status: 200, description: 'Auto-archive interval has been set', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.SET_AUTO_ARCHIVE_INTERVAL, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, requestMetadata: { params: (req) => ({ interval: req.params.interval, }), }, }) async setAutoArchiveInterval( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Param('interval') interval: number, ): Promise { const sandbox = await 
this.sandboxService.setAutoArchiveInterval( sandboxIdOrName, interval, authContext.organizationId, ) return this.sandboxService.toSandboxDto(sandbox) } @Post(':sandboxIdOrName/autodelete/:interval') @ApiOperation({ summary: 'Set sandbox auto-delete interval', operationId: 'setAutoDeleteInterval', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiParam({ name: 'interval', description: 'Auto-delete interval in minutes (negative value means disabled, 0 means delete immediately upon stopping)', type: 'number', }) @ApiResponse({ status: 200, description: 'Auto-delete interval has been set', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.SET_AUTO_DELETE_INTERVAL, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, requestMetadata: { params: (req) => ({ interval: req.params.interval, }), }, }) async setAutoDeleteInterval( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Param('interval') interval: number, ): Promise { const sandbox = await this.sandboxService.setAutoDeleteInterval( sandboxIdOrName, interval, authContext.organizationId, ) return this.sandboxService.toSandboxDto(sandbox) } // TODO: Network settings endpoint will not be enabled for now // @Post(':sandboxIdOrName/network-settings') // @ApiOperation({ // summary: 'Update sandbox network settings', // operationId: 'updateNetworkSettings', // }) // @ApiParam({ // name: 'sandboxIdOrName', // description: 'ID or name of the sandbox', // type: 'string', // }) // @ApiResponse({ // status: 200, // description: 'Network settings have been updated', // type: SandboxDto, // }) // @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) // 
@UseGuards(SandboxAccessGuard) // @Audit({ // action: AuditAction.UPDATE_NETWORK_SETTINGS, // targetType: AuditTarget.SANDBOX, // targetIdFromRequest: (req) => req.params.sandboxIdOrName, // targetIdFromResult: (result: SandboxDto) => result?.id, // requestMetadata: { // body: (req: TypedRequest) => ({ // networkBlockAll: req.body?.networkBlockAll, // networkAllowList: req.body?.networkAllowList, // }), // }, // }) // async updateNetworkSettings( // @AuthContext() authContext: OrganizationAuthContext, // @Param('sandboxIdOrName') sandboxIdOrName: string, // @Body() networkSettings: UpdateSandboxNetworkSettingsDto, // ): Promise { // const sandbox = await this.sandboxService.updateNetworkSettings( // sandboxIdOrName, // networkSettings.networkBlockAll, // networkSettings.networkAllowList, // authContext.organizationId, // ) // return SandboxDto.fromSandbox(sandbox, '') // } @Post(':sandboxIdOrName/archive') @HttpCode(200) @SkipThrottle({ authenticated: true }) @ThrottlerScope('sandbox-lifecycle') @ApiOperation({ summary: 'Archive sandbox', operationId: 'archiveSandbox', }) @ApiResponse({ status: 200, description: 'Sandbox has been archived', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.ARCHIVE, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, }) async archiveSandbox( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, ): Promise { const sandbox = await this.sandboxService.archive(sandboxIdOrName, authContext.organizationId) return this.sandboxService.toSandboxDto(sandbox) } @Get(':sandboxIdOrName/ports/:port/preview-url') @ApiOperation({ summary: 'Get preview URL for a sandbox port', operationId: 'getPortPreviewUrl', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the 
sandbox', type: 'string', }) @ApiParam({ name: 'port', description: 'Port number to get preview URL for', type: 'number', }) @ApiResponse({ status: 200, description: 'Preview URL for the specified port', type: PortPreviewUrlDto, }) @UseGuards(SandboxAccessGuard) async getPortPreviewUrl( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Param('port') port: number, ): Promise { return this.sandboxService.getPortPreviewUrl(sandboxIdOrName, authContext.organizationId, port) } @Get(':sandboxIdOrName/ports/:port/signed-preview-url') @ApiOperation({ summary: 'Get signed preview URL for a sandbox port', operationId: 'getSignedPortPreviewUrl', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiParam({ name: 'port', description: 'Port number to get signed preview URL for', type: 'integer', }) @ApiQuery({ name: 'expiresInSeconds', required: false, type: 'integer', description: 'Expiration time in seconds (default: 60 seconds)', }) @ApiResponse({ status: 200, description: 'Signed preview URL for the specified port', type: SignedPortPreviewUrlDto, }) @UseGuards(SandboxAccessGuard) async getSignedPortPreviewUrl( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Param('port') port: number, @Query('expiresInSeconds') expiresInSeconds?: number, ): Promise { return this.sandboxService.getSignedPortPreviewUrl( sandboxIdOrName, authContext.organizationId, port, expiresInSeconds, ) } @Post(':sandboxIdOrName/ports/:port/signed-preview-url/:token/expire') @ApiOperation({ summary: 'Expire signed preview URL for a sandbox port', operationId: 'expireSignedPortPreviewUrl', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiParam({ name: 'port', description: 'Port number to expire signed preview URL for', type: 'integer', }) @ApiParam({ name: 'token', description: 'Token to 
expire signed preview URL for', type: 'string', }) @ApiResponse({ status: 200, description: 'Signed preview URL has been expired', }) @UseGuards(SandboxAccessGuard) async expireSignedPortPreviewUrl( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Param('port') port: number, @Param('token') token: string, ): Promise { await this.sandboxService.expireSignedPreviewUrlToken(sandboxIdOrName, authContext.organizationId, token, port) } @Get(':sandboxIdOrName/build-logs') @ApiOperation({ summary: 'Get build logs', operationId: 'getBuildLogs', deprecated: true, description: 'This endpoint is deprecated. Use `getBuildLogsUrl` instead.', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Build logs stream', }) @ApiQuery({ name: 'follow', required: false, type: Boolean, description: 'Whether to follow the logs stream', }) @UseGuards(SandboxAccessGuard) async getBuildLogs( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Query('follow', new ParseBoolPipe({ optional: true })) follow?: boolean, ): Promise { const sandbox = await this.sandboxService.findOneByIdOrName(sandboxIdOrName, authContext.organizationId) if (!sandbox.runnerId) { throw new NotFoundException(`Sandbox with ID or name ${sandboxIdOrName} has no runner assigned`) } if (!sandbox.buildInfo) { throw new NotFoundException(`Sandbox with ID or name ${sandboxIdOrName} has no build info`) } const runner = await this.runnerService.findOneOrFail(sandbox.runnerId) if (!runner.apiUrl) { throw new NotFoundException(`Runner for sandbox ${sandboxIdOrName} has no API URL`) } const logProxy = new LogProxy( runner.apiUrl, sandbox.buildInfo.snapshotRef.split(':')[0], runner.apiKey, follow === true, req, res, next, ) return logProxy.create() } 
@Get(':sandboxIdOrName/build-logs-url') @ApiOperation({ summary: 'Get build logs URL', operationId: 'getBuildLogsUrl', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Build logs URL', type: UrlDto, }) @UseGuards(SandboxAccessGuard) async getBuildLogsUrl( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, ): Promise { const buildLogsUrl = await this.sandboxService.getBuildLogsUrl(sandboxIdOrName, authContext.organizationId) return new UrlDto(buildLogsUrl) } @Post(':sandboxIdOrName/ssh-access') @HttpCode(200) @ApiOperation({ summary: 'Create SSH access for sandbox', operationId: 'createSshAccess', }) @ApiParam({ name: 'sandboxIdOrName', description: 'ID or name of the sandbox', type: 'string', }) @ApiQuery({ name: 'expiresInMinutes', required: false, type: Number, description: 'Expiration time in minutes (default: 60)', }) @ApiResponse({ status: 200, description: 'SSH access has been created', type: SshAccessDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.CREATE_SSH_ACCESS, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SshAccessDto) => result?.sandboxId, requestMetadata: { query: (req) => ({ expiresInMinutes: req.query.expiresInMinutes, }), }, }) async createSshAccess( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Query('expiresInMinutes') expiresInMinutes?: number, ): Promise { return await this.sandboxService.createSshAccess(sandboxIdOrName, expiresInMinutes, authContext.organizationId) } @Delete(':sandboxIdOrName/ssh-access') @HttpCode(200) @ApiOperation({ summary: 'Revoke SSH access for sandbox', operationId: 'revokeSshAccess', }) @ApiParam({ name: 'sandboxIdOrName', 
description: 'ID or name of the sandbox', type: 'string', }) @ApiQuery({ name: 'token', required: false, type: String, description: 'SSH access token to revoke. If not provided, all SSH access for the sandbox will be revoked.', }) @ApiResponse({ status: 200, description: 'SSH access has been revoked', type: SandboxDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(SandboxAccessGuard) @Audit({ action: AuditAction.REVOKE_SSH_ACCESS, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxIdOrName, targetIdFromResult: (result: SandboxDto) => result?.id, requestMetadata: { query: (req) => ({ token: req.query.token, }), }, }) async revokeSshAccess( @AuthContext() authContext: OrganizationAuthContext, @Param('sandboxIdOrName') sandboxIdOrName: string, @Query('token') token?: string, ): Promise { const sandbox = await this.sandboxService.revokeSshAccess(sandboxIdOrName, token, authContext.organizationId) return this.sandboxService.toSandboxDto(sandbox) } @Get('ssh-access/validate') @ApiOperation({ summary: 'Validate SSH access for sandbox', operationId: 'validateSshAccess', }) @ApiQuery({ name: 'token', required: true, type: String, description: 'SSH access token to validate', }) @ApiResponse({ status: 200, description: 'SSH access validation result', type: SshAccessValidationDto, }) async validateSshAccess(@Query('token') token: string): Promise { const result = await this.sandboxService.validateSshAccess(token) return SshAccessValidationDto.fromValidationResult(result.valid, result.sandboxId) } @Get(':sandboxId/toolbox-proxy-url') @ApiOperation({ summary: 'Get toolbox proxy URL for a sandbox', operationId: 'getToolboxProxyUrl', }) @ApiParam({ name: 'sandboxId', description: 'ID of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Toolbox proxy URL for the specified sandbox', type: ToolboxProxyUrlDto, }) @UseGuards(SandboxAccessGuard) async 
getToolboxProxyUrl(@Param('sandboxId') sandboxId: string): Promise { const url = await this.sandboxService.getToolboxProxyUrl(sandboxId) return new ToolboxProxyUrlDto(url) } // wait up to `timeoutSeconds` for the sandbox to start; if it doesn’t, return current sandbox private async waitForSandboxStarted(sandbox: SandboxDto, timeoutSeconds: number): Promise { let latestSandbox: Sandbox const waitForStarted = new Promise((resolve, reject) => { // eslint-disable-next-line let timeout: NodeJS.Timeout const handleStateUpdated = (event: SandboxStateUpdatedEvent) => { if (event.sandbox.id !== sandbox.id) { return } latestSandbox = event.sandbox if (event.sandbox.state === SandboxState.STARTED) { this.sandboxCallbacks.delete(sandbox.id) clearTimeout(timeout) resolve(this.sandboxService.toSandboxDto(event.sandbox)) } if (event.sandbox.state === SandboxState.ERROR || event.sandbox.state === SandboxState.BUILD_FAILED) { this.sandboxCallbacks.delete(sandbox.id) clearTimeout(timeout) reject(new BadRequestError(`Sandbox failed to start: ${event.sandbox.errorReason}`)) } } this.sandboxCallbacks.set(sandbox.id, handleStateUpdated) timeout = setTimeout(() => { this.sandboxCallbacks.delete(sandbox.id) if (latestSandbox) { resolve(this.sandboxService.toSandboxDto(latestSandbox)) } else { resolve(sandbox) } }, timeoutSeconds * 1000) }) return waitForStarted } private handleSandboxStateUpdated(event: SandboxStateUpdatedEvent) { const callback = this.sandboxCallbacks.get(event.sandbox.id) if (callback) { callback(event) } } } ================================================ FILE: apps/api/src/sandbox/controllers/snapshot.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Body, Controller, Delete, Get, Param, Patch, Post, Query, UseGuards, HttpCode, ForbiddenException, Logger, NotFoundException, Res, Request, RawBodyRequest, Next, ParseBoolPipe, } from '@nestjs/common' import { IncomingMessage, ServerResponse } from 'http' import { NextFunction } from 'express' import { SnapshotService } from '../services/snapshot.service' import { RunnerService } from '../services/runner.service' import { ApiOAuth2, ApiTags, ApiOperation, ApiResponse, ApiParam, ApiQuery, ApiHeader, ApiBearerAuth, } from '@nestjs/swagger' import { CreateSnapshotDto } from '../dto/create-snapshot.dto' import { SnapshotDto } from '../dto/snapshot.dto' import { PaginatedSnapshotsDto } from '../dto/paginated-snapshots.dto' import { SnapshotAccessGuard } from '../guards/snapshot-access.guard' import { SnapshotReadAccessGuard } from '../guards/snapshot-read-access.guard' import { CustomHeaders } from '../../common/constants/header.constants' import { AuthContext } from '../../common/decorators/auth-context.decorator' import { OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { RequiredOrganizationResourcePermissions } from '../../organization/decorators/required-organization-resource-permissions.decorator' import { OrganizationResourcePermission } from '../../organization/enums/organization-resource-permission.enum' import { OrganizationResourceActionGuard } from '../../organization/guards/organization-resource-action.guard' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { SystemActionGuard } from '../../auth/system-action.guard' import { RequiredSystemRole } from '../../common/decorators/required-role.decorator' import { SystemRole } from '../../user/enums/system-role.enum' import { SetSnapshotGeneralStatusDto } from '../dto/update-snapshot.dto' import { LogProxy } from '../proxy/log-proxy' import { BadRequestError } from 
'../../exceptions/bad-request.exception' import { Snapshot } from '../entities/snapshot.entity' import { Audit, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } from '../../audit/enums/audit-target.enum' import { ListSnapshotsQueryDto } from '../dto/list-snapshots-query.dto' import { SnapshotState } from '../enums/snapshot-state.enum' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' import { UrlDto } from '../../common/dto/url.dto' @ApiTags('snapshots') @Controller('snapshots') @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(CombinedAuthGuard, SystemActionGuard, OrganizationResourceActionGuard, AuthenticatedRateLimitGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class SnapshotController { private readonly logger = new Logger(SnapshotController.name) constructor( private readonly snapshotService: SnapshotService, private readonly runnerService: RunnerService, ) {} @Post() @HttpCode(200) @ApiOperation({ summary: 'Create a new snapshot', operationId: 'createSnapshot', }) @ApiResponse({ status: 200, description: 'The snapshot has been successfully created.', type: SnapshotDto, }) @ApiResponse({ status: 400, description: 'Bad request - Snapshots with tag ":latest" are not allowed', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SNAPSHOTS]) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.SNAPSHOT, targetIdFromResult: (result: SnapshotDto) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ name: req.body?.name, imageName: req.body?.imageName, entrypoint: req.body?.entrypoint, general: req.body?.general, cpu: req.body?.cpu, memory: req.body?.memory, disk: req.body?.disk, gpu: req.body?.gpu, buildInfo: req.body?.buildInfo, }), }, }) async createSnapshot( @AuthContext() authContext: OrganizationAuthContext, @Body() createSnapshotDto: 
CreateSnapshotDto, ): Promise { if (createSnapshotDto.general && authContext.role !== SystemRole.ADMIN) { throw new ForbiddenException('Insufficient permissions for creating general snapshots') } if (createSnapshotDto.buildInfo) { if (createSnapshotDto.imageName) { throw new BadRequestError('Cannot specify an image name when using a build info entry') } if (createSnapshotDto.entrypoint) { throw new BadRequestError('Cannot specify an entrypoint when using a build info entry') } } else { if (!createSnapshotDto.imageName) { throw new BadRequestError('Must specify an image name when not using a build info entry') } } // TODO: consider - if using transient registry, prepend the snapshot name with the username const snapshot = createSnapshotDto.buildInfo ? await this.snapshotService.createFromBuildInfo(authContext.organization, createSnapshotDto) : await this.snapshotService.createFromPull(authContext.organization, createSnapshotDto) return SnapshotDto.fromSnapshot(snapshot) } @Get('can-cleanup-image') @ApiOperation({ summary: 'Check if an image can be cleaned up', operationId: 'canCleanupImage', }) @ApiQuery({ name: 'imageName', required: true, type: String, description: 'Image name with tag to check', }) @ApiResponse({ status: 200, description: 'Boolean indicating if image can be cleaned up', type: Boolean, }) @RequiredSystemRole(SystemRole.ADMIN) async canCleanupImage(@Query('imageName') imageName: string): Promise { return this.snapshotService.canCleanupImage(imageName) } @Get(':id') @ApiOperation({ summary: 'Get snapshot by ID or name', operationId: 'getSnapshot', }) @ApiParam({ name: 'id', description: 'Snapshot ID or name', }) @ApiResponse({ status: 200, description: 'The snapshot', type: SnapshotDto, }) @ApiResponse({ status: 404, description: 'Snapshot not found', }) @UseGuards(SnapshotReadAccessGuard) async getSnapshot( @Param('id') snapshotIdOrName: string, @AuthContext() authContext: OrganizationAuthContext, ): Promise { const snapshot = await 
this.snapshotService.getSnapshotWithRegions(snapshotIdOrName, authContext.organizationId) return SnapshotDto.fromSnapshot(snapshot) } @Delete(':id') @ApiOperation({ summary: 'Delete snapshot', operationId: 'removeSnapshot', }) @ApiParam({ name: 'id', description: 'Snapshot ID', }) @ApiResponse({ status: 200, description: 'Snapshot has been deleted', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.DELETE_SNAPSHOTS]) @UseGuards(SnapshotAccessGuard) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.SNAPSHOT, targetIdFromRequest: (req) => req.params.id, }) async removeSnapshot(@Param('id') snapshotId: string): Promise { await this.snapshotService.removeSnapshot(snapshotId) } @Get() @ApiOperation({ summary: 'List all snapshots', operationId: 'getAllSnapshots', }) @ApiResponse({ status: 200, description: 'Paginated list of all snapshots', type: PaginatedSnapshotsDto, }) async getAllSnapshots( @AuthContext() authContext: OrganizationAuthContext, @Query() queryParams: ListSnapshotsQueryDto, ): Promise { const { page, limit, name, sort, order } = queryParams const result = await this.snapshotService.getAllSnapshots( authContext.organizationId, page, limit, { name }, { field: sort, direction: order }, ) return { items: result.items.map(SnapshotDto.fromSnapshot), total: result.total, page: result.page, totalPages: result.totalPages, } } @Patch(':id/general') @ApiOperation({ summary: 'Set snapshot general status', operationId: 'setSnapshotGeneralStatus', }) @ApiParam({ name: 'id', description: 'Snapshot ID', }) @ApiResponse({ status: 200, description: 'Snapshot general status has been set', type: SnapshotDto, }) @RequiredSystemRole(SystemRole.ADMIN) @Audit({ action: AuditAction.SET_GENERAL_STATUS, targetType: AuditTarget.SNAPSHOT, targetIdFromRequest: (req) => req.params.id, requestMetadata: { body: (req: TypedRequest) => ({ general: req.body?.general, }), }, }) async setSnapshotGeneralStatus( @Param('id') snapshotId: string, @Body() dto: 
SetSnapshotGeneralStatusDto, ): Promise { const snapshot = await this.snapshotService.setSnapshotGeneralStatus(snapshotId, dto.general) return SnapshotDto.fromSnapshot(snapshot) } @Get(':id/build-logs') @ApiOperation({ summary: 'Get snapshot build logs', operationId: 'getSnapshotBuildLogs', deprecated: true, description: 'This endpoint is deprecated. Use `getSnapshotBuildLogsUrl` instead.', }) @ApiParam({ name: 'id', description: 'Snapshot ID', }) @ApiQuery({ name: 'follow', required: false, type: Boolean, description: 'Whether to follow the logs stream', }) @UseGuards(SnapshotAccessGuard) async getSnapshotBuildLogs( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, @Param('id') snapshotId: string, @Query('follow', new ParseBoolPipe({ optional: true })) follow?: boolean, ): Promise { let snapshot = await this.snapshotService.getSnapshot(snapshotId) // Check if the snapshot has build info if (!snapshot.buildInfo) { throw new NotFoundException(`Snapshot ${snapshotId} has no build info`) } if (snapshot.state == SnapshotState.ACTIVE) { // Close the connection res.end() return } // Retry until a runner is assigned or timeout after 30 seconds const startTime = Date.now() const timeoutMs = 30 * 1000 while (!snapshot.initialRunnerId) { if (Date.now() - startTime > timeoutMs) { throw new NotFoundException(`Timeout waiting for build runner assignment for snapshot ${snapshotId}`) } await new Promise((resolve) => setTimeout(resolve, 1000)) snapshot = await this.snapshotService.getSnapshot(snapshotId) } const runner = await this.runnerService.findOneOrFail(snapshot.initialRunnerId) if (!runner.apiUrl) { throw new NotFoundException(`Build runner for snapshot ${snapshotId} has no API URL`) } const logProxy = new LogProxy( runner.apiUrl, snapshot.buildInfo.snapshotRef, runner.apiKey, follow === true, req, res, next, ) return logProxy.create() } @Get(':id/build-logs-url') @ApiOperation({ summary: 'Get snapshot build logs URL', operationId: 
'getSnapshotBuildLogsUrl', }) @ApiParam({ name: 'id', description: 'Snapshot ID', }) @ApiResponse({ status: 200, description: 'The snapshot build logs URL', type: UrlDto, }) @UseGuards(SnapshotAccessGuard) async getSnapshotBuildLogsUrl(@Param('id') snapshotId: string): Promise { let snapshot = await this.snapshotService.getSnapshot(snapshotId) // Check if the snapshot has build info if (!snapshot.buildInfo) { throw new NotFoundException(`Snapshot ${snapshotId} has no build info`) } // Retry until a runner is assigned or timeout after 30 seconds const startTime = Date.now() const timeoutMs = 30 * 1000 while (!snapshot.initialRunnerId) { if (Date.now() - startTime > timeoutMs) { throw new NotFoundException(`Timeout waiting for build runner assignment for snapshot ${snapshotId}`) } await new Promise((resolve) => setTimeout(resolve, 1000)) snapshot = await this.snapshotService.getSnapshot(snapshotId) } const url = await this.snapshotService.getBuildLogsUrl(snapshot) return new UrlDto(url) } @Post(':id/activate') @HttpCode(200) @ApiOperation({ summary: 'Activate a snapshot', operationId: 'activateSnapshot', }) @ApiParam({ name: 'id', description: 'Snapshot ID', }) @ApiResponse({ status: 200, description: 'The snapshot has been successfully activated.', type: SnapshotDto, }) @ApiResponse({ status: 400, description: 'Bad request - Snapshot is already active, not in inactive state, or has associated snapshot runners', }) @ApiResponse({ status: 404, description: 'Snapshot not found', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SNAPSHOTS]) @UseGuards(SnapshotAccessGuard) @Audit({ action: AuditAction.ACTIVATE, targetType: AuditTarget.SNAPSHOT, targetIdFromRequest: (req) => req.params.id, }) async activateSnapshot( @Param('id') snapshotId: string, @AuthContext() authContext: OrganizationAuthContext, ): Promise { const snapshot = await this.snapshotService.activateSnapshot(snapshotId, authContext.organization) return 
SnapshotDto.fromSnapshot(snapshot) } @Post(':id/deactivate') @HttpCode(204) @ApiOperation({ summary: 'Deactivate a snapshot', operationId: 'deactivateSnapshot', }) @ApiParam({ name: 'id', description: 'Snapshot ID', }) @ApiResponse({ status: 204, description: 'The snapshot has been successfully deactivated.', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SNAPSHOTS]) @UseGuards(SnapshotAccessGuard) @Audit({ action: AuditAction.DEACTIVATE, targetType: AuditTarget.SNAPSHOT, targetIdFromRequest: (req) => req.params.id, }) async deactivateSnapshot(@Param('id') snapshotId: string) { await this.snapshotService.deactivateSnapshot(snapshotId) } } ================================================ FILE: apps/api/src/sandbox/controllers/toolbox.deprecated.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get, Post, Delete, Body, Param, Request, Logger, UseGuards, HttpCode, UseInterceptors, RawBodyRequest, BadRequestException, Res, Next, } from '@nestjs/common' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { ApiOAuth2, ApiResponse, ApiQuery, ApiOperation, ApiConsumes, ApiBody, ApiTags, ApiParam, ApiHeader, ApiBearerAuth, } from '@nestjs/swagger' import { FileInfoDto, MatchDto, SearchFilesResponseDto, ReplaceRequestDto, ReplaceResultDto, GitAddRequestDto, GitBranchRequestDto, GitDeleteBranchRequestDto, GitCloneRequestDto, GitCommitRequestDto, GitCommitResponseDto, GitRepoRequestDto, GitStatusDto, ListBranchResponseDto, GitCommitInfoDto, GitCheckoutRequestDto, ExecuteRequestDto, ExecuteResponseDto, ProjectDirResponseDto, CreateSessionRequestDto, SessionExecuteRequestDto, SessionExecuteResponseDto, SessionDto, CommandDto, MousePositionDto, MouseMoveRequestDto, MouseMoveResponseDto, MouseClickRequestDto, MouseClickResponseDto, MouseDragRequestDto, MouseDragResponseDto, MouseScrollRequestDto, 
MouseScrollResponseDto, KeyboardTypeRequestDto, KeyboardPressRequestDto, KeyboardHotkeyRequestDto, ScreenshotResponseDto, RegionScreenshotResponseDto, CompressedScreenshotResponseDto, DisplayInfoResponseDto, WindowsResponseDto, ComputerUseStartResponseDto, ComputerUseStopResponseDto, ComputerUseStatusResponseDto, ProcessStatusResponseDto, ProcessRestartResponseDto, ProcessLogsResponseDto, ProcessErrorsResponseDto, UserHomeDirResponseDto, WorkDirResponseDto, PtyCreateRequestDto, PtyCreateResponseDto, PtySessionInfoDto, PtyListResponseDto, PtyResizeRequestDto, } from '../dto/toolbox.deprecated.dto' import { ToolboxService } from '../services/toolbox.deprecated.service' import { ContentTypeInterceptor } from '../../common/interceptors/content-type.interceptors' import { CompletionListDto, LspCompletionParamsDto, LspDocumentRequestDto, LspSymbolDto, LspServerRequestDto, } from '../dto/lsp.dto' import { createProxyMiddleware, RequestHandler, fixRequestBody, Options } from 'http-proxy-middleware' import { IncomingMessage } from 'http' import { NextFunction } from 'express' import { ServerResponse } from 'http' import { SandboxAccessGuard } from '../guards/sandbox-access.guard' import { CustomHeaders } from '../../common/constants/header.constants' import { OrganizationResourceActionGuard } from '../../organization/guards/organization-resource-action.guard' import { RequiredOrganizationResourcePermissions } from '../../organization/decorators/required-organization-resource-permissions.decorator' import { OrganizationResourcePermission } from '../../organization/enums/organization-resource-permission.enum' import followRedirects from 'follow-redirects' import { UploadFileDto } from '../dto/upload-file.dto' import { AuditAction } from '../../audit/enums/audit-action.enum' import { Audit, MASKED_AUDIT_VALUE, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditTarget } from '../../audit/enums/audit-target.enum' import { InjectRedis } from 
'@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { DownloadFilesDto } from '../dto/download-files.dto' import { SkipThrottle } from '@nestjs/throttler' followRedirects.maxRedirects = 10 followRedirects.maxBodyLength = 50 * 1024 * 1024 type RunnerInfo = { apiKey: string apiUrl: string } const RUNNER_INFO_CACHE_PREFIX = 'proxy:sandbox-runner-info:' const RUNNER_INFO_CACHE_TTL = 2 * 60 // 2 minutes @ApiTags('toolbox') @Controller('toolbox') @ApiHeader(CustomHeaders.ORGANIZATION_ID) @SkipThrottle({ anonymous: true, authenticated: true }) @UseGuards(CombinedAuthGuard, OrganizationResourceActionGuard, SandboxAccessGuard) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class ToolboxController { private readonly logger = new Logger(ToolboxController.name) private readonly toolboxProxy: RequestHandler< RawBodyRequest, ServerResponse, NextFunction > private readonly toolboxStreamProxy: RequestHandler< RawBodyRequest, ServerResponse, NextFunction > constructor( private readonly toolboxService: ToolboxService, @InjectRedis() private readonly redis: Redis, ) { const commonProxyOptions: Options = { router: async (req: RawBodyRequest) => { // eslint-disable-next-line no-useless-escape const sandboxId = req.url.match(/^\/api\/toolbox\/([^\/]+)\/toolbox/)?.[1] try { const runnerInfo = await this.getRunnerInfo(sandboxId) // @ts-expect-error - used later to set request headers req._runnerApiKey = runnerInfo.apiKey return runnerInfo.apiUrl } catch (err) { // @ts-expect-error - used later to throw error req._err = err } // Must return a valid url return 'http://target-error' }, pathRewrite: (path) => { // eslint-disable-next-line no-useless-escape const sandboxId = path.match(/^\/api\/toolbox\/([^\/]+)\/toolbox/)?.[1] const routePath = path.split(`/api/toolbox/${sandboxId}/toolbox`)[1] const newPath = `/sandboxes/${sandboxId}/toolbox${routePath}` // Handle files 
path which is served on /files/ in the daemon // TODO: Circle back to this after daemon versioning // We can then switch /files/ to /files and only perform this for older daemon versions const url = new URL(`http://runner${newPath}`) if (url.pathname.endsWith('/files')) { url.pathname = url.pathname + '/' return url.toString().replace('http://runner', '') } return newPath }, changeOrigin: true, autoRewrite: true, proxyTimeout: 5 * 60 * 1000, on: { proxyReq: (proxyReq, req, res) => { // @ts-expect-error - set when routing if (req._err) { res.writeHead(400, { 'Content-Type': 'application/json' }) // @ts-expect-error - set when routing res.end(JSON.stringify(req._err)) return } // @ts-expect-error - set when routing const runnerApiKey = req._runnerApiKey try { proxyReq.setHeader('Authorization', `Bearer ${runnerApiKey}`) } catch { // Ignore error - headers are already set return } fixRequestBody(proxyReq, req) }, proxyRes: (proxyRes, req, res) => { // console.log('proxyRes', proxyRes) }, }, } this.toolboxProxy = createProxyMiddleware({ ...commonProxyOptions, followRedirects: true, }) this.toolboxStreamProxy = createProxyMiddleware({ ...commonProxyOptions, followRedirects: false, }) } private async getRunnerInfo(sandboxId: string): Promise { let runnerInfo: RunnerInfo | null = null try { const cached: { value: RunnerInfo } = JSON.parse(await this.redis.get(`${RUNNER_INFO_CACHE_PREFIX}${sandboxId}`)) runnerInfo = cached.value } catch { // Ignore error and fetch from db } if (!runnerInfo) { const runner = await this.toolboxService.getRunner(sandboxId) if (!runner.proxyUrl) { throw new BadRequestException('Runner proxy URL not found') } runnerInfo = { apiKey: runner.apiKey, apiUrl: runner.proxyUrl, } await this.redis.set( `${RUNNER_INFO_CACHE_PREFIX}${sandboxId}`, JSON.stringify({ value: runnerInfo }), 'EX', RUNNER_INFO_CACHE_TTL, ) } return runnerInfo } @Get(':sandboxId/toolbox/project-dir') @ApiOperation({ summary: '[DEPRECATED] Get sandbox project dir', operationId: 
'getProjectDir_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Project directory retrieved successfully', type: ProjectDirResponseDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async getProjectDir( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/user-home-dir') @ApiOperation({ summary: '[DEPRECATED] Get sandbox user home dir', operationId: 'getUserHomeDir_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'User home directory retrieved successfully', type: UserHomeDirResponseDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async getUserHomeDir( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/work-dir') @ApiOperation({ summary: '[DEPRECATED] Get sandbox work-dir', operationId: 'getWorkDir_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Work-dir retrieved successfully', type: WorkDirResponseDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async getWorkDir( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/files') @ApiOperation({ summary: '[DEPRECATED] List files', operationId: 'listFiles_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Files listed successfully', type: [FileInfoDto], }) @ApiQuery({ name: 'path', type: String, required: false }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async listFiles( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Delete(':sandboxId/toolbox/files') @ApiOperation({ 
summary: '[DEPRECATED] Delete file', description: 'Delete file inside sandbox', operationId: 'deleteFile_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'File deleted successfully', }) @ApiQuery({ name: 'path', type: String, required: true }) @ApiQuery({ name: 'recursive', type: Boolean, required: false }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_DELETE_FILE, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { query: (req) => ({ path: req.query.path, }), }, }) async deleteFile( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/files/download') @ApiOperation({ summary: '[DEPRECATED] Download file', description: 'Download file from sandbox', operationId: 'downloadFile_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'File downloaded successfully', schema: { type: 'string', format: 'binary', }, }) @ApiQuery({ name: 'path', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_DOWNLOAD_FILE, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { query: (req) => ({ path: req.query.path, }), }, }) async downloadFile( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/files/bulk-download') @ApiOperation({ summary: '[DEPRECATED] Download multiple files', description: 'Streams back a multipart/form-data bundle of the requested paths', operationId: 'downloadFiles_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'A multipart/form-data response with each file as a part', schema: { type: 'string', format: 'binary', }, }) 
@ApiBody({ type: DownloadFilesDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async downloadFiles( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/files/find') @ApiOperation({ summary: '[DEPRECATED] Search for text/pattern in files', description: 'Search for text/pattern inside sandbox files', operationId: 'findInFiles_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Search completed successfully', type: [MatchDto], }) @ApiQuery({ name: 'pattern', type: String, required: true }) @ApiQuery({ name: 'path', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async findInFiles( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/files/folder') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Create folder', description: 'Create folder inside sandbox', operationId: 'createFolder_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Folder created successfully', }) @ApiQuery({ name: 'mode', type: String, required: true }) @ApiQuery({ name: 'path', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_CREATE_FOLDER, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { query: (req) => ({ path: req.query.path, mode: req.query.mode, }), }, }) async createFolder( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/files/info') @ApiOperation({ summary: '[DEPRECATED] Get file info', description: 'Get file info inside sandbox', 
operationId: 'getFileInfo_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'File info retrieved successfully', type: FileInfoDto, }) @ApiQuery({ name: 'path', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async getFileInfo( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/files/move') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Move file', description: 'Move file inside sandbox', operationId: 'moveFile_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'File moved successfully', }) @ApiQuery({ name: 'destination', type: String, required: true }) @ApiQuery({ name: 'source', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_MOVE_FILE, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { query: (req) => ({ destination: req.query.destination, source: req.query.source, }), }, }) async moveFile( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/files/permissions') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Set file permissions', description: 'Set file owner/group/permissions inside sandbox', operationId: 'setFilePermissions_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'File permissions updated successfully', }) @ApiQuery({ name: 'mode', type: String, required: false }) @ApiQuery({ name: 'group', type: String, required: false }) @ApiQuery({ name: 'owner', type: String, required: false }) @ApiQuery({ name: 'path', type: String, required: true }) @ApiParam({ name: 
'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_SET_FILE_PERMISSIONS, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { query: (req) => ({ mode: req.query.mode, group: req.query.group, owner: req.query.owner, path: req.query.path, }), }, }) async setFilePermissions( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/files/replace') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Replace in files', description: 'Replace text/pattern in multiple files inside sandbox', operationId: 'replaceInFiles_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Text replaced successfully', type: [ReplaceResultDto], }) @ApiBody({ type: ReplaceRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_REPLACE_IN_FILES, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { body: (req: TypedRequest) => ({ files: req.body?.files, pattern: req.body?.pattern, newValue: req.body?.newValue, }), }, }) async replaceInFiles( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/files/search') @ApiOperation({ summary: '[DEPRECATED] Search files', description: 'Search for files inside sandbox', operationId: 'searchFiles_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Search completed successfully', type: SearchFilesResponseDto, }) @ApiQuery({ name: 'pattern', type: String, required: true }) @ApiQuery({ name: 'path', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async searchFiles( @Request() req: RawBodyRequest, @Res() 
res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @HttpCode(200) @Post(':sandboxId/toolbox/files/upload') @ApiOperation({ summary: '[DEPRECATED] Upload file', description: 'Upload file inside sandbox', operationId: 'uploadFile_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'File uploaded successfully', }) @ApiConsumes('multipart/form-data') @ApiBody({ schema: { type: 'object', properties: { file: { type: 'string', format: 'binary', }, }, }, }) @ApiQuery({ name: 'path', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_UPLOAD_FILE, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { query: (req) => ({ path: req.query.path, }), }, }) async uploadFile( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return this.toolboxProxy(req, res, next) } @HttpCode(200) @Post(':sandboxId/toolbox/files/bulk-upload') @ApiOperation({ summary: '[DEPRECATED] Upload multiple files', description: 'Upload multiple files inside sandbox', operationId: 'uploadFiles_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Files uploaded successfully', }) @ApiConsumes('multipart/form-data') @ApiBody({ type: [UploadFileDto] }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_BULK_UPLOAD_FILES, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, }) async uploadFiles( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return this.toolboxStreamProxy(req, res, next) } // Git operations @Post(':sandboxId/toolbox/git/add') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Add files', description: 'Add files to git commit', operationId: 
'gitAddFiles_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Files added to git successfully', }) @ApiBody({ type: GitAddRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_GIT_ADD_FILES, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { body: (req: TypedRequest) => ({ path: req.body?.path, files: req.body?.files, }), }, }) async gitAddFiles( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/git/branches') @ApiOperation({ summary: '[DEPRECATED] Get branch list', description: 'Get branch list from git repository', operationId: 'gitListBranches_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Branch list retrieved successfully', type: ListBranchResponseDto, }) @ApiQuery({ name: 'path', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async gitBranchList( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/git/branches') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Create branch', description: 'Create branch on git repository', operationId: 'gitCreateBranch_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Branch created successfully', }) @ApiBody({ type: GitBranchRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_GIT_CREATE_BRANCH, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { body: (req: TypedRequest) => ({ path: req.body?.path, name: req.body?.name, }), }, }) async gitCreateBranch( @Request() req: RawBodyRequest, 
@Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Delete(':sandboxId/toolbox/git/branches') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Delete branch', description: 'Delete branch on git repository', operationId: 'gitDeleteBranch_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Branch deleted successfully', }) @ApiBody({ type: GitDeleteBranchRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_GIT_DELETE_BRANCH, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { body: (req: TypedRequest) => ({ path: req.body?.path, name: req.body?.name, }), }, }) async gitDeleteBranch( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/git/clone') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Clone repository', description: 'Clone git repository', operationId: 'gitCloneRepository_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Repository cloned successfully', }) @ApiBody({ type: GitCloneRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_GIT_CLONE_REPOSITORY, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { body: (req: TypedRequest) => ({ url: req.body?.url, path: req.body?.path, username: req.body?.username, password: req.body?.password ? 
MASKED_AUDIT_VALUE : undefined, branch: req.body?.branch, commit_id: req.body?.commit_id, }), }, }) async gitCloneRepository( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/git/commit') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Commit changes', description: 'Commit changes to git repository', operationId: 'gitCommitChanges_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Changes committed successfully', type: GitCommitResponseDto, }) @ApiBody({ type: GitCommitRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_GIT_COMMIT_CHANGES, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { body: (req: TypedRequest) => ({ path: req.body?.path, message: req.body?.message, author: req.body?.author, email: req.body?.email, allow_empty: req.body?.allow_empty, }), }, }) async gitCommitChanges( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/git/history') @ApiOperation({ summary: '[DEPRECATED] Get commit history', description: 'Get commit history from git repository', operationId: 'gitGetHistory_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Commit history retrieved successfully', type: [GitCommitInfoDto], }) @ApiQuery({ name: 'path', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async gitCommitHistory( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/git/pull') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ 
summary: '[DEPRECATED] Pull changes', description: 'Pull changes from remote', operationId: 'gitPullChanges_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Changes pulled successfully', }) @ApiBody({ type: GitRepoRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_GIT_PULL_CHANGES, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { body: (req: TypedRequest) => ({ path: req.body?.path, username: req.body?.username, password: req.body?.password ? MASKED_AUDIT_VALUE : undefined, }), }, }) async gitPullChanges( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/git/push') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Push changes', description: 'Push changes to remote', operationId: 'gitPushChanges_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Changes pushed successfully', }) @ApiBody({ type: GitRepoRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_GIT_PUSH_CHANGES, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { body: (req: TypedRequest) => ({ path: req.body?.path, username: req.body?.username, password: req.body?.password ? 
MASKED_AUDIT_VALUE : undefined, }), }, }) async gitPushChanges( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/git/checkout') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Checkout branch', description: 'Checkout branch or commit in git repository', operationId: 'gitCheckoutBranch_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Branch checked out successfully', }) @ApiBody({ type: GitCheckoutRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_GIT_CHECKOUT_BRANCH, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { body: (req: TypedRequest) => ({ path: req.body?.path, branch: req.body?.branch, }), }, }) async gitCheckoutBranch( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/git/status') @ApiOperation({ summary: '[DEPRECATED] Get git status', description: 'Get status from git repository', operationId: 'gitGetStatus_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Git status retrieved successfully', type: GitStatusDto, }) @ApiQuery({ name: 'path', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async gitStatus( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/process/execute') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Execute command', description: 'Execute command synchronously inside sandbox', operationId: 'executeCommand_deprecated', deprecated: true, }) @ApiResponse({ 
status: 200, description: 'Command executed successfully', type: ExecuteResponseDto, }) @Audit({ action: AuditAction.TOOLBOX_EXECUTE_COMMAND, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { body: (req: TypedRequest) => ({ command: req.body?.command, cwd: req.body?.cwd, timeout: req.body?.timeout, }), }, }) async executeCommand( @Param('sandboxId') sandboxId: string, @Body() executeRequest: ExecuteRequestDto, ): Promise { const response = await this.toolboxService.forwardRequestToRunner( sandboxId, 'POST', '/toolbox/process/execute', executeRequest, ) // TODO: use new proxy - can't use it now because of this return { exitCode: response.exitCode ?? response.code, result: response.result, } } // Session management endpoints @Get(':sandboxId/toolbox/process/session') @ApiOperation({ summary: '[DEPRECATED] List sessions', description: 'List all active sessions in the sandbox', operationId: 'listSessions_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Sessions retrieved successfully', type: [SessionDto], }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async listSessions( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/process/session/:sessionId') @ApiOperation({ summary: '[DEPRECATED] Get session', description: 'Get session by ID', operationId: 'getSession_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Session retrieved successfully', type: SessionDto, }) @ApiParam({ name: 'sessionId', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async getSession( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/process/session') @HttpCode(200) 
@UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Create session', description: 'Create a new session in the sandbox', operationId: 'createSession_deprecated', deprecated: true, }) @ApiResponse({ status: 200, }) @ApiBody({ type: CreateSessionRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_CREATE_SESSION, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { body: (req: TypedRequest) => ({ sessionId: req.body?.sessionId, }), }, }) async createSession( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/process/session/:sessionId/exec') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Execute command in session', description: 'Execute a command in a specific session', operationId: 'executeSessionCommand_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Command executed successfully', type: SessionExecuteResponseDto, }) @ApiResponse({ status: 202, description: 'Command accepted and is being processed', type: SessionExecuteResponseDto, }) @ApiBody({ type: SessionExecuteRequestDto, }) @ApiParam({ name: 'sessionId', type: String, required: true }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @Audit({ action: AuditAction.TOOLBOX_SESSION_EXECUTE_COMMAND, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { params: (req) => ({ sessionId: req.params.sessionId, }), body: (req: TypedRequest) => ({ command: req.body?.command, runAsync: req.body?.runAsync, async: req.body?.async, }), }, }) async executeSessionCommand( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } 
// Delete a session; audited with the sessionId route param.
// NOTE(review): generic type arguments appear stripped in this extract (`RawBodyRequest`, `Promise`
// lack their `<...>` parameters) — confirm against the original file.
@Delete(':sandboxId/toolbox/process/session/:sessionId')
@ApiOperation({ summary: '[DEPRECATED] Delete session', description: 'Delete a specific session', operationId: 'deleteSession_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'Session deleted successfully', })
@ApiParam({ name: 'sessionId', type: String, required: true })
@ApiParam({ name: 'sandboxId', type: String, required: true })
@Audit({ action: AuditAction.TOOLBOX_DELETE_SESSION, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { params: (req) => ({ sessionId: req.params.sessionId, }), }, })
// Raw pass-through to the sandbox runner via toolboxProxy (response is written through @Res()).
async deleteSession( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// Fetch a single command from a session (read-only, not audited).
@Get(':sandboxId/toolbox/process/session/:sessionId/command/:commandId')
@ApiOperation({ summary: '[DEPRECATED] Get session command', description: 'Get session command by ID', operationId: 'getSessionCommand_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'Session command retrieved successfully', type: CommandDto, })
@ApiParam({ name: 'commandId', type: String, required: true })
@ApiParam({ name: 'sessionId', type: String, required: true })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async getSessionCommand( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// Command logs, optionally streamed while the command runs.
// NOTE(review): returns the promise without `await`, unlike most siblings — runtime-equivalent here.
@Get(':sandboxId/toolbox/process/session/:sessionId/command/:commandId/logs')
@ApiOperation({ summary: '[DEPRECATED] Get command logs', description: 'Get logs for a specific command in a session', operationId: 'getSessionCommandLogs_deprecated', deprecated: true, })
// When follow is true, the response is an octet stream
@ApiResponse({ status: 200, description: 'Command log stream marked with stdout and stderr prefixes', content: { 'text/plain': { schema: { type: 'string', }, }, }, })
@ApiQuery({ name: 'follow', type: Boolean, required: false, description: 'Whether to stream the logs' })
@ApiParam({ name: 'commandId', type: String, required: true })
@ApiParam({ name: 'sessionId', type: String, required: true })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async getSessionCommandLogs( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return this.toolboxProxy(req, res, next) }

// PTY endpoints
@Get(':sandboxId/toolbox/process/pty')
@ApiOperation({ summary: '[DEPRECATED] List PTY sessions', description: 'List all active PTY sessions in the sandbox', operationId: 'listPTYSessions_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'PTY sessions retrieved successfully', type: PtyListResponseDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async listPtySessions( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// Create a PTY session — note this endpoint documents and forces 201, unlike the 200-coded POSTs above.
@Post(':sandboxId/toolbox/process/pty')
@HttpCode(201)
@UseInterceptors(ContentTypeInterceptor)
@ApiOperation({ summary: '[DEPRECATED] Create PTY session', description: 'Create a new PTY session in the sandbox', operationId: 'createPTYSession_deprecated', deprecated: true, })
@ApiResponse({ status: 201, description: 'PTY session created successfully', type: PtyCreateResponseDto, })
@ApiBody({ type: PtyCreateRequestDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async createPtySession( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// Fetch PTY session info (read-only, not audited).
@Get(':sandboxId/toolbox/process/pty/:sessionId')
@ApiOperation({ summary: '[DEPRECATED] Get PTY session', description: 'Get PTY session information by ID', operationId: 'getPTYSession_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'PTY session retrieved successfully', type: PtySessionInfoDto, })
@ApiParam({ name: 'sessionId', type: String, required: true })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async getPtySession( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// Resize a PTY.
// NOTE(review): the only POST in this group without @HttpCode(200)/ContentTypeInterceptor; since the
// handler proxies via @Res() the actual status comes from the proxied response — confirm intended.
@Post(':sandboxId/toolbox/process/pty/:sessionId/resize')
@ApiOperation({ summary: '[DEPRECATED] Resize PTY session', description: 'Resize a PTY session', operationId: 'resizePTYSession_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'PTY session resized successfully', type: PtySessionInfoDto, })
@ApiParam({ name: 'sessionId', type: String, required: true })
@ApiParam({ name: 'sandboxId', type: String, required: true })
@ApiBody({ type: PtyResizeRequestDto, })
async resizePtySession( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// Delete a PTY session and terminate its process.
@Delete(':sandboxId/toolbox/process/pty/:sessionId')
@ApiOperation({ summary: '[DEPRECATED] Delete PTY session', description: 'Delete a PTY session and terminate the associated process', operationId: 'deletePTYSession_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'PTY session deleted successfully', })
@ApiParam({ name: 'sessionId', type: String, required: true })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async deletePtySession( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// LSP: completion request proxied to the language server in the sandbox.
@Post(':sandboxId/toolbox/lsp/completions')
@HttpCode(200)
@UseInterceptors(ContentTypeInterceptor)
@ApiOperation({ summary: '[DEPRECATED] Get Lsp Completions', description: 'The Completion request is sent from the client to the server to compute completion items at a given cursor position.', operationId: 'LspCompletions_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'OK', type: CompletionListDto, })
@ApiBody({ type: LspCompletionParamsDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async getLspCompletions( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// LSP: textDocument/didClose notification.
@Post(':sandboxId/toolbox/lsp/did-close')
@HttpCode(200)
@UseInterceptors(ContentTypeInterceptor)
@ApiOperation({ summary: '[DEPRECATED] Call Lsp DidClose', description: 'The document close notification is sent from the client to the server when the document got closed in the client.', operationId: 'LspDidClose_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'OK', })
@ApiBody({ type: LspDocumentRequestDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async lspDidClose( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// LSP: textDocument/didOpen notification.
@Post(':sandboxId/toolbox/lsp/did-open')
@HttpCode(200)
@UseInterceptors(ContentTypeInterceptor)
@ApiOperation({ summary: '[DEPRECATED] Call Lsp DidOpen', description: 'The document open notification is sent from the client to the server to signal newly opened text documents.', operationId: 'LspDidOpen_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'OK', })
@ApiBody({ type: LspDocumentRequestDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async lspDidOpen( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// LSP: document symbols for a single file.
@Get(':sandboxId/toolbox/lsp/document-symbols')
@ApiOperation({ summary: '[DEPRECATED] Call Lsp DocumentSymbols', description: 'The document symbol request is sent from the client to the server.', operationId: 'LspDocumentSymbols_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'OK', type: [LspSymbolDto], })
@ApiQuery({ name: 'uri', type: String, required: true })
@ApiQuery({ name: 'pathToProject', type: String, required: true })
@ApiQuery({ name: 'languageId', type: String, required: true })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async getLspDocumentSymbols( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// LSP: start a language-server process for a project inside the sandbox.
@Post(':sandboxId/toolbox/lsp/start')
@HttpCode(200)
@UseInterceptors(ContentTypeInterceptor)
@ApiOperation({ summary: '[DEPRECATED] Start Lsp server', description: 'Start Lsp server process inside sandbox project', operationId: 'LspStart_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'OK', })
@ApiBody({ type: LspServerRequestDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async startLspServer( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// LSP: stop the language-server process.
@Post(':sandboxId/toolbox/lsp/stop')
@HttpCode(200)
@UseInterceptors(ContentTypeInterceptor)
@ApiOperation({ summary: '[DEPRECATED] Stop Lsp server', description: 'Stop Lsp server process inside sandbox project', operationId: 'LspStop_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'OK', })
@ApiBody({ type: LspServerRequestDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async stopLspServer( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// LSP: project-wide symbol query.
@Get(':sandboxId/toolbox/lsp/workspace-symbols')
@ApiOperation({ summary: '[DEPRECATED] Call Lsp WorkspaceSymbols', description: 'The workspace symbol request is sent from the client to the server to list project-wide symbols matching the query string.', operationId: 'LspWorkspaceSymbols_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'OK', type: [LspSymbolDto], })
@ApiQuery({ name: 'query', type: String, required: true })
@ApiQuery({ name: 'pathToProject', type: String, required: true })
@ApiQuery({ name: 'languageId', type: String, required: true })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async getLspWorkspaceSymbols( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// Computer Use endpoints
// Computer use management endpoints
// Start the VNC desktop stack; audited (sandbox target only, no request metadata).
@Post(':sandboxId/toolbox/computeruse/start')
@HttpCode(200)
@UseInterceptors(ContentTypeInterceptor)
@ApiOperation({ summary: '[DEPRECATED] Start computer use processes', description: 'Start all VNC desktop processes (Xvfb, xfce4, x11vnc, novnc)', operationId: 'startComputerUse_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'Computer use processes started successfully', type: ComputerUseStartResponseDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
@Audit({ action: AuditAction.TOOLBOX_COMPUTER_USE_START, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, })
async startComputerUse( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// Stop the VNC desktop stack; audited.
@Post(':sandboxId/toolbox/computeruse/stop')
@HttpCode(200)
@UseInterceptors(ContentTypeInterceptor)
@ApiOperation({ summary: '[DEPRECATED] Stop computer use processes', description: 'Stop all VNC desktop processes (Xvfb, xfce4, x11vnc, novnc)', operationId: 'stopComputerUse_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'Computer use processes stopped successfully', type: ComputerUseStopResponseDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
@Audit({ action: AuditAction.TOOLBOX_COMPUTER_USE_STOP, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, })
async stopComputerUse( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// Aggregate status of all VNC desktop processes (read-only, not audited).
@Get(':sandboxId/toolbox/computeruse/status')
@ApiOperation({ summary: '[DEPRECATED] Get computer use status', description: 'Get status of all VNC desktop processes', operationId: 'getComputerUseStatus_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'Computer use status retrieved successfully', type: ComputerUseStatusResponseDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
async getComputerUseStatus( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// Status of a single named VNC process (read-only, not audited).
@Get(':sandboxId/toolbox/computeruse/process/:processName/status')
@ApiOperation({ summary: '[DEPRECATED] Get process status', description: 'Get status of a specific VNC process', operationId: 'getProcessStatus_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'Process status retrieved successfully', type: ProcessStatusResponseDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
@ApiParam({ name: 'processName', type: String, required: true })
async getProcessStatus( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) }

// NOTE(review): restartProcess continues below this chunk — the trailing `required:` is completed there.
@Post(':sandboxId/toolbox/computeruse/process/:processName/restart')
@HttpCode(200)
@UseInterceptors(ContentTypeInterceptor)
@ApiOperation({ summary: '[DEPRECATED] Restart process', description: 'Restart a specific VNC process', operationId: 'restartProcess_deprecated', deprecated: true, })
@ApiResponse({ status: 200, description: 'Process restarted successfully', type: ProcessRestartResponseDto, })
@ApiParam({ name: 'sandboxId', type: String, required: true })
@ApiParam({ name: 'processName', type: String, required:
true }) @Audit({ action: AuditAction.TOOLBOX_COMPUTER_USE_RESTART_PROCESS, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.sandboxId, requestMetadata: { params: (req) => ({ processName: req.params.processName, }), }, }) async restartProcess( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/computeruse/process/:processName/logs') @ApiOperation({ summary: '[DEPRECATED] Get process logs', description: 'Get logs for a specific VNC process', operationId: 'getProcessLogs_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Process logs retrieved successfully', type: ProcessLogsResponseDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @ApiParam({ name: 'processName', type: String, required: true }) async getProcessLogs( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/computeruse/process/:processName/errors') @ApiOperation({ summary: '[DEPRECATED] Get process errors', description: 'Get error logs for a specific VNC process', operationId: 'getProcessErrors_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Process errors retrieved successfully', type: ProcessErrorsResponseDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) @ApiParam({ name: 'processName', type: String, required: true }) async getProcessErrors( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } // Mouse endpoints @Get(':sandboxId/toolbox/computeruse/mouse/position') @ApiOperation({ summary: '[DEPRECATED] Get mouse position', description: 'Get current mouse cursor position', operationId: 'getMousePosition_deprecated', deprecated: true, }) 
@ApiResponse({ status: 200, description: 'Mouse position retrieved successfully', type: MousePositionDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async getMousePosition( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/computeruse/mouse/move') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Move mouse', description: 'Move mouse cursor to specified coordinates', operationId: 'moveMouse_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Mouse moved successfully', type: MouseMoveResponseDto, }) @ApiBody({ type: MouseMoveRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async moveMouse( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/computeruse/mouse/click') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Click mouse', description: 'Click mouse at specified coordinates', operationId: 'clickMouse_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Mouse clicked successfully', type: MouseClickResponseDto, }) @ApiBody({ type: MouseClickRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async clickMouse( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/computeruse/mouse/drag') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Drag mouse', description: 'Drag mouse from start to end coordinates', operationId: 'dragMouse_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Mouse dragged successfully', type: 
MouseDragResponseDto, }) @ApiBody({ type: MouseDragRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async dragMouse( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/computeruse/mouse/scroll') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Scroll mouse', description: 'Scroll mouse at specified coordinates', operationId: 'scrollMouse_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Mouse scrolled successfully', type: MouseScrollResponseDto, }) @ApiBody({ type: MouseScrollRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async scrollMouse( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } // Keyboard endpoints @Post(':sandboxId/toolbox/computeruse/keyboard/type') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Type text', description: 'Type text using keyboard', operationId: 'typeText_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Text typed successfully', }) @ApiBody({ type: KeyboardTypeRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async typeText( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/computeruse/keyboard/key') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Press key', description: 'Press a key with optional modifiers', operationId: 'pressKey_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Key pressed successfully', }) @ApiBody({ type: KeyboardPressRequestDto, }) @ApiParam({ name: 'sandboxId', 
type: String, required: true }) async pressKey( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Post(':sandboxId/toolbox/computeruse/keyboard/hotkey') @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Press hotkey', description: 'Press a hotkey combination', operationId: 'pressHotkey_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Hotkey pressed successfully', }) @ApiBody({ type: KeyboardHotkeyRequestDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async pressHotkey( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } // Screenshot endpoints @Get(':sandboxId/toolbox/computeruse/screenshot') @ApiOperation({ summary: '[DEPRECATED] Take screenshot', description: 'Take a screenshot of the entire screen', operationId: 'takeScreenshot_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Screenshot taken successfully', type: ScreenshotResponseDto, }) @ApiQuery({ name: 'show_cursor', type: Boolean, required: false }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async takeScreenshot( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/computeruse/screenshot/region') @ApiOperation({ summary: '[DEPRECATED] Take region screenshot', description: 'Take a screenshot of a specific region', operationId: 'takeRegionScreenshot_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Region screenshot taken successfully', type: RegionScreenshotResponseDto, }) @ApiQuery({ name: 'x', type: Number, required: true }) @ApiQuery({ name: 'y', type: Number, required: true }) @ApiQuery({ name: 'width', type: Number, 
required: true }) @ApiQuery({ name: 'height', type: Number, required: true }) @ApiQuery({ name: 'show_cursor', type: Boolean, required: false }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async takeRegionScreenshot( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/computeruse/screenshot/compressed') @ApiOperation({ summary: '[DEPRECATED] Take compressed screenshot', description: 'Take a compressed screenshot with format, quality, and scale options', operationId: 'takeCompressedScreenshot_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Compressed screenshot taken successfully', type: CompressedScreenshotResponseDto, }) @ApiQuery({ name: 'show_cursor', type: Boolean, required: false }) @ApiQuery({ name: 'format', type: String, required: false }) @ApiQuery({ name: 'quality', type: Number, required: false }) @ApiQuery({ name: 'scale', type: Number, required: false }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async takeCompressedScreenshot( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/computeruse/screenshot/region/compressed') @ApiOperation({ summary: '[DEPRECATED] Take compressed region screenshot', description: 'Take a compressed screenshot of a specific region', operationId: 'takeCompressedRegionScreenshot_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Compressed region screenshot taken successfully', type: CompressedScreenshotResponseDto, }) @ApiQuery({ name: 'x', type: Number, required: true }) @ApiQuery({ name: 'y', type: Number, required: true }) @ApiQuery({ name: 'width', type: Number, required: true }) @ApiQuery({ name: 'height', type: Number, required: true }) @ApiQuery({ name: 'show_cursor', type: Boolean, 
required: false }) @ApiQuery({ name: 'format', type: String, required: false }) @ApiQuery({ name: 'quality', type: Number, required: false }) @ApiQuery({ name: 'scale', type: Number, required: false }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async takeCompressedRegionScreenshot( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } // Display endpoints @Get(':sandboxId/toolbox/computeruse/display/info') @ApiOperation({ summary: '[DEPRECATED] Get display info', description: 'Get information about displays', operationId: 'getDisplayInfo_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Display info retrieved successfully', type: DisplayInfoResponseDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async getDisplayInfo( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } @Get(':sandboxId/toolbox/computeruse/display/windows') @ApiOperation({ summary: '[DEPRECATED] Get windows', description: 'Get list of open windows', operationId: 'getWindows_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Windows list retrieved successfully', type: WindowsResponseDto, }) @ApiParam({ name: 'sandboxId', type: String, required: true }) async getWindows( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, ): Promise { return await this.toolboxProxy(req, res, next) } } ================================================ FILE: apps/api/src/sandbox/controllers/volume.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get, Post, Delete, Body, Param, Logger, UseGuards, HttpCode, UseInterceptors, Query, } from '@nestjs/common' import { ApiOAuth2, ApiResponse, ApiOperation, ApiParam, ApiTags, ApiHeader, ApiQuery, ApiBearerAuth, } from '@nestjs/swagger' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { VolumeService } from '../services/volume.service' import { CreateVolumeDto } from '../dto/create-volume.dto' import { ContentTypeInterceptor } from '../../common/interceptors/content-type.interceptors' import { CustomHeaders } from '../../common/constants/header.constants' import { AuthContext } from '../../common/decorators/auth-context.decorator' import { OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { RequiredOrganizationResourcePermissions } from '../../organization/decorators/required-organization-resource-permissions.decorator' import { OrganizationResourcePermission } from '../../organization/enums/organization-resource-permission.enum' import { OrganizationResourceActionGuard } from '../../organization/guards/organization-resource-action.guard' import { VolumeDto } from '../dto/volume.dto' import { Audit, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } from '../../audit/enums/audit-target.enum' import { VolumeAccessGuard } from '../guards/volume-access.guard' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' @ApiTags('volumes') @Controller('volumes') @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(CombinedAuthGuard, OrganizationResourceActionGuard, AuthenticatedRateLimitGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class VolumeController { private readonly logger = new Logger(VolumeController.name) constructor(private readonly volumeService: VolumeService) {} @Get() 
@ApiOperation({ summary: 'List all volumes', operationId: 'listVolumes', }) @ApiResponse({ status: 200, description: 'List of all volumes', type: [VolumeDto], }) @ApiQuery({ name: 'includeDeleted', required: false, type: Boolean, description: 'Include deleted volumes in the response', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.READ_VOLUMES]) async listVolumes( @AuthContext() authContext: OrganizationAuthContext, @Query('includeDeleted') includeDeleted = false, ): Promise { const volumes = await this.volumeService.findAll(authContext.organizationId, includeDeleted) return volumes.map(VolumeDto.fromVolume) } @Post() @HttpCode(200) @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: 'Create a new volume', operationId: 'createVolume', }) @ApiResponse({ status: 200, description: 'The volume has been successfully created.', type: VolumeDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_VOLUMES]) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.VOLUME, targetIdFromResult: (result: VolumeDto) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ name: req.body?.name, }), }, }) async createVolume( @AuthContext() authContext: OrganizationAuthContext, @Body() createVolumeDto: CreateVolumeDto, ): Promise { const volume = await this.volumeService.create(authContext.organization, createVolumeDto) return VolumeDto.fromVolume(volume) } @Get(':volumeId') @ApiOperation({ summary: 'Get volume details', operationId: 'getVolume', }) @ApiParam({ name: 'volumeId', description: 'ID of the volume', type: 'string', }) @ApiResponse({ status: 200, description: 'Volume details', type: VolumeDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.READ_VOLUMES]) @UseGuards(VolumeAccessGuard) async getVolume(@Param('volumeId') volumeId: string): Promise { const volume = await this.volumeService.findOne(volumeId) return VolumeDto.fromVolume(volume) } 
@Delete(':volumeId') @ApiOperation({ summary: 'Delete volume', operationId: 'deleteVolume', }) @ApiParam({ name: 'volumeId', description: 'ID of the volume', type: 'string', }) @ApiResponse({ status: 200, description: 'Volume has been marked for deletion', }) @ApiResponse({ status: 409, description: 'Volume is in use by one or more sandboxes', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.DELETE_VOLUMES]) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.VOLUME, targetIdFromRequest: (req) => req.params.volumeId, }) @UseGuards(VolumeAccessGuard) async deleteVolume(@Param('volumeId') volumeId: string): Promise { return this.volumeService.delete(volumeId) } @Get('by-name/:name') @ApiOperation({ summary: 'Get volume details by name', operationId: 'getVolumeByName', }) @ApiParam({ name: 'name', description: 'Name of the volume', type: 'string', }) @ApiResponse({ status: 200, description: 'Volume details', type: VolumeDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.READ_VOLUMES]) @UseGuards(VolumeAccessGuard) async getVolumeByName( @AuthContext() authContext: OrganizationAuthContext, @Param('name') name: string, ): Promise { const volume = await this.volumeService.findByName(authContext.organizationId, name) return VolumeDto.fromVolume(volume) } } ================================================ FILE: apps/api/src/sandbox/controllers/workspace.deprecated.controller.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get, Post, Delete, Body, Param, Query, Logger, UseGuards, HttpCode, UseInterceptors, Put, NotFoundException, ForbiddenException, Res, Request, RawBodyRequest, Next, ParseBoolPipe, } from '@nestjs/common' import Redis from 'ioredis' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { SandboxService as WorkspaceService } from '../services/sandbox.service' import { ApiOAuth2, ApiResponse, ApiQuery, ApiOperation, ApiParam, ApiTags, ApiHeader, ApiBearerAuth, } from '@nestjs/swagger' import { SandboxLabelsDto as WorkspaceLabelsDto } from '../dto/sandbox.dto' import { WorkspaceDto } from '../dto/workspace.deprecated.dto' import { RunnerService } from '../services/runner.service' import { SandboxState as WorkspaceState } from '../enums/sandbox-state.enum' import { ContentTypeInterceptor } from '../../common/interceptors/content-type.interceptors' import { InjectRedis } from '@nestjs-modules/ioredis' import { SandboxAccessGuard as WorkspaceAccessGuard } from '../guards/sandbox-access.guard' import { CustomHeaders } from '../../common/constants/header.constants' import { AuthContext } from '../../common/decorators/auth-context.decorator' import { OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { RequiredOrganizationResourcePermissions } from '../../organization/decorators/required-organization-resource-permissions.decorator' import { OrganizationResourcePermission } from '../../organization/enums/organization-resource-permission.enum' import { OrganizationResourceActionGuard } from '../../organization/guards/organization-resource-action.guard' import { WorkspacePortPreviewUrlDto } from '../dto/workspace-port-preview-url.deprecated.dto' import { IncomingMessage, ServerResponse } from 'http' import { NextFunction } from 'http-proxy-middleware/dist/types' import { LogProxy } from '../proxy/log-proxy' import { CreateWorkspaceDto } from 
'../dto/create-workspace.deprecated.dto' import { TypedConfigService } from '../../config/typed-config.service' import { BadRequestError } from '../../exceptions/bad-request.exception' import { Audit, MASKED_AUDIT_VALUE, TypedRequest } from '../../audit/decorators/audit.decorator' import { AuditAction } from '../../audit/enums/audit-action.enum' import { AuditTarget } from '../../audit/enums/audit-target.enum' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' @ApiTags('workspace') @Controller('workspace') @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(CombinedAuthGuard, OrganizationResourceActionGuard, AuthenticatedRateLimitGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class WorkspaceController { private readonly logger = new Logger(WorkspaceController.name) constructor( @InjectRedis() private readonly redis: Redis, private readonly runnerService: RunnerService, private readonly workspaceService: WorkspaceService, private readonly configService: TypedConfigService, ) {} @Get() @ApiOperation({ summary: '[DEPRECATED] List all workspaces', operationId: 'listWorkspaces_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'List of all workspacees', type: [WorkspaceDto], }) @ApiQuery({ name: 'verbose', required: false, type: Boolean, description: 'Include verbose output', }) @ApiQuery({ name: 'labels', type: String, required: false, example: '{"label1": "value1", "label2": "value2"}', description: 'JSON encoded labels to filter by', }) async listWorkspacees( @AuthContext() authContext: OrganizationAuthContext, @Query('verbose') verbose?: boolean, @Query('labels') labelsQuery?: string, ): Promise { const labels = labelsQuery ? 
JSON.parse(labelsQuery) : {} const workspacees = await this.workspaceService.findAllDeprecated(authContext.organizationId, labels) const dtos = workspacees.map(async (workspace) => { const dto = WorkspaceDto.fromSandbox(workspace) return dto }) return await Promise.all(dtos) } @Post() @HttpCode(200) // for Daytona Api compatibility @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Create a new workspace', operationId: 'createWorkspace_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'The workspace has been successfully created.', type: WorkspaceDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.SANDBOX, targetIdFromResult: (result: WorkspaceDto) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ image: req.body?.image, user: req.body?.user, env: req.body?.env ? Object.fromEntries(Object.keys(req.body?.env).map((key) => [key, MASKED_AUDIT_VALUE])) : undefined, labels: req.body?.labels, public: req.body?.public, class: req.body?.class, target: req.body?.target, cpu: req.body?.cpu, gpu: req.body?.gpu, memory: req.body?.memory, disk: req.body?.disk, autoStopInterval: req.body?.autoStopInterval, autoArchiveInterval: req.body?.autoArchiveInterval, volumes: req.body?.volumes, buildInfo: req.body?.buildInfo, }), }, }) async createWorkspace( @AuthContext() authContext: OrganizationAuthContext, @Body() createWorkspaceDto: CreateWorkspaceDto, ): Promise { if (createWorkspaceDto.buildInfo) { throw new ForbiddenException('Build info is not supported in this deprecated API - please upgrade your client') } const organization = authContext.organization const workspace = WorkspaceDto.fromSandboxDto( await this.workspaceService.createFromSnapshot( { ...createWorkspaceDto, snapshot: createWorkspaceDto.image, }, organization, true, ), ) // Wait for the workspace to start const sandboxState = await 
this.waitForWorkspaceState( workspace.id, WorkspaceState.STARTED, 30000, // 30 seconds timeout ) workspace.state = sandboxState return workspace } @Get(':workspaceId') @ApiOperation({ summary: '[DEPRECATED] Get workspace details', operationId: 'getWorkspace_deprecated', deprecated: true, }) @ApiParam({ name: 'workspaceId', description: 'ID of the workspace', type: 'string', }) @ApiQuery({ name: 'verbose', required: false, type: Boolean, description: 'Include verbose output', }) @ApiResponse({ status: 200, description: 'Workspace details', type: WorkspaceDto, }) @UseGuards(WorkspaceAccessGuard) async getWorkspace( @Param('workspaceId') workspaceId: string, // eslint-disable-next-line @typescript-eslint/no-unused-vars @Query('verbose') verbose?: boolean, ): Promise { const workspace = await this.workspaceService.findOne(workspaceId, true) return WorkspaceDto.fromSandbox(workspace) } @Delete(':workspaceId') @ApiOperation({ summary: '[DEPRECATED] Delete workspace', operationId: 'deleteWorkspace_deprecated', deprecated: true, }) @ApiParam({ name: 'workspaceId', description: 'ID of the workspace', type: 'string', }) @ApiResponse({ status: 200, description: 'Workspace has been deleted', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.DELETE_SANDBOXES]) @UseGuards(WorkspaceAccessGuard) @Audit({ action: AuditAction.DELETE, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.workspaceId, }) async removeWorkspace( @Param('workspaceId') workspaceId: string, // eslint-disable-next-line @typescript-eslint/no-unused-vars @Query('force') force?: boolean, ): Promise { await this.workspaceService.destroy(workspaceId) } @Post(':workspaceId/start') @HttpCode(200) @ApiOperation({ summary: '[DEPRECATED] Start workspace', operationId: 'startWorkspace_deprecated', deprecated: true, }) @ApiParam({ name: 'workspaceId', description: 'ID of the workspace', type: 'string', }) @ApiResponse({ status: 200, description: 'Workspace has been 
started', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(WorkspaceAccessGuard) @Audit({ action: AuditAction.START, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.workspaceId, }) async startWorkspace( @AuthContext() authContext: OrganizationAuthContext, @Param('workspaceId') workspaceId: string, ): Promise { await this.workspaceService.start(workspaceId, authContext.organization) } @Post(':workspaceId/stop') @HttpCode(200) // for Daytona Api compatibility @ApiOperation({ summary: '[DEPRECATED] Stop workspace', operationId: 'stopWorkspace_deprecated', deprecated: true, }) @ApiParam({ name: 'workspaceId', description: 'ID of the workspace', type: 'string', }) @ApiResponse({ status: 200, description: 'Workspace has been stopped', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(WorkspaceAccessGuard) @Audit({ action: AuditAction.STOP, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.workspaceId, }) async stopWorkspace(@Param('workspaceId') workspaceId: string): Promise { await this.workspaceService.stop(workspaceId) } @Put(':workspaceId/labels') @UseInterceptors(ContentTypeInterceptor) @ApiOperation({ summary: '[DEPRECATED] Replace workspace labels', operationId: 'replaceLabelsWorkspace_deprecated', deprecated: true, }) @ApiParam({ name: 'workspaceId', description: 'ID of the workspace', type: 'string', }) @ApiResponse({ status: 200, description: 'Labels have been successfully replaced', type: WorkspaceLabelsDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(WorkspaceAccessGuard) @Audit({ action: AuditAction.REPLACE_LABELS, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.workspaceId, requestMetadata: { body: (req: TypedRequest) => ({ labels: req.body?.labels, }), }, }) async replaceLabels( @Param('workspaceId') workspaceId: 
string, @Body() labelsDto: WorkspaceLabelsDto, ): Promise { const { labels } = await this.workspaceService.replaceLabels(workspaceId, labelsDto.labels) return { labels } } @Post(':workspaceId/backup') @ApiOperation({ summary: '[DEPRECATED] Create workspace backup', operationId: 'createBackupWorkspace_deprecated', deprecated: true, }) @ApiParam({ name: 'workspaceId', description: 'ID of the workspace', type: 'string', }) @ApiResponse({ status: 200, description: 'Workspace backup has been initiated', type: WorkspaceDto, }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(WorkspaceAccessGuard) @Audit({ action: AuditAction.CREATE_BACKUP, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.workspaceId, }) async createBackup(@Param('workspaceId') workspaceId: string): Promise { await this.workspaceService.createBackup(workspaceId) } @Post(':workspaceId/public/:isPublic') @ApiOperation({ summary: '[DEPRECATED] Update public status', operationId: 'updatePublicStatusWorkspace_deprecated', deprecated: true, }) @ApiParam({ name: 'workspaceId', description: 'ID of the workspace', type: 'string', }) @ApiParam({ name: 'isPublic', description: 'Public status to set', type: 'boolean', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(WorkspaceAccessGuard) @Audit({ action: AuditAction.UPDATE_PUBLIC_STATUS, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.workspaceId, requestMetadata: { params: (req) => ({ isPublic: req.params.isPublic, }), }, }) async updatePublicStatus( @Param('workspaceId') workspaceId: string, @Param('isPublic') isPublic: boolean, ): Promise { await this.workspaceService.updatePublicStatus(workspaceId, isPublic) } @Post(':workspaceId/autostop/:interval') @ApiOperation({ summary: '[DEPRECATED] Set workspace auto-stop interval', operationId: 'setAutostopIntervalWorkspace_deprecated', deprecated: true, }) 
@ApiParam({ name: 'workspaceId', description: 'ID of the workspace', type: 'string', }) @ApiParam({ name: 'interval', description: 'Auto-stop interval in minutes (0 to disable)', type: 'number', }) @ApiResponse({ status: 200, description: 'Auto-stop interval has been set', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(WorkspaceAccessGuard) @Audit({ action: AuditAction.SET_AUTO_STOP_INTERVAL, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.workspaceId, requestMetadata: { params: (req) => ({ interval: req.params.interval, }), }, }) async setAutostopInterval( @Param('workspaceId') workspaceId: string, @Param('interval') interval: number, ): Promise { await this.workspaceService.setAutostopInterval(workspaceId, interval) } @Post(':workspaceId/autoarchive/:interval') @ApiOperation({ summary: '[DEPRECATED] Set workspace auto-archive interval', operationId: 'setAutoArchiveIntervalWorkspace_deprecated', deprecated: true, }) @ApiParam({ name: 'workspaceId', description: 'ID of the workspace', type: 'string', }) @ApiParam({ name: 'interval', description: 'Auto-archive interval in minutes (0 means the maximum interval will be used)', type: 'number', }) @ApiResponse({ status: 200, description: 'Auto-archive interval has been set', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(WorkspaceAccessGuard) @Audit({ action: AuditAction.SET_AUTO_ARCHIVE_INTERVAL, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.workspaceId, requestMetadata: { params: (req) => ({ interval: req.params.interval, }), }, }) async setAutoArchiveInterval( @Param('workspaceId') workspaceId: string, @Param('interval') interval: number, ): Promise { await this.workspaceService.setAutoArchiveInterval(workspaceId, interval) } @Post(':workspaceId/archive') @HttpCode(200) @ApiOperation({ summary: '[DEPRECATED] Archive workspace', operationId: 
'archiveWorkspace_deprecated', deprecated: true, }) @ApiResponse({ status: 200, description: 'Workspace has been archived', }) @RequiredOrganizationResourcePermissions([OrganizationResourcePermission.WRITE_SANDBOXES]) @UseGuards(WorkspaceAccessGuard) @Audit({ action: AuditAction.ARCHIVE, targetType: AuditTarget.SANDBOX, targetIdFromRequest: (req) => req.params.workspaceId, }) async archiveWorkspace(@Param('workspaceId') workspaceId: string): Promise { await this.workspaceService.archive(workspaceId) } @Get(':workspaceId/ports/:port/preview-url') @ApiOperation({ summary: '[DEPRECATED] Get preview URL for a workspace port', operationId: 'getPortPreviewUrlWorkspace_deprecated', deprecated: true, }) @ApiParam({ name: 'workspaceId', description: 'ID of the workspace', type: 'string', }) @ApiParam({ name: 'port', description: 'Port number to get preview URL for', type: 'number', }) @ApiResponse({ status: 200, description: 'Preview URL for the specified port', type: WorkspacePortPreviewUrlDto, }) @UseGuards(WorkspaceAccessGuard) async getPortPreviewUrl( @Param('workspaceId') workspaceId: string, @Param('port') port: number, ): Promise { if (port < 1 || port > 65535) { throw new BadRequestError('Invalid port') } const proxyDomain = this.configService.getOrThrow('proxy.domain') const proxyProtocol = this.configService.getOrThrow('proxy.protocol') const workspace = await this.workspaceService.findOne(workspaceId) if (!workspace) { throw new NotFoundException(`Workspace with ID ${workspaceId} not found`) } return { url: `${proxyProtocol}://${port}-${workspaceId}.${proxyDomain}`, token: workspace.authToken, } } @Get(':workspaceId/build-logs') @ApiOperation({ summary: '[DEPRECATED] Get build logs', operationId: 'getBuildLogsWorkspace_deprecated', deprecated: true, }) @ApiParam({ name: 'workspaceId', description: 'ID of the workspace', type: 'string', }) @ApiResponse({ status: 200, description: 'Build logs stream', }) @ApiQuery({ name: 'follow', required: false, type: Boolean, 
description: 'Whether to follow the logs stream', }) @UseGuards(WorkspaceAccessGuard) async getBuildLogs( @Request() req: RawBodyRequest, @Res() res: ServerResponse, @Next() next: NextFunction, @Param('workspaceId') workspaceId: string, @Query('follow', new ParseBoolPipe({ optional: true })) follow?: boolean, ): Promise { const workspace = await this.workspaceService.findOne(workspaceId) if (!workspace || !workspace.runnerId) { throw new NotFoundException(`Workspace with ID ${workspaceId} not found or has no runner assigned`) } if (!workspace.buildInfo) { throw new NotFoundException(`Workspace with ID ${workspaceId} has no build info`) } const runner = await this.runnerService.findOneOrFail(workspace.runnerId) if (!runner.apiUrl) { throw new NotFoundException(`Runner for workspace ${workspaceId} has no API URL`) } const logProxy = new LogProxy( runner.apiUrl, workspace.buildInfo.snapshotRef.split(':')[0], runner.apiKey, follow === true, req, res, next, ) return logProxy.create() } private async waitForWorkspaceState( workspaceId: string, desiredState: WorkspaceState, timeout: number, ): Promise { const startTime = Date.now() let workspaceState: WorkspaceState while (Date.now() - startTime < timeout) { const workspace = await this.workspaceService.findOne(workspaceId) workspaceState = workspace.state if ( workspaceState === desiredState || workspaceState === WorkspaceState.ERROR || workspaceState === WorkspaceState.BUILD_FAILED ) { return workspaceState } await new Promise((resolve) => setTimeout(resolve, 100)) // Wait 100 ms before checking again } return workspaceState } } ================================================ FILE: apps/api/src/sandbox/dto/build-info.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'

// Response-side schema for build metadata attached to a sandbox/snapshot.
@ApiSchema({ name: 'BuildInfo' })
export class BuildInfoDto {
  @ApiPropertyOptional({
    description: 'The Dockerfile content used for the build',
    example: 'FROM node:14\nWORKDIR /app\nCOPY . .\nRUN npm install\nCMD ["npm", "start"]',
  })
  dockerfileContent?: string

  @ApiPropertyOptional({
    description: 'The context hashes used for the build',
    type: [String],
    example: ['hash1', 'hash2'],
  })
  contextHashes?: string[]

  @ApiProperty({
    description: 'The creation timestamp',
  })
  createdAt: Date

  @ApiProperty({
    description: 'The last update timestamp',
  })
  updatedAt: Date

  @ApiProperty({
    description: 'The snapshot reference',
    example: 'daytonaio/sandbox:latest',
  })
  snapshotRef: string
}

================================================
FILE: apps/api/src/sandbox/dto/create-build-info.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { IsNotEmpty, IsOptional, IsString } from 'class-validator'

// Request-side schema: the input needed to trigger a build.
@ApiSchema({ name: 'CreateBuildInfo' })
export class CreateBuildInfoDto {
  @ApiProperty({
    description: 'The Dockerfile content used for the build',
    example: 'FROM node:14\nWORKDIR /app\nCOPY . .\nRUN npm install\nCMD ["npm", "start"]',
  })
  @IsString()
  @IsNotEmpty()
  dockerfileContent: string

  @ApiPropertyOptional({
    description: 'The context hashes used for the build',
    type: [String],
    example: ['hash1', 'hash2'],
  })
  @IsString({ each: true })
  @IsOptional()
  contextHashes?: string[]
}

================================================
FILE: apps/api/src/sandbox/dto/create-runner-internal.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

// Internal (non-HTTP) payloads for registering runners, discriminated by the
// literal `apiVersion` tag. V0 runners also carry connectivity and capacity data.
export type CreateRunnerV0InternalDto = {
  domain: string
  apiUrl: string
  proxyUrl: string
  cpu: number
  memoryGiB: number
  diskGiB: number
  regionId: string
  name: string
  apiKey?: string
  apiVersion: '0'
  appVersion?: string
}

export type CreateRunnerV2InternalDto = {
  apiKey?: string
  regionId: string
  name: string
  apiVersion: '2'
  appVersion?: string
}

// Discriminated union: narrow on `apiVersion`.
export type CreateRunnerInternalDto = CreateRunnerV0InternalDto | CreateRunnerV2InternalDto

================================================
FILE: apps/api/src/sandbox/dto/create-runner-response.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { Runner } from '../entities/runner.entity'

@ApiSchema({ name: 'CreateRunnerResponse' })
export class CreateRunnerResponseDto {
  @ApiProperty({
    description: 'The ID of the runner',
    example: 'runner123',
  })
  id: string

  @ApiProperty({
    description: 'The API key for the runner',
    example: 'dtn_1234567890',
  })
  apiKey: string

  // Builds the response from a runner entity plus the api key string.
  // NOTE(review): apiKey is passed separately from the entity — presumably only
  // available in plain text at creation time; confirm against RunnerService.
  static fromRunner(runner: Runner, apiKey: string): CreateRunnerResponseDto {
    return {
      id: runner.id,
      apiKey,
    }
  }
}

================================================
FILE: apps/api/src/sandbox/dto/create-runner.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { IsString } from 'class-validator'
import { ApiProperty, ApiSchema } from '@nestjs/swagger'

// Public HTTP payload for creating a runner.
@ApiSchema({ name: 'CreateRunner' })
export class CreateRunnerDto {
  @IsString()
  @ApiProperty()
  regionId: string

  @IsString()
  @ApiProperty()
  name: string
}

================================================
FILE: apps/api/src/sandbox/dto/create-sandbox.dto.ts
================================================
/*
 * Copyright 2025 Daytona
 * SPDX-License-Identifier: AGPL-3.0
 */

import { IsEnum, IsObject, IsOptional, IsString, IsNumber, IsBoolean, IsArray } from 'class-validator'
import { ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { SandboxClass } from '../enums/sandbox-class.enum'
import { SandboxVolume } from './sandbox.dto'
import { CreateBuildInfoDto } from './create-build-info.dto'

// Request body for creating a sandbox. Every field is optional; server-side defaults apply.
@ApiSchema({ name: 'CreateSandbox' })
export class CreateSandboxDto {
  @ApiPropertyOptional({
    description: 'The name of the sandbox. If not provided, the sandbox ID will be used as the name',
    example: 'MySandbox',
  })
  @IsOptional()
  @IsString()
  name?: string

  @ApiPropertyOptional({
    description: 'The ID or name of the snapshot used for the sandbox',
    example: 'ubuntu-4vcpu-8ram-100gb',
  })
  @IsOptional()
  @IsString()
  snapshot?: string

  @ApiPropertyOptional({
    description: 'The user associated with the project',
    example: 'daytona',
  })
  @IsOptional()
  @IsString()
  user?: string

  @ApiPropertyOptional({
    description: 'Environment variables for the sandbox',
    type: 'object',
    additionalProperties: { type: 'string' },
    example: { NODE_ENV: 'production' },
  })
  @IsOptional()
  @IsObject()
  env?: { [key: string]: string }

  @ApiPropertyOptional({
    description: 'Labels for the sandbox',
    type: 'object',
    additionalProperties: { type: 'string' },
    example: { 'daytona.io/public': 'true' },
  })
  @IsOptional()
  @IsObject()
  labels?: { [key: string]: string }

  @ApiPropertyOptional({
    description: 'Whether the sandbox http preview is publicly accessible',
    example: false,
  })
  @IsOptional()
  @IsBoolean()
  public?: boolean

  @ApiPropertyOptional({
    description: 'Whether to block all network access for the sandbox',
    example: false,
  })
  @IsOptional()
  @IsBoolean()
  networkBlockAll?: boolean

  @ApiPropertyOptional({
    description: 'Comma-separated list of allowed CIDR network addresses for the sandbox',
    example: '192.168.1.0/16,10.0.0.0/24',
  })
  @IsOptional()
  @IsString()
  networkAllowList?: string

  @ApiPropertyOptional({
    description: 'The sandbox class type',
    enum: SandboxClass,
    example: Object.values(SandboxClass)[0],
  })
  @IsOptional()
  @IsEnum(SandboxClass)
  class?: SandboxClass

  @ApiPropertyOptional({
    description: 'The target (region) where the sandbox will be created',
    example: 'us',
  })
  @IsOptional()
  @IsString()
  target?: string

  @ApiPropertyOptional({
    description: 'CPU cores allocated to the sandbox',
    example: 2,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  cpu?: number

  @ApiPropertyOptional({
    description: 'GPU units allocated to the sandbox',
    example: 1,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  gpu?: number

  @ApiPropertyOptional({
    description: 'Memory allocated to the sandbox in GB',
    example: 1,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  memory?: number

  @ApiPropertyOptional({
    description: 'Disk space allocated to the sandbox in GB',
    example: 3,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  disk?: number

  @ApiPropertyOptional({
    description: 'Auto-stop interval in minutes (0 means disabled)',
    example: 30,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  autoStopInterval?: number

  @ApiPropertyOptional({
    description: 'Auto-archive interval in minutes (0 means the maximum interval will be used)',
    example: 7 * 24 * 60,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  autoArchiveInterval?: number

  @ApiPropertyOptional({
    description: 'Auto-delete interval in minutes (negative value means disabled, 0 means delete immediately upon stopping)',
    example: 30,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  autoDeleteInterval?: number

  @ApiPropertyOptional({
    description: 'Array of volumes to attach to the sandbox',
    type: [SandboxVolume],
    required: false,
  })
  @IsOptional()
  @IsArray()
  volumes?: SandboxVolume[]

  @ApiPropertyOptional({
    description: 'Build information for the sandbox',
    type: CreateBuildInfoDto,
  })
  @IsOptional()
  @IsObject()
  buildInfo?: CreateBuildInfoDto
}

================================================
FILE: apps/api/src/sandbox/dto/create-snapshot.dto.ts
================================================
/*
 * Copyright 2025 Daytona
Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { IsArray, IsObject, IsBoolean, IsNumber, IsOptional, IsString } from 'class-validator'
import { CreateBuildInfoDto } from './create-build-info.dto'

// Request body for creating a snapshot, either from an image or from build info.
@ApiSchema({ name: 'CreateSnapshot' })
export class CreateSnapshotDto {
  @ApiProperty({
    description: 'The name of the snapshot',
    example: 'ubuntu-4vcpu-8ram-100gb',
  })
  @IsString()
  name: string

  @ApiPropertyOptional({
    description: 'The image name of the snapshot',
    example: 'ubuntu:22.04',
  })
  @IsOptional()
  @IsString()
  imageName?: string

  @ApiPropertyOptional({
    description: 'The entrypoint command for the snapshot',
    example: 'sleep infinity',
  })
  @IsString({
    each: true,
  })
  @IsArray()
  @IsOptional()
  entrypoint?: string[]

  @ApiPropertyOptional({
    description: 'Whether the snapshot is general',
  })
  @IsBoolean()
  @IsOptional()
  general?: boolean

  @ApiPropertyOptional({
    description: 'CPU cores allocated to the resulting sandbox',
    example: 1,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  cpu?: number

  @ApiPropertyOptional({
    description: 'GPU units allocated to the resulting sandbox',
    example: 0,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  gpu?: number

  @ApiPropertyOptional({
    description: 'Memory allocated to the resulting sandbox in GB',
    example: 1,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  memory?: number

  @ApiPropertyOptional({
    description: 'Disk space allocated to the sandbox in GB',
    example: 3,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  disk?: number

  @ApiPropertyOptional({
    description: 'Build information for the snapshot',
    type: CreateBuildInfoDto,
  })
  @IsOptional()
  @IsObject()
  buildInfo?: CreateBuildInfoDto

  @ApiPropertyOptional({
    description: 'ID of the region where the snapshot will be available. Defaults to organization default region if not specified.',
  })
  @IsOptional()
  @IsString()
  regionId?: string
}

================================================
FILE: apps/api/src/sandbox/dto/create-volume.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { IsString } from 'class-validator'

@ApiSchema({ name: 'CreateVolume' })
export class CreateVolumeDto {
  // NOTE(review): @ApiProperty advertises this as required while the TS type is
  // optional (`name?`) — confirm which is intended.
  @ApiProperty()
  @IsString()
  name?: string
}

================================================
FILE: apps/api/src/sandbox/dto/create-workspace.deprecated.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { IsEnum, IsObject, IsOptional, IsString, IsNumber, IsBoolean } from 'class-validator'
import { ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { SandboxClass } from '../enums/sandbox-class.enum'
import { SandboxVolume } from './sandbox.dto'
import { CreateBuildInfoDto } from './create-build-info.dto'

// Legacy region values accepted by the deprecated workspace API.
enum RunnerRegion {
  EU = 'eu',
  US = 'us',
  ASIA = 'asia',
}

// Deprecated request body kept for backward compatibility with the old workspace API.
@ApiSchema({ name: 'CreateWorkspace' })
export class CreateWorkspaceDto {
  @ApiPropertyOptional({
    description: 'The image used for the workspace',
    example: 'daytonaio/workspace:latest',
  })
  @IsOptional()
  @IsString()
  image?: string

  @ApiPropertyOptional({
    description: 'The user associated with the project',
    example: 'daytona',
  })
  @IsOptional()
  @IsString()
  user?: string

  @ApiPropertyOptional({
    description: 'Environment variables for the workspace',
    type: 'object',
    additionalProperties: { type: 'string' },
    example: { NODE_ENV: 'production' },
  })
  @IsOptional()
  @IsObject()
  env?: { [key: string]: string }

  @ApiPropertyOptional({
    description: 'Labels for the workspace',
    type: 'object',
    additionalProperties: { type: 'string' },
    example: { 'daytona.io/public': 'true' },
  })
  @IsOptional()
  @IsObject()
  labels?: { [key: string]: string }

  @ApiPropertyOptional({
    description: 'Whether the workspace http preview is publicly accessible',
    example: false,
  })
  @IsOptional()
  @IsBoolean()
  public?: boolean

  @ApiPropertyOptional({
    description: 'The workspace class type',
    enum: SandboxClass,
    example: Object.values(SandboxClass)[0],
  })
  @IsOptional()
  @IsEnum(SandboxClass)
  class?: SandboxClass

  @ApiPropertyOptional({
    description: 'The target (region) where the workspace will be created',
    enum: RunnerRegion,
    example: Object.values(RunnerRegion)[0],
  })
  @IsOptional()
  @IsEnum(RunnerRegion)
  target?: RunnerRegion

  @ApiPropertyOptional({
    description: 'CPU cores allocated to the workspace',
    example: 2,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  cpu?: number

  @ApiPropertyOptional({
    description: 'GPU units allocated to the workspace',
    example: 1,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  gpu?: number

  @ApiPropertyOptional({
    description: 'Memory allocated to the workspace in GB',
    example: 1,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  memory?: number

  @ApiPropertyOptional({
    description: 'Disk space allocated to the workspace in GB',
    example: 3,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  disk?: number

  @ApiPropertyOptional({
    description: 'Auto-stop interval in minutes (0 means disabled)',
    example: 30,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  autoStopInterval?: number

  @ApiPropertyOptional({
    description: 'Auto-archive interval in minutes (0 means the maximum interval will be used)',
    example: 7 * 24 * 60,
    type: 'integer',
  })
  @IsOptional()
  @IsNumber()
  autoArchiveInterval?: number

  @ApiPropertyOptional({
    description: 'Array of volumes to attach to the workspace',
    type: [SandboxVolume],
    required: false,
  })
  // NOTE(review): unlike CreateSandboxDto, no @IsArray here — confirm whether that is intentional.
  @IsOptional()
  volumes?: SandboxVolume[]

  @ApiPropertyOptional({
    description: 'Build information for the workspace',
    type: CreateBuildInfoDto,
  })
  @IsOptional()
  @IsObject()
  buildInfo?: CreateBuildInfoDto
}

================================================
FILE: apps/api/src/sandbox/dto/download-files.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'

// Request body listing remote files to download from a sandbox.
@ApiSchema({ name: 'DownloadFiles' })
export class DownloadFilesDto {
  @ApiProperty({
    description: 'List of remote file paths to download',
    type: [String],
  })
  paths: string[]
}

================================================
FILE: apps/api/src/sandbox/dto/job-type-map.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { JobType } from '../enums/job-type.enum'
import { ResourceType } from '../enums/resource-type.enum'

/**
 * Type-safe mapping between JobType and its corresponding ResourceType(s) + Payload
 * This ensures compile-time safety when creating jobs
 * resourceType is an array of allowed ResourceTypes - the user can supply any of them
 */
export interface JobTypeMap {
  [JobType.CREATE_SANDBOX]: { resourceType: [ResourceType.SANDBOX] }
  [JobType.START_SANDBOX]: { resourceType: [ResourceType.SANDBOX] }
  [JobType.STOP_SANDBOX]: { resourceType: [ResourceType.SANDBOX] }
  [JobType.DESTROY_SANDBOX]: { resourceType: [ResourceType.SANDBOX] }
  [JobType.RESIZE_SANDBOX]: { resourceType: [ResourceType.SANDBOX] }
  [JobType.CREATE_BACKUP]: { resourceType: [ResourceType.SANDBOX] }
  [JobType.BUILD_SNAPSHOT]: { resourceType: [ResourceType.SANDBOX, ResourceType.SNAPSHOT] }
  [JobType.PULL_SNAPSHOT]: { resourceType: [ResourceType.SNAPSHOT] }
  [JobType.REMOVE_SNAPSHOT]: { resourceType: [ResourceType.SNAPSHOT] }
  [JobType.UPDATE_SANDBOX_NETWORK_SETTINGS]: { resourceType: [ResourceType.SANDBOX] }
  [JobType.INSPECT_SNAPSHOT_IN_REGISTRY]: { resourceType: [ResourceType.SNAPSHOT] }
  [JobType.RECOVER_SANDBOX]: { resourceType: [ResourceType.SANDBOX] }
}

/**
 * Helper type to extract the allowed resource types for a given JobType as a union
 */
// Type parameter restored: the alias indexes JobTypeMap with T, so T must be
// declared and constrained to JobType (the extracted source had lost the <...> part).
export type ResourceTypeForJobType<T extends JobType> = JobTypeMap[T]['resourceType'][number]
================================================
FILE: apps/api/src/sandbox/dto/job.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { IsEnum, IsObject, IsOptional, IsString } from 'class-validator'
import { JobType } from '../enums/job-type.enum'
import { JobStatus } from '../enums/job-status.enum'
import { ResourceType } from '../enums/resource-type.enum'
import { Job } from '../entities/job.entity'
import { PageNumber } from '../../common/decorators/page-number.decorator'
import { PageLimit } from '../../common/decorators/page-limit.decorator'

// Re-export enums for convenience
export { JobType, JobStatus, ResourceType }

// API representation of a Job entity (see constructor for the mapping).
@ApiSchema({ name: 'Job' })
export class JobDto {
  @ApiProperty({
    description: 'The ID of the job',
    example: 'job123',
  })
  id: string

  @ApiProperty({
    description: 'The type of the job',
    enum: JobType,
    enumName: 'JobType',
    example: JobType.CREATE_SANDBOX,
  })
  @IsEnum(JobType)
  type: JobType

  @ApiProperty({
    description: 'The status of the job',
    enum: JobStatus,
    enumName: 'JobStatus',
    example: JobStatus.PENDING,
  })
  @IsEnum(JobStatus)
  status: JobStatus

  @ApiProperty({
    description: 'The type of resource this job operates on',
    enum: ResourceType,
    example: ResourceType.SANDBOX,
  })
  @IsEnum(ResourceType)
  resourceType: ResourceType

  @ApiProperty({
    description: 'The ID of the resource this job operates on (sandboxId, snapshotRef, etc.)',
    example: 'sandbox123',
  })
  @IsString()
  resourceId: string

  @ApiPropertyOptional({
    description: 'Job-specific JSON-encoded payload data (operational metadata)',
  })
  @IsOptional()
  payload?: string

  @ApiPropertyOptional({
    description: 'OpenTelemetry trace context for distributed tracing (W3C Trace Context format)',
    type: 'object',
    additionalProperties: true,
    example: { traceparent: '00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01' },
  })
  @IsOptional()
  @IsObject()
  // Type arguments restored — the extracted source had a bare `Record`, which is invalid TS.
  // W3C trace-context carriers are string-keyed string values; confirm against the Job entity.
  traceContext?: Record<string, string>

  @ApiPropertyOptional({
    description: 'Error message if the job failed',
    example: 'Failed to create sandbox',
  })
  @IsOptional()
  @IsString()
  errorMessage?: string

  @ApiProperty({
    description: 'The creation timestamp of the job',
    example: '2024-10-01T12:00:00Z',
  })
  createdAt: string

  @ApiPropertyOptional({
    description: 'The last update timestamp of the job',
    example: '2024-10-01T12:00:00Z',
  })
  @IsOptional()
  updatedAt?: string

  // Maps the persistence entity to the API shape; empty-string/null fields collapse to undefined,
  // and dates are serialized as ISO-8601 strings.
  constructor(job: Job) {
    this.id = job.id
    this.type = job.type
    this.status = job.status
    this.resourceType = job.resourceType
    this.resourceId = job.resourceId
    this.payload = job.payload || undefined
    this.traceContext = job.traceContext || undefined
    this.errorMessage = job.errorMessage || undefined
    this.createdAt = job.createdAt.toISOString()
    this.updatedAt = job.updatedAt?.toISOString()
  }
}

// Long-poll request parameters for runners fetching work.
@ApiSchema({ name: 'PollJobsRequest' })
export class PollJobsRequestDto {
  @ApiPropertyOptional({
    description: 'Timeout in seconds for long polling (default: 30)',
    example: 30,
  })
  @IsOptional()
  timeout?: number

  @ApiPropertyOptional({
    description: 'Maximum number of jobs to return',
    example: 10,
  })
  @IsOptional()
  limit?: number
}

@ApiSchema({ name: 'PollJobsResponse' })
export class PollJobsResponseDto {
  @ApiProperty({
    description: 'List of jobs',
    type: [JobDto],
  })
  jobs: JobDto[]
}

@ApiSchema({ name: 'PaginatedJobs' })
export class PaginatedJobsDto {
  @ApiProperty({ type: [JobDto] })
  items: JobDto[]

  @ApiProperty()
  total: number

  @ApiProperty()
  page: number

  @ApiProperty()
  totalPages: number
}

@ApiSchema({ name: 'ListJobsQuery' })
export class ListJobsQueryDto {
  @PageNumber(1)
  page = 1

  @PageLimit(100)
  limit = 100

  @ApiPropertyOptional({
    description: 'Filter by job status',
    enum: JobStatus,
    enumName: 'JobStatus',
    example: JobStatus.PENDING,
  })
  @IsOptional()
  @IsEnum(JobStatus)
  status?: JobStatus
}

// Runner-side status report for a job it has been processing.
@ApiSchema({ name: 'UpdateJobStatus' })
export class UpdateJobStatusDto {
  @ApiProperty({
    description: 'The new status of the job',
    enum: JobStatus,
    enumName: 'JobStatus',
    example: JobStatus.IN_PROGRESS,
  })
  @IsEnum(JobStatus)
  status: JobStatus

  @ApiPropertyOptional({
    description: 'Error message if the job failed',
    example: 'Failed to create sandbox',
  })
  @IsOptional()
  @IsString()
  errorMessage?: string

  @ApiPropertyOptional({
    description: 'Result metadata for the job',
  })
  @IsOptional()
  @IsString()
  resultMetadata?: string
}

================================================
FILE: apps/api/src/sandbox/dto/list-sandboxes-query.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { IsBoolean, IsInt, IsOptional, IsString, IsArray, IsEnum, IsDate, Min } from 'class-validator'
import { Type } from 'class-transformer'
import { SandboxState } from '../enums/sandbox-state.enum'
import { ToArray } from '../../common/decorators/to-array.decorator'
import { PageNumber } from '../../common/decorators/page-number.decorator'
import { PageLimit } from '../../common/decorators/page-limit.decorator'

export enum SandboxSortField {
  ID = 'id',
  NAME = 'name',
  STATE = 'state',
  SNAPSHOT = 'snapshot',
  REGION = 'region',
  UPDATED_AT = 'updatedAt',
  CREATED_AT = 'createdAt',
}

export enum SandboxSortDirection {
  ASC = 'asc',
  DESC = 'desc',
}

export const DEFAULT_SANDBOX_SORT_FIELD = SandboxSortField.CREATED_AT
export const DEFAULT_SANDBOX_SORT_DIRECTION = SandboxSortDirection.DESC

// DESTROYED sandboxes are never listable via the query API.
const VALID_QUERY_STATES = Object.values(SandboxState).filter((state) => state !== SandboxState.DESTROYED)

@ApiSchema({ name: 'ListSandboxesQuery' })
export class ListSandboxesQueryDto {
  @PageNumber(1)
  page = 1

  @PageLimit(100)
  limit = 100

  @ApiProperty({
    name: 'id',
    description: 'Filter by partial ID match',
    required: false,
    type: String,
    example: 'abc123',
  })
  @IsOptional()
  @IsString()
  id?: string

  @ApiProperty({
    name: 'name',
    description: 'Filter by partial name match',
    required: false,
    type: String,
    example:
'abc123',
  })
  @IsOptional()
  @IsString()
  name?: string

  @ApiProperty({
    name: 'labels',
    description: 'JSON encoded labels to filter by',
    required: false,
    type: String,
    example: '{"label1": "value1", "label2": "value2"}',
  })
  @IsOptional()
  @IsString()
  labels?: string

  @ApiProperty({
    name: 'includeErroredDeleted',
    description: 'Include results with errored state and deleted desired state',
    required: false,
    type: Boolean,
    default: false,
  })
  @IsOptional()
  // NOTE(review): @Type(() => Boolean) coerces any non-empty query string — including
  // "false" — to true; confirm whether a string-aware transform is needed here.
  @Type(() => Boolean)
  @IsBoolean()
  includeErroredDeleted?: boolean

  @ApiProperty({
    name: 'states',
    description: 'List of states to filter by',
    required: false,
    enum: VALID_QUERY_STATES,
    isArray: true,
  })
  @IsOptional()
  @ToArray()
  @IsArray()
  @IsEnum(VALID_QUERY_STATES, {
    each: true,
    message: `each value must be one of the following values: ${VALID_QUERY_STATES.join(', ')}`,
  })
  states?: SandboxState[]

  @ApiProperty({
    name: 'snapshots',
    description: 'List of snapshot names to filter by',
    required: false,
    type: [String],
  })
  @IsOptional()
  @ToArray()
  @IsArray()
  @IsString({ each: true })
  snapshots?: string[]

  @ApiProperty({
    name: 'regions',
    description: 'List of regions to filter by',
    required: false,
    type: [String],
  })
  @IsOptional()
  @ToArray()
  @IsArray()
  @IsString({ each: true })
  regions?: string[]

  @ApiProperty({
    name: 'minCpu',
    description: 'Minimum CPU',
    required: false,
    type: Number,
    minimum: 1,
  })
  @IsOptional()
  @Type(() => Number)
  @IsInt()
  @Min(1)
  minCpu?: number

  @ApiProperty({
    name: 'maxCpu',
    description: 'Maximum CPU',
    required: false,
    type: Number,
    minimum: 1,
  })
  @IsOptional()
  @Type(() => Number)
  @IsInt()
  @Min(1)
  maxCpu?: number

  @ApiProperty({
    name: 'minMemoryGiB',
    description: 'Minimum memory in GiB',
    required: false,
    type: Number,
    minimum: 1,
  })
  @IsOptional()
  @Type(() => Number)
  @IsInt()
  @Min(1)
  minMemoryGiB?: number

  @ApiProperty({
    name: 'maxMemoryGiB',
    description: 'Maximum memory in GiB',
    required: false,
    type: Number,
    minimum: 1,
  })
  @IsOptional()
  @Type(() => Number)
  @IsInt()
  @Min(1)
  maxMemoryGiB?: number

  @ApiProperty({
    name: 'minDiskGiB',
    description: 'Minimum disk space in GiB',
    required: false,
    type: Number,
    minimum: 1,
  })
  @IsOptional()
  @Type(() => Number)
  @IsInt()
  @Min(1)
  minDiskGiB?: number

  @ApiProperty({
    name: 'maxDiskGiB',
    description: 'Maximum disk space in GiB',
    required: false,
    type: Number,
    minimum: 1,
  })
  @IsOptional()
  @Type(() => Number)
  @IsInt()
  @Min(1)
  maxDiskGiB?: number

  @ApiProperty({
    name: 'lastEventAfter',
    description: 'Include items with last event after this timestamp',
    required: false,
    type: String,
    format: 'date-time',
    example: '2024-01-01T00:00:00Z',
  })
  @IsOptional()
  @Type(() => Date)
  @IsDate()
  lastEventAfter?: Date

  @ApiProperty({
    name: 'lastEventBefore',
    description: 'Include items with last event before this timestamp',
    required: false,
    type: String,
    format: 'date-time',
    example: '2024-12-31T23:59:59Z',
  })
  @IsOptional()
  @Type(() => Date)
  @IsDate()
  lastEventBefore?: Date

  @ApiProperty({
    name: 'sort',
    description: 'Field to sort by',
    required: false,
    enum: SandboxSortField,
    default: DEFAULT_SANDBOX_SORT_FIELD,
  })
  @IsOptional()
  @IsEnum(SandboxSortField)
  sort = DEFAULT_SANDBOX_SORT_FIELD

  @ApiProperty({
    name: 'order',
    description: 'Direction to sort by',
    required: false,
    enum: SandboxSortDirection,
    default: DEFAULT_SANDBOX_SORT_DIRECTION,
  })
  @IsOptional()
  @IsEnum(SandboxSortDirection)
  order = DEFAULT_SANDBOX_SORT_DIRECTION
}

================================================
FILE: apps/api/src/sandbox/dto/list-snapshots-query.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { IsOptional, IsString, IsEnum } from 'class-validator'
import { PageNumber } from '../../common/decorators/page-number.decorator'
import { PageLimit } from '../../common/decorators/page-limit.decorator'

export enum SnapshotSortField {
  NAME = 'name',
  STATE = 'state',
  LAST_USED_AT = 'lastUsedAt',
  CREATED_AT = 'createdAt',
}

export enum SnapshotSortDirection {
  ASC = 'asc',
  DESC = 'desc',
}

// Paginated snapshot listing query; defaults to most-recently-used first.
@ApiSchema({ name: 'ListSnapshotsQuery' })
export class ListSnapshotsQueryDto {
  @PageNumber(1)
  page = 1

  @PageLimit(100)
  limit = 100

  @ApiProperty({
    name: 'name',
    description: 'Filter by partial name match',
    required: false,
    type: String,
    example: 'abc123',
  })
  @IsOptional()
  @IsString()
  name?: string

  @ApiProperty({
    name: 'sort',
    description: 'Field to sort by',
    required: false,
    enum: SnapshotSortField,
    default: SnapshotSortField.LAST_USED_AT,
  })
  @IsOptional()
  @IsEnum(SnapshotSortField)
  sort = SnapshotSortField.LAST_USED_AT

  @ApiProperty({
    name: 'order',
    description: 'Direction to sort by',
    required: false,
    enum: SnapshotSortDirection,
    default: SnapshotSortDirection.DESC,
  })
  @IsOptional()
  @IsEnum(SnapshotSortDirection)
  order = SnapshotSortDirection.DESC
}

================================================
FILE: apps/api/src/sandbox/dto/lsp.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { IsString, IsNumber, IsOptional, ValidateNested, IsArray, IsBoolean } from 'class-validator'
import { Type } from 'class-transformer'

// DTOs mirroring Language Server Protocol request/response shapes.
@ApiSchema({ name: 'LspServerRequest' })
export class LspServerRequestDto {
  @ApiProperty({ description: 'Language identifier' })
  @IsString()
  languageId: string

  @ApiProperty({ description: 'Path to the project' })
  @IsString()
  pathToProject: string
}

@ApiSchema({ name: 'LspDocumentRequest' })
export class LspDocumentRequestDto extends LspServerRequestDto {
  @ApiProperty({ description: 'Document URI' })
  @IsString()
  uri: string
}

// Zero-based line/character position within a document.
@ApiSchema({ name: 'Position' })
export class PositionDto {
  @ApiProperty()
  @IsNumber()
  line: number

  @ApiProperty()
  @IsNumber()
  character: number
}

@ApiSchema({ name: 'CompletionContext' })
export class CompletionContextDto {
  @ApiProperty()
  @IsNumber()
  triggerKind: number

  @ApiPropertyOptional()
  @IsOptional()
  @IsString()
  triggerCharacter?: string
}

@ApiSchema({ name: 'LspCompletionParams' })
export class LspCompletionParamsDto extends LspDocumentRequestDto {
  @ApiProperty()
  @ValidateNested()
  @Type(() => PositionDto)
  position: PositionDto

  @ApiPropertyOptional()
  @IsOptional()
  @ValidateNested()
  @Type(() => CompletionContextDto)
  context?: CompletionContextDto
}

@ApiSchema({ name: 'Range' })
export class RangeDto {
  @ApiProperty()
  @ValidateNested()
  @Type(() => PositionDto)
  start: PositionDto

  @ApiProperty()
  @ValidateNested()
  @Type(() => PositionDto)
  end: PositionDto
}

@ApiSchema({ name: 'CompletionItem' })
export class CompletionItemDto {
  @ApiProperty()
  @IsString()
  label: string

  @ApiPropertyOptional()
  @IsOptional()
  @IsNumber()
  kind?: number

  @ApiPropertyOptional()
  @IsOptional()
  @IsString()
  detail?: string

  // NOTE(review): typed `any` — LSP allows string or MarkupContent here; consider a stricter union.
  @ApiPropertyOptional()
  @IsOptional()
  documentation?: any

  @ApiPropertyOptional()
  @IsOptional()
  @IsString()
  sortText?: string

  @ApiPropertyOptional()
  @IsOptional()
  @IsString()
  filterText?: string

  @ApiPropertyOptional()
  @IsOptional()
  @IsString()
  insertText?: string
}

@ApiSchema({ name: 'CompletionList' })
export class CompletionListDto {
  @ApiProperty()
  @IsBoolean()
  isIncomplete: boolean

  @ApiProperty({ type: [CompletionItemDto] })
  @IsArray()
  @ValidateNested({ each: true })
  @Type(() => CompletionItemDto)
  items: CompletionItemDto[]
}

@ApiSchema({ name: 'LspLocation' })
export class LspLocationDto {
  @ApiProperty()
  @ValidateNested()
  @Type(() => RangeDto)
  range: RangeDto

  @ApiProperty()
  @IsString()
  uri: string
}

@ApiSchema({ name: 'LspSymbol' })
export class LspSymbolDto {
  @ApiProperty()
  @IsNumber()
  kind: number

  @ApiProperty()
  @ValidateNested()
  @Type(() => LspLocationDto)
  location: LspLocationDto

  @ApiProperty()
  @IsString()
  name: string
}

@ApiSchema({ name: 'WorkspaceSymbolParams' })
export class WorkspaceSymbolParamsDto {
  @ApiProperty()
  @IsString()
  query: string
}

================================================
FILE: apps/api/src/sandbox/dto/paginated-sandboxes.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { SandboxDto } from './sandbox.dto'

@ApiSchema({ name: 'PaginatedSandboxes' })
export class PaginatedSandboxesDto {
  @ApiProperty({ type: [SandboxDto] })
  items: SandboxDto[]

  @ApiProperty()
  total: number

  @ApiProperty()
  page: number

  @ApiProperty()
  totalPages: number
}

================================================
FILE: apps/api/src/sandbox/dto/paginated-snapshots.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { SnapshotDto } from './snapshot.dto'

// One page of snapshots plus pagination metadata.
@ApiSchema({ name: 'PaginatedSnapshots' })
export class PaginatedSnapshotsDto {
  @ApiProperty({ type: [SnapshotDto] })
  items: SnapshotDto[]

  @ApiProperty()
  total: number

  @ApiProperty()
  page: number

  @ApiProperty()
  totalPages: number
}

================================================
FILE: apps/api/src/sandbox/dto/port-preview-url.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { IsNumber, IsString } from 'class-validator'

// Preview URL + access token for a sandbox port (URL pattern shown in examples).
@ApiSchema({ name: 'PortPreviewUrl' })
export class PortPreviewUrlDto {
  @ApiProperty({
    description: 'ID of the sandbox',
    example: '123456',
  })
  @IsString()
  sandboxId: string

  @ApiProperty({
    description: 'Preview url',
    example: 'https://{port}-{sandboxId}.{proxyDomain}',
  })
  @IsString()
  url: string

  @ApiProperty({
    description: 'Access token',
    example: 'ul67qtv-jl6wb9z5o3eii-ljqt9qed6l',
  })
  @IsString()
  token: string
}

// Signed variant: the token is embedded in the URL host instead of passed separately.
@ApiSchema({ name: 'SignedPortPreviewUrl' })
export class SignedPortPreviewUrlDto {
  @ApiProperty({
    description: 'ID of the sandbox',
    example: '123456',
  })
  @IsString()
  sandboxId: string

  @ApiProperty({
    description: 'Port number of the signed preview URL',
    example: 3000,
    type: 'integer',
  })
  @IsNumber()
  port: number

  @ApiProperty({
    description: 'Token of the signed preview URL',
    example: 'jl6wb9z5o3eii',
  })
  @IsString()
  token: string

  @ApiProperty({
    description: 'Signed preview url',
    example: 'https://{port}-{token}.{proxyDomain}',
  })
  @IsString()
  url: string
}

================================================
FILE: apps/api/src/sandbox/dto/registry-push-access-dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty } from '@nestjs/swagger'

// Temporary credentials granting push access to a container registry project.
export class RegistryPushAccessDto {
  @ApiProperty({
    description: 'Temporary username for registry authentication',
    example: 'temp-user-123',
  })
  username: string

  @ApiProperty({
    description: 'Temporary secret for registry authentication',
    example: 'eyJhbGciOiJIUzI1NiIs...',
  })
  secret: string

  @ApiProperty({
    description: 'Registry URL',
    example: 'registry.example.com',
  })
  registryUrl: string

  @ApiProperty({
    description: 'Registry ID',
    example: '123e4567-e89b-12d3-a456-426614174000',
  })
  registryId: string

  @ApiProperty({
    description: 'Registry project ID',
    example: 'library',
  })
  project: string

  @ApiProperty({
    description: 'Token expiration time in ISO format',
    example: '2023-12-31T23:59:59Z',
  })
  expiresAt: string
}

================================================
FILE: apps/api/src/sandbox/dto/resize-sandbox.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { IsOptional, IsNumber, Min } from 'class-validator'
import { ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'

// Resize request for a sandbox; all fields optional, each must be >= 1 when given.
@ApiSchema({ name: 'ResizeSandbox' })
export class ResizeSandboxDto {
  @ApiPropertyOptional({
    description: 'CPU cores to allocate to the sandbox (minimum: 1)',
    example: 2,
    type: 'integer',
    minimum: 1,
  })
  @IsOptional()
  @IsNumber()
  @Min(1)
  cpu?: number

  @ApiPropertyOptional({
    description: 'Memory in GB to allocate to the sandbox (minimum: 1)',
    example: 4,
    type: 'integer',
    minimum: 1,
  })
  @IsOptional()
  @IsNumber()
  @Min(1)
  memory?: number

  // Per the description, disk can only grow; shrink rejection is enforced elsewhere.
  @ApiPropertyOptional({
    description: 'Disk space in GB to allocate to the sandbox (can only be increased)',
    example: 20,
    type: 'integer',
    minimum: 1,
  })
  @IsOptional()
  @IsNumber()
  @Min(1)
  disk?: number
}

================================================
FILE: apps/api/src/sandbox/dto/runner-full.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { IsEnum, IsOptional } from 'class-validator'
import { Runner } from '../entities/runner.entity'
import { RunnerDto } from './runner.dto'
import { RegionType } from '../../region/enums/region-type.enum'

/**
 * Privileged runner view: everything in RunnerDto plus the runner's API key
 * and (optionally) its region type. Intended for admin-facing endpoints.
 */
@ApiSchema({ name: 'RunnerFull' })
export class RunnerFullDto extends RunnerDto {
  @ApiProperty({
    description: 'The API key for the runner',
    example: 'dtn_1234567890',
  })
  apiKey: string

  @ApiPropertyOptional({
    description: 'The region type of the runner',
    enum: RegionType,
    enumName: 'RegionType',
    example: Object.values(RegionType)[0],
  })
  @IsOptional()
  @IsEnum(RegionType)
  regionType?: RegionType

  // Builds on RunnerDto.fromRunner and layers in the sensitive apiKey field.
  static fromRunner(runner: Runner, regionType?: RegionType): RunnerFullDto {
    return {
      ...RunnerDto.fromRunner(runner),
      apiKey: runner.apiKey,
      regionType,
    }
  }
}

================================================
FILE: apps/api/src/sandbox/dto/runner-health.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { IsArray, IsBoolean, IsNumber, IsOptional, IsString, ValidateNested } from 'class-validator'
import { Type } from 'class-transformer'

// Point-in-time utilization and capacity figures reported by a runner.
@ApiSchema({ name: 'RunnerHealthMetrics' })
export class RunnerHealthMetricsDto {
  @ApiProperty({
    description: 'Current CPU load average',
    example: 0.98,
  })
  @IsNumber()
  currentCpuLoadAverage: number

  @ApiProperty({
    description: 'Current CPU usage percentage',
    example: 45.5,
  })
  @IsNumber()
  currentCpuUsagePercentage: number

  @ApiProperty({
    description: 'Current memory usage percentage',
    example: 60.2,
  })
  @IsNumber()
  currentMemoryUsagePercentage: number

  @ApiProperty({
    description: 'Current disk usage percentage',
    example: 35.8,
  })
  @IsNumber()
  currentDiskUsagePercentage: number

  @ApiProperty({
    description: 'Currently allocated CPU cores',
    example: 8,
  })
  @IsNumber()
  currentAllocatedCpu: number

  @ApiProperty({
    description: 'Currently allocated memory in GiB',
    example: 16,
  })
  @IsNumber()
  currentAllocatedMemoryGiB: number

  @ApiProperty({
    description: 'Currently allocated disk in GiB',
    example: 100,
  })
  @IsNumber()
  currentAllocatedDiskGiB: number

  @ApiProperty({
    description: 'Number of snapshots currently stored',
    example: 5,
  })
  @IsNumber()
  currentSnapshotCount: number

  @ApiProperty({
    description: 'Number of started sandboxes',
    example: 10,
  })
  @IsNumber()
  currentStartedSandboxes: number

  @ApiProperty({
    description: 'Total CPU cores on the runner',
    example: 8,
  })
  @IsNumber()
  cpu: number

  @ApiProperty({
    description: 'Total RAM in GiB on the runner',
    example: 16,
  })
  @IsNumber()
  memoryGiB: number

  @ApiProperty({
    description: 'Total disk space in GiB on the runner',
    example: 100,
  })
  @IsNumber()
  diskGiB: number
}

// Health status for one service running on the runner host.
@ApiSchema({ name: 'RunnerServiceHealth' })
export class RunnerServiceHealthDto {
  @ApiProperty({
    description: 'Name of the service being checked',
    example: 'runner',
  })
  @IsString()
  serviceName: string

  @ApiProperty({
    description: 'Whether the service is healthy',
    example: false,
  })
  @IsBoolean()
  healthy: boolean

  // Only populated when healthy is false.
  @ApiPropertyOptional({
    description: 'Error reason if the service is unhealthy',
    example: 'Cannot connect to the runner',
  })
  @IsOptional()
  @IsString()
  errorReason?: string
}

/**
 * Aggregate healthcheck payload: optional metrics, per-service health, and the
 * runner's advertised endpoints and app version.
 */
@ApiSchema({ name: 'RunnerHealthcheck' })
export class RunnerHealthcheckDto {
  @ApiPropertyOptional({
    description: 'Runner metrics',
    type: RunnerHealthMetricsDto,
  })
  @IsOptional()
  metrics?: RunnerHealthMetricsDto

  @ApiPropertyOptional({
    description: 'Health status of individual services on the runner',
    type: [RunnerServiceHealthDto],
  })
  @IsOptional()
  @IsArray()
  @ValidateNested({ each: true })
  @Type(() => RunnerServiceHealthDto)
  serviceHealth?: RunnerServiceHealthDto[]

  // NOTE(review): the three optional URL/domain fields below lack @IsString(),
  // unlike other string fields in this file — confirm whether that is intentional.
  @ApiPropertyOptional({
    description: 'Runner domain',
    example: 'runner-123.daytona.example.com',
  })
  @IsOptional()
  domain?: string

  @ApiPropertyOptional({
    description: 'Runner proxy URL',
    example: 'http://proxy.daytona.example.com:8080',
  })
  @IsOptional()
  proxyUrl?: string

  @ApiPropertyOptional({
    description: 'Runner API URL',
    example: 'http://api.daytona.example.com:8080',
  })
  @IsOptional()
  apiUrl?: string

  @ApiProperty({
    description: 'Runner app version',
    example: 'v0.0.0-dev',
  })
  @IsString()
  appVersion: string
}

================================================
FILE: apps/api/src/sandbox/dto/runner-snapshot.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional } from '@nestjs/swagger'

// Identifies a snapshot stored on a specific runner.
export class RunnerSnapshotDto {
  @ApiProperty({
    description: 'Runner snapshot ID',
    example: '123e4567-e89b-12d3-a456-426614174000',
  })
  runnerSnapshotId: string

  @ApiProperty({
    description: 'Runner ID',
    example: '123e4567-e89b-12d3-a456-426614174000',
  })
  runnerId: string

  @ApiPropertyOptional({
    description: 'Runner domain',
    example: 'runner.example.com',
  })
  runnerDomain?: string

  // Normalizes a null domain to undefined so the optional field serializes cleanly.
  constructor(runnerSnapshotId: string, runnerId: string, runnerDomain: string | null) {
    this.runnerSnapshotId = runnerSnapshotId
    this.runnerId = runnerId
    this.runnerDomain = runnerDomain ?? undefined
  }
}

================================================
FILE: apps/api/src/sandbox/dto/runner-status.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'

// Runner-reported status snapshot: utilization percentages, allocations, and version.
@ApiSchema({ name: 'RunnerStatus' })
export class RunnerStatusDto {
  @ApiProperty({
    description: 'Current CPU usage percentage',
    example: 45.6,
  })
  currentCpuUsagePercentage: number

  @ApiProperty({
    description: 'Current RAM usage percentage',
    example: 68.2,
  })
  currentMemoryUsagePercentage: number

  @ApiProperty({
    description: 'Current disk usage percentage',
    example: 33.8,
  })
  currentDiskUsagePercentage: number

  @ApiProperty({
    description: 'Current allocated CPU',
    example: 4000,
  })
  currentAllocatedCpu: number

  @ApiProperty({
    description: 'Current allocated memory',
    example: 8000,
  })
  currentAllocatedMemoryGiB: number

  @ApiProperty({
    description: 'Current allocated disk',
    example: 50000,
  })
  currentAllocatedDiskGiB: number

  @ApiProperty({
    description: 'Current snapshot count',
    example: 12,
  })
  currentSnapshotCount: number

  @ApiProperty({
    description: 'Runner status',
    example: 'ok',
  })
  status: string

  @ApiProperty({
    description: 'Runner version',
    example: '0.0.1',
  })
  version: string
}
================================================
FILE: apps/api/src/sandbox/dto/runner.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { IsEnum, IsOptional } from 'class-validator'
import { Runner } from '../entities/runner.entity'
import { SandboxClass } from '../enums/sandbox-class.enum'
import { RunnerState } from '../enums/runner-state.enum'

/**
 * Public API representation of a Runner entity: identity, endpoints, capacity,
 * live utilization, and lifecycle state. Built via the fromRunner factory.
 */
@ApiSchema({ name: 'Runner' })
export class RunnerDto {
  @ApiProperty({
    description: 'The ID of the runner',
    example: 'runner123',
  })
  id: string

  @ApiProperty({
    description: 'The domain of the runner',
    example: 'runner1.example.com',
    required: false,
  })
  @IsOptional()
  domain?: string

  @ApiProperty({
    description: 'The API URL of the runner',
    example: 'https://api.runner1.example.com',
    required: false,
  })
  @IsOptional()
  apiUrl?: string

  @ApiProperty({
    description: 'The proxy URL of the runner',
    example: 'https://proxy.runner1.example.com',
    required: false,
  })
  @IsOptional()
  proxyUrl?: string

  @ApiProperty({
    description: 'The CPU capacity of the runner',
    example: 8,
  })
  cpu: number

  // Populated from the entity's memoryGiB field (see fromRunner).
  @ApiProperty({
    description: 'The memory capacity of the runner in GiB',
    example: 16,
  })
  memory: number

  // Populated from the entity's diskGiB field (see fromRunner).
  @ApiProperty({
    description: 'The disk capacity of the runner in GiB',
    example: 100,
  })
  disk: number

  @ApiProperty({
    description: 'The GPU capacity of the runner',
    example: 1,
    required: false,
  })
  @IsOptional()
  gpu?: number

  @ApiProperty({
    description: 'The type of GPU',
    required: false,
  })
  @IsOptional()
  gpuType?: string

  @ApiProperty({
    description: 'The class of the runner',
    enum: SandboxClass,
    enumName: 'SandboxClass',
    example: SandboxClass.SMALL,
  })
  @IsEnum(SandboxClass)
  class: SandboxClass

  // NOTE(review): the metrics fields below are ApiPropertyOptional in the schema
  // but declared as required TS properties — confirm whether they can be absent.
  @ApiPropertyOptional({
    description: 'Current CPU usage percentage',
    example: 45.6,
  })
  currentCpuUsagePercentage: number

  @ApiPropertyOptional({
    description: 'Current RAM usage percentage',
    example: 68.2,
  })
  currentMemoryUsagePercentage: number

  @ApiPropertyOptional({
    description: 'Current disk usage percentage',
    example: 33.8,
  })
  currentDiskUsagePercentage: number

  @ApiPropertyOptional({
    description: 'Current allocated CPU',
    example: 4000,
  })
  currentAllocatedCpu: number

  @ApiPropertyOptional({
    description: 'Current allocated memory in GiB',
    example: 8000,
  })
  currentAllocatedMemoryGiB: number

  @ApiPropertyOptional({
    description: 'Current allocated disk in GiB',
    example: 50000,
  })
  currentAllocatedDiskGiB: number

  @ApiPropertyOptional({
    description: 'Current snapshot count',
    example: 12,
  })
  currentSnapshotCount: number

  @ApiPropertyOptional({
    description: 'Current number of started sandboxes',
    example: 5,
  })
  currentStartedSandboxes: number

  @ApiPropertyOptional({
    description: 'Runner availability score',
    example: 85,
  })
  availabilityScore: number

  @ApiProperty({
    description: 'The region of the runner',
    example: 'us',
  })
  region: string

  @ApiProperty({
    description: 'The name of the runner',
    example: 'runner1',
  })
  name: string

  @ApiProperty({
    description: 'The state of the runner',
    enum: RunnerState,
    enumName: 'RunnerState',
    example: RunnerState.INITIALIZING,
  })
  @IsEnum(RunnerState)
  state: RunnerState

  @ApiPropertyOptional({
    description: 'The last time the runner was checked',
    example: '2024-10-01T12:00:00Z',
    required: false,
  })
  @IsOptional()
  lastChecked?: string

  @ApiProperty({
    description: 'Whether the runner is unschedulable',
    example: false,
  })
  unschedulable: boolean

  @ApiProperty({
    description: 'The creation timestamp of the runner',
    example: '2023-10-01T12:00:00Z',
  })
  createdAt: string

  @ApiProperty({
    description: 'The last update timestamp of the runner',
    example: '2023-10-01T12:00:00Z',
  })
  updatedAt: string

  // Deprecated alias; fromRunner fills it with the entity's apiVersion.
  @ApiProperty({
    description: 'The version of the runner (deprecated in favor of apiVersion)',
    example: '0',
    deprecated: true,
  })
  version: string

  @ApiProperty({
    description: 'The api version of the runner',
    example: '0',
    deprecated: true,
  })
  apiVersion: string

  @ApiPropertyOptional({
    description: 'The app version of the runner',
    example: 'v0.0.0-dev',
    deprecated: true,
  })
  @IsOptional()
  appVersion?: string

  /**
   * Maps a Runner entity to its DTO shape. Notable conversions: entity
   * memoryGiB/diskGiB become memory/disk; Date fields are serialized to ISO
   * strings; the deprecated `version` field mirrors apiVersion.
   */
  static fromRunner(runner: Runner): RunnerDto {
    return {
      id: runner.id,
      domain: runner.domain,
      apiUrl: runner.apiUrl,
      proxyUrl: runner.proxyUrl,
      cpu: runner.cpu,
      memory: runner.memoryGiB,
      disk: runner.diskGiB,
      gpu: runner.gpu,
      gpuType: runner.gpuType,
      class: runner.class,
      currentCpuUsagePercentage: runner.currentCpuUsagePercentage,
      currentMemoryUsagePercentage: runner.currentMemoryUsagePercentage,
      currentDiskUsagePercentage: runner.currentDiskUsagePercentage,
      currentAllocatedCpu: runner.currentAllocatedCpu,
      currentAllocatedMemoryGiB: runner.currentAllocatedMemoryGiB,
      currentAllocatedDiskGiB: runner.currentAllocatedDiskGiB,
      currentSnapshotCount: runner.currentSnapshotCount,
      currentStartedSandboxes: runner.currentStartedSandboxes,
      availabilityScore: runner.availabilityScore,
      region: runner.region,
      name: runner.name,
      state: runner.state,
      lastChecked: runner.lastChecked?.toISOString(),
      unschedulable: runner.unschedulable,
      createdAt: runner.createdAt.toISOString(),
      updatedAt: runner.updatedAt.toISOString(),
      version: runner.apiVersion,
      apiVersion: runner.apiVersion,
      appVersion: runner.appVersion,
    }
  }
}

================================================
FILE: apps/api/src/sandbox/dto/sandbox.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' import { SandboxState } from '../enums/sandbox-state.enum' import { IsEnum, IsOptional } from 'class-validator' import { BackupState } from '../enums/backup-state.enum' import { Sandbox } from '../entities/sandbox.entity' import { SandboxDesiredState } from '../enums/sandbox-desired-state.enum' import { BuildInfoDto } from './build-info.dto' import { SandboxClass } from '../enums/sandbox-class.enum' @ApiSchema({ name: 'SandboxVolume' }) export class SandboxVolume { @ApiProperty({ description: 'The ID of the volume', example: 'volume123', }) volumeId: string @ApiProperty({ description: 'The mount path for the volume', example: '/data', }) mountPath: string @ApiPropertyOptional({ description: 'Optional subpath within the volume to mount. When specified, only this S3 prefix will be accessible. When omitted, the entire volume is mounted.', example: 'users/alice', }) subpath?: string } @ApiSchema({ name: 'Sandbox' }) export class SandboxDto { @ApiProperty({ description: 'The ID of the sandbox', example: 'sandbox123', }) id: string @ApiProperty({ description: 'The organization ID of the sandbox', example: 'organization123', }) organizationId: string @ApiProperty({ description: 'The name of the sandbox', example: 'MySandbox', }) name: string @ApiPropertyOptional({ description: 'The snapshot used for the sandbox', example: 'daytonaio/sandbox:latest', }) snapshot: string @ApiProperty({ description: 'The user associated with the project', example: 'daytona', }) user: string @ApiProperty({ description: 'Environment variables for the sandbox', type: 'object', additionalProperties: { type: 'string' }, example: { NODE_ENV: 'production' }, }) env: Record @ApiProperty({ description: 'Labels for the sandbox', type: 'object', additionalProperties: { type: 'string' }, example: { 'daytona.io/public': 'true' }, }) labels: { [key: string]: string } @ApiProperty({ 
description: 'Whether the sandbox http preview is public', example: false, }) public: boolean @ApiProperty({ description: 'Whether to block all network access for the sandbox', example: false, }) networkBlockAll: boolean @ApiPropertyOptional({ description: 'Comma-separated list of allowed CIDR network addresses for the sandbox', example: '192.168.1.0/16,10.0.0.0/24', }) networkAllowList?: string @ApiProperty({ description: 'The target environment for the sandbox', example: 'local', }) target: string @ApiProperty({ description: 'The CPU quota for the sandbox', example: 2, }) cpu: number @ApiProperty({ description: 'The GPU quota for the sandbox', example: 0, }) gpu: number @ApiProperty({ description: 'The memory quota for the sandbox', example: 4, }) memory: number @ApiProperty({ description: 'The disk quota for the sandbox', example: 10, }) disk: number @ApiPropertyOptional({ description: 'The state of the sandbox', enum: SandboxState, enumName: 'SandboxState', example: Object.values(SandboxState)[0], required: false, }) @IsEnum(SandboxState) @IsOptional() state?: SandboxState @ApiPropertyOptional({ description: 'The desired state of the sandbox', enum: SandboxDesiredState, enumName: 'SandboxDesiredState', example: Object.values(SandboxDesiredState)[0], required: false, }) @IsEnum(SandboxDesiredState) @IsOptional() desiredState?: SandboxDesiredState @ApiPropertyOptional({ description: 'The error reason of the sandbox', example: 'The sandbox is not running', required: false, }) @IsOptional() errorReason?: string @ApiPropertyOptional({ description: 'Whether the sandbox error is recoverable.', example: true, required: false, }) @IsOptional() recoverable?: boolean @ApiPropertyOptional({ description: 'The state of the backup', enum: BackupState, example: Object.values(BackupState)[0], required: false, }) @IsEnum(BackupState) @IsOptional() backupState?: BackupState @ApiPropertyOptional({ description: 'The creation timestamp of the last backup', example: 
'2024-10-01T12:00:00Z', required: false, }) @IsOptional() backupCreatedAt?: string @ApiPropertyOptional({ description: 'Auto-stop interval in minutes (0 means disabled)', example: 30, required: false, }) @IsOptional() autoStopInterval?: number @ApiPropertyOptional({ description: 'Auto-archive interval in minutes', example: 7 * 24 * 60, required: false, }) @IsOptional() autoArchiveInterval?: number @ApiPropertyOptional({ description: 'Auto-delete interval in minutes (negative value means disabled, 0 means delete immediately upon stopping)', example: 30, required: false, }) @IsOptional() autoDeleteInterval?: number @ApiPropertyOptional({ description: 'Array of volumes attached to the sandbox', type: [SandboxVolume], required: false, }) @IsOptional() volumes?: SandboxVolume[] @ApiPropertyOptional({ description: 'Build information for the sandbox', type: BuildInfoDto, required: false, }) @IsOptional() buildInfo?: BuildInfoDto @ApiPropertyOptional({ description: 'The creation timestamp of the sandbox', example: '2024-10-01T12:00:00Z', required: false, }) @IsOptional() createdAt?: string @ApiPropertyOptional({ description: 'The last update timestamp of the sandbox', example: '2024-10-01T12:00:00Z', required: false, }) @IsOptional() updatedAt?: string @ApiPropertyOptional({ description: 'The class of the sandbox', enum: SandboxClass, example: Object.values(SandboxClass)[0], required: false, deprecated: true, }) @IsEnum(SandboxClass) @IsOptional() class?: SandboxClass @ApiPropertyOptional({ description: 'The version of the daemon running in the sandbox', example: '1.0.0', required: false, }) @IsOptional() daemonVersion?: string @ApiPropertyOptional({ description: 'The runner ID of the sandbox', example: 'runner123', required: false, }) @IsOptional() runnerId?: string @ApiProperty({ description: 'The toolbox proxy URL for the sandbox', example: 'https://proxy.app.daytona.io/toolbox', }) toolboxProxyUrl: string static fromSandbox(sandbox: Sandbox, toolboxProxyUrl: string): 
SandboxDto { return { id: sandbox.id, organizationId: sandbox.organizationId, name: sandbox.name, target: sandbox.region, snapshot: sandbox.snapshot, user: sandbox.osUser, env: sandbox.env, cpu: sandbox.cpu, gpu: sandbox.gpu, memory: sandbox.mem, disk: sandbox.disk, public: sandbox.public, networkBlockAll: sandbox.networkBlockAll, networkAllowList: sandbox.networkAllowList, labels: sandbox.labels, volumes: sandbox.volumes, state: this.getSandboxState(sandbox), desiredState: sandbox.desiredState, errorReason: sandbox.errorReason, recoverable: sandbox.recoverable, backupState: sandbox.backupState, backupCreatedAt: sandbox.lastBackupAt ? new Date(sandbox.lastBackupAt).toISOString() : undefined, autoStopInterval: sandbox.autoStopInterval, autoArchiveInterval: sandbox.autoArchiveInterval, autoDeleteInterval: sandbox.autoDeleteInterval, class: sandbox.class, createdAt: sandbox.createdAt ? new Date(sandbox.createdAt).toISOString() : undefined, updatedAt: sandbox.updatedAt ? new Date(sandbox.updatedAt).toISOString() : undefined, buildInfo: sandbox.buildInfo ? 
{ dockerfileContent: sandbox.buildInfo.dockerfileContent, contextHashes: sandbox.buildInfo.contextHashes, createdAt: sandbox.buildInfo.createdAt, updatedAt: sandbox.buildInfo.updatedAt, snapshotRef: sandbox.buildInfo.snapshotRef, } : undefined, daemonVersion: sandbox.daemonVersion, runnerId: sandbox.runnerId, toolboxProxyUrl, } } private static getSandboxState(sandbox: Sandbox): SandboxState { switch (sandbox.state) { case SandboxState.STARTED: if (sandbox.desiredState === SandboxDesiredState.STOPPED) { return SandboxState.STOPPING } if (sandbox.desiredState === SandboxDesiredState.DESTROYED) { return SandboxState.DESTROYING } break case SandboxState.STOPPED: if (sandbox.desiredState === SandboxDesiredState.STARTED) { return SandboxState.STARTING } if (sandbox.desiredState === SandboxDesiredState.DESTROYED) { return SandboxState.DESTROYING } if (sandbox.desiredState === SandboxDesiredState.ARCHIVED) { return SandboxState.ARCHIVING } break case SandboxState.UNKNOWN: if (sandbox.desiredState === SandboxDesiredState.STARTED) { return SandboxState.CREATING } break } return sandbox.state } } @ApiSchema({ name: 'SandboxLabels' }) export class SandboxLabelsDto { @ApiProperty({ description: 'Key-value pairs of labels', example: { environment: 'dev', team: 'backend' }, type: 'object', additionalProperties: { type: 'string' }, }) labels: { [key: string]: string } } ================================================ FILE: apps/api/src/sandbox/dto/snapshot.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional } from '@nestjs/swagger'
import { SnapshotState } from '../enums/snapshot-state.enum'
import { Snapshot } from '../entities/snapshot.entity'
import { BuildInfoDto } from './build-info.dto'
import { IsOptional } from 'class-validator'

/**
 * Public API representation of a Snapshot entity; built via the fromSnapshot
 * factory, which also flattens the snapshotRegions relation into regionIds.
 */
export class SnapshotDto {
  @ApiProperty()
  id: string

  @ApiPropertyOptional()
  organizationId?: string

  @ApiProperty()
  general: boolean

  @ApiProperty()
  name: string

  @ApiPropertyOptional()
  imageName?: string

  @ApiProperty({
    enum: SnapshotState,
    enumName: 'SnapshotState',
  })
  state: SnapshotState

  @ApiProperty({ nullable: true })
  size?: number

  @ApiProperty({ nullable: true })
  entrypoint?: string[]

  @ApiProperty()
  cpu: number

  @ApiProperty()
  gpu: number

  @ApiProperty()
  mem: number

  @ApiProperty()
  disk: number

  @ApiProperty({ nullable: true })
  errorReason?: string

  @ApiProperty()
  createdAt: Date

  @ApiProperty()
  updatedAt: Date

  @ApiProperty({ nullable: true })
  lastUsedAt?: Date

  @ApiPropertyOptional({
    description: 'Build information for the snapshot',
    type: BuildInfoDto,
  })
  buildInfo?: BuildInfoDto

  @ApiPropertyOptional({
    description: 'IDs of regions where the snapshot is available',
    type: [String],
  })
  regionIds?: string[]

  @ApiPropertyOptional({
    description: 'The initial runner ID of the snapshot',
    example: 'runner123',
    required: false,
  })
  @IsOptional()
  initialRunnerId?: string

  @ApiPropertyOptional({
    description: 'The snapshot reference',
    example: 'daytonaio/sandbox:latest',
    required: false,
  })
  @IsOptional()
  ref?: string

  // Maps a Snapshot entity to its DTO shape; buildInfo is copied field-by-field.
  static fromSnapshot(snapshot: Snapshot): SnapshotDto {
    return {
      id: snapshot.id,
      organizationId: snapshot.organizationId,
      general: snapshot.general,
      name: snapshot.name,
      imageName: snapshot.imageName,
      state: snapshot.state,
      size: snapshot.size,
      entrypoint: snapshot.entrypoint,
      cpu: snapshot.cpu,
      gpu: snapshot.gpu,
      mem: snapshot.mem,
      disk: snapshot.disk,
      errorReason: snapshot.errorReason,
      createdAt: snapshot.createdAt,
      updatedAt: snapshot.updatedAt,
      lastUsedAt: snapshot.lastUsedAt,
      buildInfo: snapshot.buildInfo
        ? {
            dockerfileContent: snapshot.buildInfo.dockerfileContent,
            contextHashes: snapshot.buildInfo.contextHashes,
            createdAt: snapshot.buildInfo.createdAt,
            updatedAt: snapshot.buildInfo.updatedAt,
            snapshotRef: snapshot.buildInfo.snapshotRef,
          }
        : undefined,
      regionIds: snapshot.snapshotRegions?.map((sr) => sr.regionId) ?? undefined,
      initialRunnerId: snapshot.initialRunnerId,
      ref: snapshot.ref,
    }
  }
}

================================================
FILE: apps/api/src/sandbox/dto/ssh-access.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty } from '@nestjs/swagger'
import { SshAccess } from '../entities/ssh-access.entity'

/**
 * Token-based SSH access grant for a sandbox; fromSshAccess also composes the
 * ready-to-use ssh command from the gateway URL.
 */
export class SshAccessDto {
  @ApiProperty({
    description: 'Unique identifier for the SSH access',
    example: '123e4567-e89b-12d3-a456-426614174000',
  })
  id: string

  @ApiProperty({
    description: 'ID of the sandbox this SSH access is for',
    example: '123e4567-e89b-12d3-a456-426614174000',
  })
  sandboxId: string

  @ApiProperty({
    description: 'SSH access token',
    example: 'abc123def456ghi789jkl012mno345pqr678stu901vwx234yz',
  })
  token: string

  @ApiProperty({
    description: 'When the SSH access expires',
    example: '2025-01-01T12:00:00.000Z',
  })
  expiresAt: Date

  @ApiProperty({
    description: 'When the SSH access was created',
    example: '2025-01-01T11:00:00.000Z',
  })
  createdAt: Date

  @ApiProperty({
    description: 'When the SSH access was last updated',
    example: '2025-01-01T11:00:00.000Z',
  })
  updatedAt: Date

  @ApiProperty({
    description: 'SSH command to connect to the sandbox',
    example: 'ssh -p 2222 token@localhost',
  })
  sshCommand: string

  /**
   * Builds the DTO and derives sshCommand from sshGatewayUrl. The gateway URL
   * may be a full URL (protocol present), "host:port", or a bare host; port
   * defaults to 22, in which case the -p flag is omitted.
   */
  static fromSshAccess(sshAccess: SshAccess, sshGatewayUrl: string): SshAccessDto {
    const dto = new SshAccessDto()
    dto.id = sshAccess.id
    dto.sandboxId = sshAccess.sandboxId
    dto.token = sshAccess.token
    dto.expiresAt = sshAccess.expiresAt
    dto.createdAt = sshAccess.createdAt
    dto.updatedAt = sshAccess.updatedAt

    // Robustly extract host and port from sshGatewayUrl
    let host: string
    let port: string
    try {
      // If protocol is present, use URL
      if (sshGatewayUrl.includes('://')) {
        const url = new URL(sshGatewayUrl)
        host = url.hostname
        port = url.port || '22'
      } else {
        // No protocol, parse manually
        const [hostPart, portPart] = sshGatewayUrl.split(':')
        host = hostPart
        port = portPart || '22'
      }
    } catch {
      // Fallback: treat as host only
      host = sshGatewayUrl
      port = '22'
    }

    if (port === '22') {
      dto.sshCommand = `ssh ${sshAccess.token}@${host}`
    } else {
      dto.sshCommand = `ssh -p ${port} ${sshAccess.token}@${host}`
    }

    return dto
  }
}

// Result of checking whether an SSH access token is still valid.
export class SshAccessValidationDto {
  @ApiProperty({
    description: 'Whether the SSH access token is valid',
    example: true,
  })
  valid: boolean

  @ApiProperty({
    description: 'ID of the sandbox this SSH access is for',
    example: '123e4567-e89b-12d3-a456-426614174000',
  })
  sandboxId: string

  // Convenience factory mirroring the other DTOs in this file.
  static fromValidationResult(valid: boolean, sandboxId: string): SshAccessValidationDto {
    const dto = new SshAccessValidationDto()
    dto.valid = valid
    dto.sandboxId = sandboxId
    return dto
  }
}

// Request payload to revoke a previously issued SSH access token.
export class RevokeSshAccessDto {
  @ApiProperty({
    description: 'ID of the sandbox',
    example: '123e4567-e89b-12d3-a456-426614174000',
  })
  sandboxId: string

  @ApiProperty({
    description: 'SSH access token to revoke',
    example: 'abc123def456ghi789jkl012mno345pqr678stu901vwx234yz',
  })
  token: string
}

================================================
FILE: apps/api/src/sandbox/dto/storage-access-dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty } from '@nestjs/swagger'

// Temporary S3-style credentials scoped to an organization's storage bucket.
export class StorageAccessDto {
  @ApiProperty({
    description: 'Access key for storage authentication',
    example: 'temp-user-123',
  })
  accessKey: string

  @ApiProperty({
    description: 'Secret key for storage authentication',
    example: 'abchbGciOiJIUzI1NiIs...',
  })
  secret: string

  @ApiProperty({
    description: 'Session token for storage authentication',
    example: 'eyJhbGciOiJIUzI1NiIs...',
  })
  sessionToken: string

  @ApiProperty({
    description: 'Storage URL',
    example: 'storage.example.com',
  })
  storageUrl: string

  @ApiProperty({
    description: 'Organization ID',
    example: '123e4567-e89b-12d3-a456-426614174000',
  })
  organizationId: string

  @ApiProperty({
    description: 'S3 bucket name',
    example: 'daytona',
  })
  bucket: string
}

================================================
FILE: apps/api/src/sandbox/dto/toolbox-proxy-url.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'

// Simple wrapper carrying a sandbox's toolbox proxy URL.
@ApiSchema({ name: 'ToolboxProxyUrl' })
export class ToolboxProxyUrlDto {
  @ApiProperty({
    description: 'The toolbox proxy URL for the sandbox',
    example: 'https://proxy.app.daytona.io/toolbox',
  })
  url: string

  constructor(url: string) {
    this.url = url
  }
}

================================================
FILE: apps/api/src/sandbox/dto/toolbox.deprecated.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { IsString, IsBoolean, IsOptional, IsArray } from 'class-validator'

/** File metadata returned by toolbox file-listing endpoints. */
@ApiSchema({ name: 'FileInfo' })
export class FileInfoDto {
  @ApiProperty()
  name: string

  @ApiProperty()
  isDir: boolean

  @ApiProperty()
  size: number

  @ApiProperty()
  modTime: string

  @ApiProperty()
  mode: string

  @ApiProperty()
  permissions: string

  @ApiProperty()
  owner: string

  @ApiProperty()
  group: string
}

/** A single search hit: file, line number and matching content. */
@ApiSchema({ name: 'Match' })
export class MatchDto {
  @ApiProperty()
  file: string

  @ApiProperty()
  line: number

  @ApiProperty()
  content: string
}

/** List of file paths matching a search query. */
@ApiSchema({ name: 'SearchFilesResponse' })
export class SearchFilesResponseDto {
  @ApiProperty({ type: [String] })
  files: string[]
}

/** Request to replace a pattern with a new value across several files. */
@ApiSchema({ name: 'ReplaceRequest' })
export class ReplaceRequestDto {
  @ApiProperty({ type: [String] })
  files: string[]

  @ApiProperty()
  pattern: string

  @ApiProperty()
  newValue: string
}

/** Per-file outcome of a replace operation. */
@ApiSchema({ name: 'ReplaceResult' })
export class ReplaceResultDto {
  @ApiPropertyOptional()
  file?: string

  @ApiPropertyOptional()
  success?: boolean

  @ApiPropertyOptional()
  error?: string
}

/** Request to stage files in a git repository. */
@ApiSchema({ name: 'GitAddRequest' })
export class GitAddRequestDto {
  @ApiProperty()
  path: string

  @ApiProperty({
    type: [String],
    description: 'files to add (use . for all files)',
  })
  files: string[]
}

/** Request to create a git branch. */
@ApiSchema({ name: 'GitBranchRequest' })
export class GitBranchRequestDto {
  @ApiProperty()
  path: string

  @ApiProperty()
  name: string
}

/** Request to delete a git branch. */
@ApiSchema({ name: 'GitDeleteBranchRequest' })
export class GitDeleteBranchRequestDto {
  @ApiProperty()
  path: string

  @ApiProperty()
  name: string
}

/** Request to clone a repository, optionally at a branch or commit. */
@ApiSchema({ name: 'GitCloneRequest' })
export class GitCloneRequestDto {
  @ApiProperty()
  url: string

  @ApiProperty()
  path: string

  @ApiPropertyOptional()
  username?: string

  @ApiPropertyOptional()
  password?: string

  @ApiPropertyOptional()
  branch?: string

  @ApiPropertyOptional()
  commit_id?: string
}

/** Request to create a git commit with author identity. */
@ApiSchema({ name: 'GitCommitRequest' })
export class GitCommitRequestDto {
  @ApiProperty()
  path: string

  @ApiProperty()
  message: string

  @ApiProperty()
  author: string

  @ApiProperty()
  email: string

  @ApiPropertyOptional({
    description: 'Allow creating an empty commit when no changes are staged',
    default: false,
  })
  allow_empty?: boolean
}

/** Hash of the commit that was created. */
@ApiSchema({ name: 'GitCommitResponse' })
export class GitCommitResponseDto {
  @ApiProperty()
  hash: string
}

/** Request to check out a branch. */
@ApiSchema({ name: 'GitCheckoutRequest' })
export class GitCheckoutRequestDto {
  @ApiProperty()
  path: string

  @ApiProperty()
  branch: string
}

/** Request targeting a repository, with optional credentials. */
@ApiSchema({ name: 'GitRepoRequest' })
export class GitRepoRequestDto {
  @ApiProperty()
  path: string

  @ApiPropertyOptional()
  username?: string

  @ApiPropertyOptional()
  password?: string
}

/** Staging/worktree status flags for a single file. */
@ApiSchema({ name: 'FileStatus' })
export class FileStatusDto {
  @ApiProperty()
  name: string

  @ApiProperty()
  staging: string

  @ApiProperty()
  worktree: string

  @ApiProperty()
  extra: string
}

/** Aggregate repository status: branch, per-file state, ahead/behind counts. */
@ApiSchema({ name: 'GitStatus' })
export class GitStatusDto {
  @ApiProperty()
  currentBranch: string

  @ApiProperty({
    type: [FileStatusDto],
  })
  fileStatus: FileStatusDto[]

  @ApiPropertyOptional()
  ahead?: number

  @ApiPropertyOptional()
  behind?: number

  @ApiPropertyOptional()
  branchPublished?: boolean
}

/** Names of branches in a repository. */
@ApiSchema({ name: 'ListBranchResponse' })
export class ListBranchResponseDto {
  @ApiProperty({ type: [String] })
  branches: string[]
}
@ApiSchema({ name: 'GitCommitInfo' }) export class GitCommitInfoDto { @ApiProperty() hash: string @ApiProperty() message: string @ApiProperty() author: string @ApiProperty() email: string @ApiProperty() timestamp: string } @ApiSchema({ name: 'ExecuteRequest' }) export class ExecuteRequestDto { @ApiProperty() command: string @ApiPropertyOptional({ description: 'Current working directory', }) cwd?: string @ApiPropertyOptional({ description: 'Timeout in seconds, defaults to 10 seconds', }) timeout?: number } @ApiSchema({ name: 'ExecuteResponse' }) export class ExecuteResponseDto { @ApiProperty({ type: Number, description: 'Exit code', example: 0, }) exitCode: number @ApiProperty({ type: String, description: 'Command output', example: 'Command output here', }) result: string } @ApiSchema({ name: 'ProjectDirResponse' }) export class ProjectDirResponseDto { @ApiPropertyOptional() dir?: string } @ApiSchema({ name: 'UserHomeDirResponse' }) export class UserHomeDirResponseDto { @ApiPropertyOptional() dir?: string } @ApiSchema({ name: 'WorkDirResponse' }) export class WorkDirResponseDto { @ApiPropertyOptional() dir?: string } @ApiSchema({ name: 'CreateSessionRequest' }) export class CreateSessionRequestDto { @ApiProperty({ description: 'The ID of the session', example: 'session-123', }) @IsString() sessionId: string } @ApiSchema({ name: 'SessionExecuteRequest' }) export class SessionExecuteRequestDto { @ApiProperty({ description: 'The command to execute', example: 'ls -la', }) @IsString() command: string @ApiPropertyOptional({ description: 'Whether to execute the command asynchronously', example: false, }) @IsBoolean() @IsOptional() runAsync?: boolean @ApiPropertyOptional({ description: 'Deprecated: Use runAsync instead. 
Whether to execute the command asynchronously', example: false, deprecated: true, }) @IsBoolean() @IsOptional() async?: boolean constructor(partial: Partial) { Object.assign(this, partial) // Migrate async to runAsync if async is set and runAsync is not set if (this.async !== undefined && this.runAsync === undefined) { this.runAsync = this.async } } } @ApiSchema({ name: 'SessionExecuteResponse' }) export class SessionExecuteResponseDto { @ApiPropertyOptional({ description: 'The ID of the executed command', example: 'cmd-123', }) @IsString() @IsOptional() cmdId?: string @ApiPropertyOptional({ description: 'The output of the executed command marked with stdout and stderr prefixes', example: 'total 20\ndrwxr-xr-x 4 user group 128 Mar 15 10:30 .', }) @IsString() @IsOptional() output?: string @ApiPropertyOptional({ description: 'The exit code of the executed command', example: 0, }) @IsOptional() exitCode?: number } @ApiSchema({ name: 'Command' }) export class CommandDto { @ApiProperty({ description: 'The ID of the command', example: 'cmd-123', }) @IsString() id: string @ApiProperty({ description: 'The command that was executed', example: 'ls -la', }) @IsString() command: string @ApiPropertyOptional({ description: 'The exit code of the command', example: 0, }) @IsOptional() exitCode?: number } @ApiSchema({ name: 'Session' }) export class SessionDto { @ApiProperty({ description: 'The ID of the session', example: 'session-123', }) @IsString() sessionId: string @ApiProperty({ description: 'The list of commands executed in this session', type: [CommandDto], nullable: true, }) @IsArray() @IsOptional() commands?: CommandDto[] | null } // Computer Use DTOs @ApiSchema({ name: 'MousePosition' }) export class MousePositionDto { @ApiProperty({ description: 'The X coordinate of the mouse cursor position', example: 100, }) x: number @ApiProperty({ description: 'The Y coordinate of the mouse cursor position', example: 200, }) y: number } @ApiSchema({ name: 'MouseMoveRequest' }) 
export class MouseMoveRequestDto { @ApiProperty({ description: 'The target X coordinate to move the mouse cursor to', example: 150, }) x: number @ApiProperty({ description: 'The target Y coordinate to move the mouse cursor to', example: 250, }) y: number } @ApiSchema({ name: 'MouseMoveResponse' }) export class MouseMoveResponseDto { @ApiProperty({ description: 'The actual X coordinate where the mouse cursor ended up', example: 150, }) x: number @ApiProperty({ description: 'The actual Y coordinate where the mouse cursor ended up', example: 250, }) y: number } @ApiSchema({ name: 'MouseClickRequest' }) export class MouseClickRequestDto { @ApiProperty({ description: 'The X coordinate where to perform the mouse click', example: 100, }) x: number @ApiProperty({ description: 'The Y coordinate where to perform the mouse click', example: 200, }) y: number @ApiPropertyOptional({ description: 'The mouse button to click (left, right, middle). Defaults to left', example: 'left', }) button?: string @ApiPropertyOptional({ description: 'Whether to perform a double-click instead of a single click', example: false, }) double?: boolean } @ApiSchema({ name: 'MouseClickResponse' }) export class MouseClickResponseDto { @ApiProperty({ description: 'The actual X coordinate where the click occurred', example: 100, }) x: number @ApiProperty({ description: 'The actual Y coordinate where the click occurred', example: 200, }) y: number } @ApiSchema({ name: 'MouseDragRequest' }) export class MouseDragRequestDto { @ApiProperty({ description: 'The starting X coordinate for the drag operation', example: 100, }) startX: number @ApiProperty({ description: 'The starting Y coordinate for the drag operation', example: 200, }) startY: number @ApiProperty({ description: 'The ending X coordinate for the drag operation', example: 300, }) endX: number @ApiProperty({ description: 'The ending Y coordinate for the drag operation', example: 400, }) endY: number @ApiPropertyOptional({ description: 'The mouse 
button to use for dragging (left, right, middle). Defaults to left', example: 'left', }) button?: string } @ApiSchema({ name: 'MouseDragResponse' }) export class MouseDragResponseDto { @ApiProperty({ description: 'The actual X coordinate where the drag ended', example: 300, }) x: number @ApiProperty({ description: 'The actual Y coordinate where the drag ended', example: 400, }) y: number } @ApiSchema({ name: 'MouseScrollRequest' }) export class MouseScrollRequestDto { @ApiProperty({ description: 'The X coordinate where to perform the scroll operation', example: 100, }) x: number @ApiProperty({ description: 'The Y coordinate where to perform the scroll operation', example: 200, }) y: number @ApiProperty({ description: 'The scroll direction (up, down)', example: 'down', }) direction: string @ApiPropertyOptional({ description: 'The number of scroll units to scroll. Defaults to 1', example: 3, }) amount?: number } @ApiSchema({ name: 'MouseScrollResponse' }) export class MouseScrollResponseDto { @ApiProperty({ description: 'Whether the mouse scroll operation was successful', example: true, }) success: boolean } @ApiSchema({ name: 'KeyboardTypeRequest' }) export class KeyboardTypeRequestDto { @ApiProperty({ description: 'The text to type using the keyboard', example: 'Hello, World!', }) text: string @ApiPropertyOptional({ description: 'Delay in milliseconds between keystrokes. 
Defaults to 0', example: 100, }) delay?: number } @ApiSchema({ name: 'KeyboardPressRequest' }) export class KeyboardPressRequestDto { @ApiProperty({ description: 'The key to press (e.g., a, b, c, enter, space, etc.)', example: 'enter', }) key: string @ApiPropertyOptional({ description: 'Array of modifier keys to press along with the main key (ctrl, alt, shift, cmd)', type: [String], example: ['ctrl', 'shift'], }) modifiers?: string[] } @ApiSchema({ name: 'KeyboardHotkeyRequest' }) export class KeyboardHotkeyRequestDto { @ApiProperty({ description: 'The hotkey combination to press (e.g., "ctrl+c", "cmd+v", "alt+tab")', example: 'ctrl+c', }) keys: string } @ApiSchema({ name: 'ScreenshotResponse' }) export class ScreenshotResponseDto { @ApiProperty({ description: 'Base64 encoded screenshot image data', example: 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==', }) screenshot: string @ApiPropertyOptional({ description: 'The current cursor position when the screenshot was taken', example: { x: 500, y: 300 }, }) cursorPosition?: { x: number; y: number } @ApiPropertyOptional({ description: 'The size of the screenshot data in bytes', example: 24576, }) sizeBytes?: number } @ApiSchema({ name: 'RegionScreenshotRequest' }) export class RegionScreenshotRequestDto { @ApiProperty({ description: 'The X coordinate of the top-left corner of the region to capture', example: 100, }) x: number @ApiProperty({ description: 'The Y coordinate of the top-left corner of the region to capture', example: 100, }) y: number @ApiProperty({ description: 'The width of the region to capture in pixels', example: 800, }) width: number @ApiProperty({ description: 'The height of the region to capture in pixels', example: 600, }) height: number } @ApiSchema({ name: 'RegionScreenshotResponse' }) export class RegionScreenshotResponseDto { @ApiProperty({ description: 'Base64 encoded screenshot image data of the specified region', example: 
'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==', }) screenshot: string @ApiPropertyOptional({ description: 'The current cursor position when the region screenshot was taken', example: { x: 500, y: 300 }, }) cursorPosition?: { x: number; y: number } @ApiPropertyOptional({ description: 'The size of the screenshot data in bytes', example: 24576, }) sizeBytes?: number } @ApiSchema({ name: 'CompressedScreenshotResponse' }) export class CompressedScreenshotResponseDto { @ApiProperty({ description: 'Base64 encoded compressed screenshot image data', example: 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==', }) screenshot: string @ApiPropertyOptional({ description: 'The current cursor position when the compressed screenshot was taken', example: { x: 250, y: 150 }, }) cursorPosition?: { x: number; y: number } @ApiPropertyOptional({ description: 'The size of the compressed screenshot data in bytes', example: 12288, }) sizeBytes?: number } @ApiSchema({ name: 'DisplayInfoResponse' }) export class DisplayInfoResponseDto { @ApiProperty({ description: 'Array of display information for all connected displays', type: [Object], example: [ { id: 0, x: 0, y: 0, width: 1920, height: 1080, is_active: true, }, ], }) displays: Array<{ id: number; x: number; y: number; width: number; height: number; is_active: boolean }> } @ApiSchema({ name: 'WindowsResponse' }) export class WindowsResponseDto { @ApiProperty({ description: 'Array of window information for all visible windows', type: [Object], example: [ { id: 12345, title: 'Terminal', }, ], }) windows: Array<{ id: number; title: string }> @ApiProperty({ description: 'The total number of windows found', example: 5, }) count: number } // Computer Use Management Response DTOs @ApiSchema({ name: 'ComputerUseStartResponse' }) export class ComputerUseStartResponseDto { @ApiProperty({ description: 'A message indicating the result of starting 
computer use processes', example: 'Computer use processes started successfully', }) message: string @ApiProperty({ description: 'Status information about all VNC desktop processes after starting', type: Object, example: { xvfb: { running: true, priority: 100, autoRestart: true, pid: 12345 }, xfce4: { running: true, priority: 200, autoRestart: true, pid: 12346 }, x11vnc: { running: true, priority: 300, autoRestart: true, pid: 12347 }, novnc: { running: true, priority: 400, autoRestart: true, pid: 12348 }, }, }) status: Record } @ApiSchema({ name: 'ComputerUseStopResponse' }) export class ComputerUseStopResponseDto { @ApiProperty({ description: 'A message indicating the result of stopping computer use processes', example: 'Computer use processes stopped successfully', }) message: string @ApiProperty({ description: 'Status information about all VNC desktop processes after stopping', type: Object, example: { xvfb: { running: false, priority: 100, autoRestart: true }, xfce4: { running: false, priority: 200, autoRestart: true }, x11vnc: { running: false, priority: 300, autoRestart: true }, novnc: { running: false, priority: 400, autoRestart: true }, }, }) status: Record } @ApiSchema({ name: 'ComputerUseStatusResponse' }) export class ComputerUseStatusResponseDto { @ApiProperty({ description: 'Status of computer use services (active, partial, inactive, error)', example: 'active', enum: ['active', 'partial', 'inactive', 'error'], }) status: string } @ApiSchema({ name: 'ProcessStatusResponse' }) export class ProcessStatusResponseDto { @ApiProperty({ description: 'The name of the VNC process being checked', example: 'xfce4', }) processName: string @ApiProperty({ description: 'Whether the specified VNC process is currently running', example: true, }) running: boolean } @ApiSchema({ name: 'ProcessRestartResponse' }) export class ProcessRestartResponseDto { @ApiProperty({ description: 'A message indicating the result of restarting the process', example: 'Process xfce4 restarted 
successfully', }) message: string @ApiProperty({ description: 'The name of the VNC process that was restarted', example: 'xfce4', }) processName: string } @ApiSchema({ name: 'ProcessLogsResponse' }) export class ProcessLogsResponseDto { @ApiProperty({ description: 'The name of the VNC process whose logs were retrieved', example: 'novnc', }) processName: string @ApiProperty({ description: 'The log output from the specified VNC process', example: '2024-01-15 10:30:45 [INFO] NoVNC server started on port 6080', }) logs: string } @ApiSchema({ name: 'ProcessErrorsResponse' }) export class ProcessErrorsResponseDto { @ApiProperty({ description: 'The name of the VNC process whose error logs were retrieved', example: 'x11vnc', }) processName: string @ApiProperty({ description: 'The error log output from the specified VNC process', example: '2024-01-15 10:30:45 [ERROR] Failed to bind to port 5901', }) errors: string } // PTY DTOs @ApiSchema({ name: 'PtyCreateRequest' }) export class PtyCreateRequestDto { @ApiProperty({ description: 'The unique identifier for the PTY session', example: 'pty-session-12345', }) id: string @ApiPropertyOptional({ description: "Starting directory for the PTY session, defaults to the sandbox's working directory", example: '/home/user', }) cwd?: string @ApiPropertyOptional({ description: 'Environment variables for the PTY session', type: Object, example: { TERM: 'xterm-256color', PS1: '\\u@daytona:\\w$ ' }, }) envs?: Record @ApiPropertyOptional({ description: 'Number of terminal columns', example: 80, }) cols?: number @ApiPropertyOptional({ description: 'Number of terminal rows', example: 24, }) rows?: number @ApiPropertyOptional({ description: 'Whether to start the PTY session lazily (only start when first client connects)', example: false, default: false, }) lazyStart?: boolean } @ApiSchema({ name: 'PtyCreateResponse' }) export class PtyCreateResponseDto { @ApiProperty({ description: 'The unique identifier for the created PTY session', example: 
'pty-session-12345', }) sessionId: string } @ApiSchema({ name: 'PtySessionInfo' }) export class PtySessionInfoDto { @ApiProperty({ description: 'The unique identifier for the PTY session', example: 'pty-session-12345', }) id: string @ApiProperty({ description: "Starting directory for the PTY session, defaults to the sandbox's working directory", example: '/home/user', }) cwd: string @ApiProperty({ description: 'Environment variables for the PTY session', type: Object, example: { TERM: 'xterm-256color', PS1: '\\u@daytona:\\w$ ' }, }) envs: Record @ApiProperty({ description: 'Number of terminal columns', example: 80, }) cols: number @ApiProperty({ description: 'Number of terminal rows', example: 24, }) rows: number @ApiProperty({ description: 'When the PTY session was created', example: '2024-01-15T10:30:45Z', }) createdAt: string @ApiProperty({ description: 'Whether the PTY session is currently active', example: true, }) active: boolean @ApiProperty({ description: 'Whether the PTY session uses lazy start (only start when first client connects)', example: false, default: false, }) lazyStart: boolean } @ApiSchema({ name: 'PtyListResponse' }) export class PtyListResponseDto { @ApiProperty({ description: 'List of active PTY sessions', type: [PtySessionInfoDto], }) sessions: PtySessionInfoDto[] } @ApiSchema({ name: 'PtyResizeRequest' }) export class PtyResizeRequestDto { @ApiProperty({ description: 'Number of terminal columns', example: 80, }) cols: number @ApiProperty({ description: 'Number of terminal rows', example: 24, }) rows: number } ================================================ FILE: apps/api/src/sandbox/dto/update-sandbox-network-settings.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { IsOptional, IsString, IsBoolean } from 'class-validator' import { ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'UpdateSandboxNetworkSettings' }) export class UpdateSandboxNetworkSettingsDto { @ApiPropertyOptional({ description: 'Whether to block all network access for the sandbox', example: false, }) @IsOptional() @IsBoolean() networkBlockAll?: boolean @ApiPropertyOptional({ description: 'Comma-separated list of allowed CIDR network addresses for the sandbox', example: '192.168.1.0/16,10.0.0.0/24', }) @IsOptional() @IsString() networkAllowList?: string } ================================================ FILE: apps/api/src/sandbox/dto/update-sandbox-state.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional } from '@nestjs/swagger' import { IsBoolean, IsEnum, IsOptional, IsString } from 'class-validator' import { SandboxState } from '../enums/sandbox-state.enum' export class UpdateSandboxStateDto { @IsEnum(SandboxState) @ApiProperty({ description: 'The new state for the sandbox', enum: SandboxState, example: SandboxState.STARTED, }) state: SandboxState @IsOptional() @IsString() @ApiPropertyOptional({ description: 'Optional error message when reporting an error state', example: 'Failed to pull snapshot image', }) errorReason?: string @IsOptional() @IsBoolean() @ApiPropertyOptional({ description: 'Whether the sandbox is recoverable', example: true, }) recoverable?: boolean } ================================================ FILE: apps/api/src/sandbox/dto/update-snapshot.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { IsBoolean } from 'class-validator' @ApiSchema({ name: 'SetSnapshotGeneralStatusDto' }) export class SetSnapshotGeneralStatusDto { @ApiProperty({ description: 'Whether the snapshot is general', example: true, }) @IsBoolean() general: boolean } ================================================ FILE: apps/api/src/sandbox/dto/upload-file.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'UploadFile' }) export class UploadFileDto { @ApiProperty({ type: 'string', format: 'binary' }) file: any @ApiProperty() path: string } ================================================ FILE: apps/api/src/sandbox/dto/volume.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional } from '@nestjs/swagger' import { IsEnum } from 'class-validator' import { VolumeState } from '../enums/volume-state.enum' import { Volume } from '../entities/volume.entity' export class VolumeDto { @ApiProperty({ description: 'Volume ID', example: 'vol-12345678', }) id: string @ApiProperty({ description: 'Volume name', example: 'my-volume', }) name: string @ApiProperty({ description: 'Organization ID', example: '123e4567-e89b-12d3-a456-426614174000', }) organizationId: string @ApiProperty({ description: 'Volume state', enum: VolumeState, enumName: 'VolumeState', example: VolumeState.READY, }) @IsEnum(VolumeState) state: VolumeState @ApiProperty({ description: 'Creation timestamp', example: '2023-01-01T00:00:00.000Z', }) createdAt: string @ApiProperty({ description: 'Last update timestamp', example: '2023-01-01T00:00:00.000Z', }) updatedAt: string @ApiPropertyOptional({ description: 'Last used timestamp', example: 
'2023-01-01T00:00:00.000Z', nullable: true, }) lastUsedAt?: string @ApiProperty({ description: 'The error reason of the volume', example: 'Error processing volume', nullable: true, }) errorReason?: string static fromVolume(volume: Volume): VolumeDto { return { id: volume.id, name: volume.name, organizationId: volume.organizationId, state: volume.state, createdAt: volume.createdAt?.toISOString(), updatedAt: volume.updatedAt?.toISOString(), lastUsedAt: volume.lastUsedAt?.toISOString(), errorReason: volume.errorReason, } } } ================================================ FILE: apps/api/src/sandbox/dto/workspace-port-preview-url.deprecated.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { IsString } from 'class-validator' @ApiSchema({ name: 'WorkspacePortPreviewUrl' }) export class WorkspacePortPreviewUrlDto { @ApiProperty({ description: 'Preview url', example: 'https://123456-mysandbox.runner.com', }) @IsString() url: string @ApiProperty({ description: 'Access token', example: 'ul67qtv-jl6wb9z5o3eii-ljqt9qed6l', }) @IsString() token: string } ================================================ FILE: apps/api/src/sandbox/dto/workspace.deprecated.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' import { SandboxDto } from './sandbox.dto' import { IsEnum, IsOptional } from 'class-validator' import { BackupState as SnapshotState } from '../enums/backup-state.enum' import { Sandbox } from '../entities/sandbox.entity' @ApiSchema({ name: 'SandboxInfo' }) export class SandboxInfoDto { @ApiProperty({ description: 'The creation timestamp of the project', example: '2023-10-01T12:00:00Z', }) created: string @ApiProperty({ description: 'Deprecated: The name of the sandbox', example: 'MySandbox', deprecated: true, default: '', }) name: string @ApiPropertyOptional({ description: 'Additional metadata provided by the provider', example: '{"key": "value"}', required: false, }) @IsOptional() providerMetadata?: string } @ApiSchema({ name: 'Workspace' }) export class WorkspaceDto extends SandboxDto { @ApiPropertyOptional({ description: 'The image used for the workspace', example: 'daytonaio/workspace:latest', }) image: string @ApiPropertyOptional({ description: 'The state of the snapshot', enum: SnapshotState, example: Object.values(SnapshotState)[0], required: false, }) @IsEnum(SnapshotState) snapshotState?: SnapshotState @ApiPropertyOptional({ description: 'The creation timestamp of the last snapshot', example: '2024-10-01T12:00:00Z', required: false, }) snapshotCreatedAt?: string @ApiPropertyOptional({ description: 'Additional information about the sandbox', type: SandboxInfoDto, required: false, }) @IsOptional() info?: SandboxInfoDto constructor() { super() } static fromSandbox(sandbox: Sandbox): WorkspaceDto { // Send empty string for toolboxProxyUrl as it is not needed in deprecated DTO const dto = super.fromSandbox(sandbox, '') return this.fromSandboxDto(dto) } static fromSandboxDto(sandboxDto: SandboxDto): WorkspaceDto { return { ...sandboxDto, image: sandboxDto.snapshot, snapshotState: sandboxDto.backupState, snapshotCreatedAt: 
sandboxDto.backupCreatedAt, info: { name: sandboxDto.name, created: sandboxDto.createdAt, providerMetadata: JSON.stringify({ state: sandboxDto.state, region: sandboxDto.target, class: sandboxDto.class, updatedAt: sandboxDto.updatedAt, lastSnapshot: sandboxDto.backupCreatedAt, cpu: sandboxDto.cpu, gpu: sandboxDto.gpu, memory: sandboxDto.memory, disk: sandboxDto.disk, autoStopInterval: sandboxDto.autoStopInterval, autoArchiveInterval: sandboxDto.autoArchiveInterval, daemonVersion: sandboxDto.daemonVersion, }), }, } } } ================================================ FILE: apps/api/src/sandbox/entities/build-info.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Column, CreateDateColumn, Entity, OneToMany, PrimaryColumn, UpdateDateColumn, BeforeInsert } from 'typeorm' import { Snapshot } from './snapshot.entity' import { Sandbox } from './sandbox.entity' import { createHash } from 'crypto' export function generateBuildInfoHash(dockerfileContent: string, contextHashes: string[] = []): string { const sortedContextHashes = [...contextHashes].sort() || [] const combined = dockerfileContent + sortedContextHashes.join('') const hash = createHash('sha256').update(combined).digest('hex') return 'daytona-' + hash + ':daytona' } @Entity() export class BuildInfo { @PrimaryColumn() snapshotRef: string @Column({ type: 'text', nullable: true }) dockerfileContent?: string @Column('simple-array', { nullable: true }) contextHashes?: string[] @OneToMany(() => Snapshot, (snapshot) => snapshot.buildInfo) snapshots: Snapshot[] @OneToMany(() => Sandbox, (sandbox) => sandbox.buildInfo) sandboxes: Sandbox[] @Column({ type: 'timestamp with time zone', default: () => 'CURRENT_TIMESTAMP' }) lastUsedAt: Date @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @UpdateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date @BeforeInsert() generateHash() { 
this.snapshotRef = generateBuildInfoHash(this.dockerfileContent, this.contextHashes) } } ================================================ FILE: apps/api/src/sandbox/entities/job.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Check, Column, CreateDateColumn, Entity, Index, PrimaryGeneratedColumn, UpdateDateColumn, VersionColumn, } from 'typeorm' import { JobStatus } from '../enums/job-status.enum' import { JobType } from '../enums/job-type.enum' import { ResourceType } from '../enums/resource-type.enum' import { v4 } from 'uuid' @Entity() @Index(['runnerId', 'status']) @Index(['status', 'createdAt']) @Index(['resourceType', 'resourceId']) @Index('IDX_UNIQUE_INCOMPLETE_JOB', ['resourceType', 'resourceId', 'runnerId'], { unique: true, where: '"completedAt" IS NULL', }) // FIXME: Add this once https://github.com/typeorm/typeorm/issues/11714 is resolved // @Check( // 'VALIDATE_JOB_TYPE', // `"type" IN (${Object.values(JobType) // .map((v) => `'${v}'`) // .join(', ')})`, // ) export class Job { @PrimaryGeneratedColumn('uuid') id: string @VersionColumn() version: number @Column({ type: 'character varying', }) type: JobType @Column({ type: 'enum', enum: JobStatus, default: JobStatus.PENDING, }) status: JobStatus @Column() runnerId: string @Column({ type: 'enum', enum: ResourceType, }) resourceType: ResourceType @Column() resourceId: string @Column({ nullable: true, }) payload: string | null @Column({ nullable: true, }) resultMetadata: string | null @Column({ type: 'jsonb', nullable: true, }) traceContext: Record | null @Column({ nullable: true, type: 'text', }) errorMessage: string | null @Column({ nullable: true, type: 'timestamp with time zone', }) startedAt: Date | null @Column({ nullable: true, type: 'timestamp with time zone', }) completedAt: Date | null @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @UpdateDateColumn({ type: 'timestamp with 
time zone', }) updatedAt: Date constructor(params: { id?: string type: JobType status?: JobStatus runnerId: string resourceType: ResourceType resourceId: string payload?: string | null traceContext?: Record | null errorMessage?: string | null startedAt?: Date | null completedAt?: Date | null }) { this.id = params.id || v4() this.version = 1 this.type = params.type this.status = params.status || JobStatus.PENDING this.runnerId = params.runnerId this.resourceType = params.resourceType this.resourceId = params.resourceId this.payload = params.payload || null this.traceContext = params.traceContext || null this.errorMessage = params.errorMessage || null this.startedAt = params.startedAt || null this.completedAt = params.completedAt || null this.createdAt = new Date() this.updatedAt = new Date() } getResultMetadata(): Record | null { if (!this.resultMetadata) { return null } try { return JSON.parse(this.resultMetadata) } catch { return null } } } ================================================ FILE: apps/api/src/sandbox/entities/runner.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import { Column, CreateDateColumn, Entity, Index, PrimaryGeneratedColumn, Unique, UpdateDateColumn } from 'typeorm'
import { SandboxClass } from '../enums/sandbox-class.enum'
import { RunnerState } from '../enums/runner-state.enum'
import { RunnerServiceInfo } from '../common/runner-service-info'

/**
 * A runner host that executes sandbox workloads. Tracks static capacity
 * (cpu/memoryGiB/diskGiB), live utilization metrics, and scheduling flags
 * (unschedulable, draining).
 */
@Entity()
@Unique(['region', 'name'])
@Index(['state', 'unschedulable', 'region'])
export class Runner {
  @PrimaryGeneratedColumn('uuid')
  id: string

  @Column({
    nullable: true,
  })
  domain: string | null

  @Column({
    nullable: true,
  })
  apiUrl: string | null

  @Column({
    nullable: true,
  })
  proxyUrl: string | null

  @Column()
  apiKey: string

  // Total capacity advertised by the runner (not current usage).
  @Column({
    type: 'float',
    default: 0,
  })
  cpu: number

  @Column({
    type: 'float',
    default: 0,
  })
  memoryGiB: number

  @Column({
    type: 'float',
    default: 0,
  })
  diskGiB: number

  @Column({
    nullable: true,
  })
  gpu: number | null

  @Column({
    nullable: true,
  })
  gpuType: string | null

  @Column({
    type: 'enum',
    enum: SandboxClass,
    default: SandboxClass.SMALL,
  })
  class: SandboxClass

  // Live utilization metrics, refreshed by health checks.
  @Column({
    type: 'float',
    default: 0,
  })
  currentCpuLoadAverage: number

  @Column({
    type: 'float',
    default: 0,
  })
  currentCpuUsagePercentage: number

  @Column({
    type: 'float',
    default: 0,
  })
  currentMemoryUsagePercentage: number

  @Column({
    type: 'float',
    default: 0,
  })
  currentDiskUsagePercentage: number

  // Resources reserved by scheduled sandboxes (may differ from usage above).
  @Column({
    type: 'float',
    default: 0,
  })
  currentAllocatedCpu: number

  @Column({
    type: 'float',
    default: 0,
  })
  currentAllocatedMemoryGiB: number

  @Column({
    type: 'float',
    default: 0,
  })
  currentAllocatedDiskGiB: number

  @Column({
    default: 0,
  })
  currentSnapshotCount: number

  @Column({
    default: 0,
  })
  currentStartedSandboxes: number

  @Column({
    default: 0,
  })
  availabilityScore: number

  @Column()
  region: string

  @Column()
  name: string

  @Column({
    type: 'enum',
    enum: RunnerState,
    default: RunnerState.INITIALIZING,
  })
  state: RunnerState

  @Column({
    default: 'v0.0.0-dev',
    nullable: true,
  })
  appVersion: string | null

  @Column({
    default: '0',
  })
  apiVersion: string

  @Column({
    nullable: true,
    type: 'timestamp with time zone',
  })
  lastChecked: Date

  @Column({
    default: false,
  })
  unschedulable: boolean

  @Column({
    default: false,
  })
  draining: boolean

  @Column({
    type: 'jsonb',
    nullable: true,
    default: null,
  })
  serviceHealth: RunnerServiceInfo[] | null

  @CreateDateColumn({
    type: 'timestamp with time zone',
  })
  createdAt: Date

  @UpdateDateColumn({
    type: 'timestamp with time zone',
  })
  updatedAt: Date

  constructor(params: {
    region: string
    name: string
    apiKey: string
    apiVersion: string
    cpu?: number
    memoryGiB?: number
    diskGiB?: number
    domain?: string | null
    apiUrl?: string
    proxyUrl?: string
    appVersion?: string | null
  }) {
    this.region = params.region
    this.name = params.name
    this.apiKey = params.apiKey
    this.cpu = params.cpu ?? 0
    this.memoryGiB = params.memoryGiB ?? 0
    this.diskGiB = params.diskGiB ?? 0
    this.domain = params.domain ?? null
    // NOTE(review): apiUrl/proxyUrl may be assigned undefined here even though
    // the fields are typed string | null — confirm whether `?? null` was intended.
    this.apiUrl = params.apiUrl
    this.proxyUrl = params.proxyUrl
    this.class = SandboxClass.SMALL
    this.apiVersion = params.apiVersion
    this.appVersion = params.appVersion ?? null
    this.gpu = null
    this.gpuType = null
    // Legacy (version '0') runners must expose an API URL; the proxy URL
    // falls back to it when absent.
    if (this.apiVersion === '0') {
      if (!this.apiUrl) {
        throw new Error('API URL is required for runner version 0')
      }
      if (!this.proxyUrl) {
        this.proxyUrl = this.apiUrl
      }
    }
  }
}

================================================
FILE: apps/api/src/sandbox/entities/sandbox.entity.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Column, CreateDateColumn, Entity, Index, JoinColumn, ManyToOne, PrimaryGeneratedColumn, Unique, UpdateDateColumn, } from 'typeorm' import { SandboxState } from '../enums/sandbox-state.enum' import { SandboxDesiredState } from '../enums/sandbox-desired-state.enum' import { SandboxClass } from '../enums/sandbox-class.enum' import { BackupState } from '../enums/backup-state.enum' import { v4 as uuidv4 } from 'uuid' import { SandboxVolume } from '../dto/sandbox.dto' import { BuildInfo } from './build-info.entity' import { nanoid } from 'nanoid' @Entity() @Unique(['organizationId', 'name']) @Index('sandbox_state_idx', ['state']) @Index('sandbox_desiredstate_idx', ['desiredState']) @Index('sandbox_snapshot_idx', ['snapshot']) @Index('sandbox_runnerid_idx', ['runnerId']) @Index('sandbox_runner_state_idx', ['runnerId', 'state']) @Index('sandbox_organizationid_idx', ['organizationId']) @Index('sandbox_region_idx', ['region']) @Index('sandbox_resources_idx', ['cpu', 'mem', 'disk', 'gpu']) @Index('sandbox_backupstate_idx', ['backupState']) @Index('sandbox_runner_state_desired_idx', ['runnerId', 'state', 'desiredState'], { where: '"pending" = false', }) @Index('sandbox_active_only_idx', ['id'], { where: `"state" <> ALL (ARRAY['destroyed'::sandbox_state_enum, 'archived'::sandbox_state_enum])`, }) @Index('sandbox_pending_idx', ['id'], { where: `"pending" = true`, }) @Index('idx_sandbox_authtoken', ['authToken']) @Index('sandbox_labels_gin_full_idx', { synchronize: false }) export class Sandbox { @PrimaryGeneratedColumn('uuid') id: string @Column({ type: 'uuid', }) organizationId: string @Column() name: string @Column() region: string @Column({ type: 'uuid', nullable: true, }) runnerId?: string // this is the runnerId of the runner that was previously assigned to the sandbox // if something goes wrong with new runner assignment, we can revert to the previous runner @Column({ type: 'uuid', nullable: true, }) prevRunnerId?: string 
@Column({ type: 'enum', enum: SandboxClass, default: SandboxClass.SMALL, }) class = SandboxClass.SMALL @Column({ type: 'enum', enum: SandboxState, default: SandboxState.UNKNOWN, }) state = SandboxState.UNKNOWN @Column({ type: 'enum', enum: SandboxDesiredState, default: SandboxDesiredState.STARTED, }) desiredState = SandboxDesiredState.STARTED @Column({ nullable: true }) snapshot?: string @Column() osUser: string @Column({ nullable: true }) errorReason?: string @Column({ default: false, type: 'boolean' }) recoverable = false @Column({ type: 'jsonb', default: {}, }) env: { [key: string]: string } = {} @Column({ default: false, type: 'boolean' }) public = false @Column({ default: false, type: 'boolean' }) networkBlockAll = false @Column({ nullable: true }) networkAllowList?: string @Column('jsonb', { nullable: true }) labels: { [key: string]: string } @Column({ nullable: true }) backupRegistryId: string | null @Column({ nullable: true }) backupSnapshot: string | null @Column({ nullable: true, type: 'timestamp with time zone' }) lastBackupAt: Date | null @Column({ type: 'enum', enum: BackupState, default: BackupState.NONE, }) backupState = BackupState.NONE @Column({ type: 'text', nullable: true, }) backupErrorReason: string | null @Column({ type: 'jsonb', default: [], }) existingBackupSnapshots: Array<{ snapshotName: string createdAt: Date }> = [] @Column({ type: 'int', default: 2 }) cpu = 2 @Column({ type: 'int', default: 0 }) gpu = 0 @Column({ type: 'int', default: 4 }) mem = 4 @Column({ type: 'int', default: 10 }) disk = 10 @Column({ type: 'jsonb', default: [], }) volumes: SandboxVolume[] = [] @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @UpdateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date @Column({ nullable: true, type: 'timestamp with time zone' }) lastActivityAt?: Date // this is the interval in minutes after which the sandbox will be stopped if lastActivityAt is not updated // if set to 0, auto stop will be 
disabled @Column({ default: 15, type: 'int' }) autoStopInterval: number | undefined = 15 // this is the interval in minutes after which a continuously stopped workspace will be automatically archived @Column({ default: 7 * 24 * 60, type: 'int' }) autoArchiveInterval: number | undefined = 7 * 24 * 60 // this is the interval in minutes after which a continuously stopped workspace will be automatically deleted // if set to negative value, auto delete will be disabled // if set to 0, sandbox will be immediately deleted upon stopping @Column({ default: -1, type: 'int' }) autoDeleteInterval: number | undefined = -1 @Column({ default: false, type: 'boolean' }) pending: boolean | undefined = false @Column({ default: () => 'MD5(random()::text)', type: 'text' }) authToken = nanoid(32).toLocaleLowerCase() @ManyToOne(() => BuildInfo, (buildInfo) => buildInfo.sandboxes, { nullable: true, }) @JoinColumn() buildInfo?: BuildInfo @Column({ nullable: true }) daemonVersion?: string constructor(region: string, name?: string) { this.id = uuidv4() // Set name - use provided name or fallback to ID this.name = name || this.id this.region = region } /** * Helper method that returns the update data needed for a backup state update. 
*/ static getBackupStateUpdate( sandbox: Sandbox, backupState: BackupState, backupSnapshot?: string | null, backupRegistryId?: string | null, backupErrorReason?: string | null, ): Partial { const update: Partial = { backupState, } switch (backupState) { case BackupState.NONE: update.backupSnapshot = null break case BackupState.COMPLETED: { const now = new Date() update.lastBackupAt = now update.existingBackupSnapshots = [ ...sandbox.existingBackupSnapshots, { snapshotName: sandbox.backupSnapshot, createdAt: now, }, ] update.backupErrorReason = null break } } if (backupSnapshot !== undefined) { update.backupSnapshot = backupSnapshot } if (backupRegistryId !== undefined) { update.backupRegistryId = backupRegistryId } if (backupErrorReason !== undefined) { update.backupErrorReason = backupErrorReason } return update } /** * Helper method that returns the update data needed for a soft delete operation. */ static getSoftDeleteUpdate(sandbox: Sandbox): Partial { return { pending: true, desiredState: SandboxDesiredState.DESTROYED, backupState: BackupState.NONE, name: 'DESTROYED_' + sandbox.name + '_' + Date.now(), } } /** * Asserts that the current entity state is valid. */ assertValid(): void { this.validateDesiredStateTransition() } private validateDesiredStateTransition(): void { switch (this.desiredState) { case SandboxDesiredState.STARTED: if ( [ SandboxState.STARTED, SandboxState.STOPPED, SandboxState.STARTING, SandboxState.ARCHIVED, SandboxState.CREATING, SandboxState.UNKNOWN, SandboxState.RESTORING, SandboxState.PENDING_BUILD, SandboxState.BUILDING_SNAPSHOT, SandboxState.PULLING_SNAPSHOT, SandboxState.ARCHIVING, SandboxState.ERROR, SandboxState.BUILD_FAILED, SandboxState.RESIZING, ].includes(this.state) ) { break } throw new Error(`Sandbox ${this.id} is not in a valid state to be started. 
State: ${this.state}`) case SandboxDesiredState.STOPPED: if ( [ SandboxState.STARTED, SandboxState.STOPPING, SandboxState.STOPPED, SandboxState.ERROR, SandboxState.BUILD_FAILED, SandboxState.RESIZING, ].includes(this.state) ) { break } throw new Error(`Sandbox ${this.id} is not in a valid state to be stopped. State: ${this.state}`) case SandboxDesiredState.ARCHIVED: if ( [ SandboxState.ARCHIVED, SandboxState.ARCHIVING, SandboxState.STOPPED, SandboxState.ERROR, SandboxState.BUILD_FAILED, ].includes(this.state) ) { break } throw new Error(`Sandbox ${this.id} is not in a valid state to be archived. State: ${this.state}`) case SandboxDesiredState.DESTROYED: if ( [ SandboxState.DESTROYED, SandboxState.DESTROYING, SandboxState.STOPPED, SandboxState.STARTED, SandboxState.ARCHIVED, SandboxState.ERROR, SandboxState.BUILD_FAILED, SandboxState.ARCHIVING, SandboxState.PENDING_BUILD, ].includes(this.state) ) { break } throw new Error(`Sandbox ${this.id} is not in a valid state to be destroyed. State: ${this.state}`) } } /** * Enforces domain invariants on the current entity state. * * @returns Additional field changes that invariant enforcement produced. 
*/ enforceInvariants(): Partial { const changes = this.getInvariantChanges() Object.assign(this, changes) return changes } private getInvariantChanges(): Partial { const changes: Partial = {} if (!this.pending && String(this.state) !== String(this.desiredState)) { changes.pending = true } if (this.pending && String(this.state) === String(this.desiredState)) { changes.pending = false } if ( this.state === SandboxState.ERROR || this.state === SandboxState.BUILD_FAILED || this.desiredState === SandboxDesiredState.ARCHIVED ) { changes.pending = false } if (this.state === SandboxState.DESTROYED || this.state === SandboxState.ARCHIVED) { changes.runnerId = null } if (this.state === SandboxState.DESTROYED) { changes.backupState = BackupState.NONE } return changes } } ================================================ FILE: apps/api/src/sandbox/entities/snapshot-region.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { CreateDateColumn, Entity, JoinColumn, ManyToOne, PrimaryColumn, UpdateDateColumn } from 'typeorm' import { Snapshot } from './snapshot.entity' import { Region } from '../../region/entities/region.entity' @Entity() export class SnapshotRegion { @PrimaryColumn('uuid') snapshotId: string @PrimaryColumn() regionId: string @ManyToOne(() => Snapshot, (snapshot) => snapshot.snapshotRegions, { onDelete: 'CASCADE', }) @JoinColumn({ name: 'snapshotId' }) snapshot: Snapshot @ManyToOne(() => Region, { onDelete: 'CASCADE', }) @JoinColumn({ name: 'regionId' }) region: Region @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @UpdateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date } ================================================ FILE: apps/api/src/sandbox/entities/snapshot-runner.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Column, CreateDateColumn, Entity, Index, PrimaryGeneratedColumn, UpdateDateColumn } from 'typeorm' import { SnapshotRunnerState } from '../enums/snapshot-runner-state.enum' @Entity() @Index('snapshot_runner_snapshotref_idx', ['snapshotRef']) @Index('snapshot_runner_runnerid_snapshotref_idx', ['runnerId', 'snapshotRef']) @Index('snapshot_runner_runnerid_idx', ['runnerId']) @Index('snapshot_runner_state_idx', ['state']) export class SnapshotRunner { @PrimaryGeneratedColumn('uuid') id: string @Column({ type: 'enum', enum: SnapshotRunnerState, default: SnapshotRunnerState.PULLING_SNAPSHOT, }) state: SnapshotRunnerState @Column({ nullable: true }) errorReason?: string @Column({ // todo: remove default default: '', }) snapshotRef: string @Column() runnerId: string @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @UpdateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date } ================================================ FILE: apps/api/src/sandbox/entities/snapshot.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import {
  Column,
  CreateDateColumn,
  Entity,
  Index,
  JoinColumn,
  ManyToOne,
  OneToMany,
  PrimaryGeneratedColumn,
  Unique,
  UpdateDateColumn,
} from 'typeorm'
import { SnapshotRunner } from './snapshot-runner.entity'
import { SnapshotState } from '../enums/snapshot-state.enum'
import { BuildInfo } from './build-info.entity'
import { SnapshotRegion } from './snapshot-region.entity'

/**
 * A snapshot (image) that sandboxes can be created from, including its
 * default resource sizing (cpu/mem/disk/gpu) and per-runner/per-region
 * availability.
 */
@Entity()
@Unique(['organizationId', 'name'])
@Index('snapshot_name_idx', ['name'])
@Index('snapshot_state_idx', ['state'])
export class Snapshot {
  @PrimaryGeneratedColumn('uuid')
  id: string

  @Column({
    nullable: true,
    type: 'uuid',
  })
  organizationId?: string

  // general snapshot is available to all organizations
  @Column({
    type: 'boolean',
    default: false,
  })
  general = false

  @Column()
  name: string

  @Column()
  imageName: string

  @Column({ nullable: true })
  ref?: string

  @Column({
    type: 'enum',
    enum: SnapshotState,
    default: SnapshotState.PENDING,
  })
  state = SnapshotState.PENDING

  @Column({ nullable: true })
  errorReason?: string

  @Column({ type: 'float', nullable: true })
  size?: number

  // Default resource sizing applied to sandboxes created from this snapshot.
  @Column({ type: 'int', default: 1 })
  cpu = 1

  @Column({ type: 'int', default: 0 })
  gpu = 0

  @Column({ type: 'int', default: 1 })
  mem = 1

  @Column({ type: 'int', default: 3 })
  disk = 3

  @Column({ type: 'boolean', default: false })
  hideFromUsers = false

  // NOTE(review): inverse side is SnapshotRunner.snapshotRef (a string
  // column, not a relation) — confirm this mapping behaves as intended.
  @OneToMany(() => SnapshotRunner, (runner) => runner.snapshotRef)
  runners: SnapshotRunner[]

  @Column({ array: true, type: 'text', nullable: true })
  entrypoint?: string[]

  @CreateDateColumn({
    type: 'timestamp with time zone',
  })
  createdAt: Date

  @UpdateDateColumn({
    type: 'timestamp with time zone',
  })
  updatedAt: Date

  @Column({ nullable: true })
  lastUsedAt?: Date

  @ManyToOne(() => BuildInfo, (buildInfo) => buildInfo.snapshots, {
    nullable: true,
    eager: true,
  })
  @JoinColumn()
  buildInfo?: BuildInfo

  @Column({ nullable: true })
  initialRunnerId?: string

  @OneToMany(() => SnapshotRegion, (snapshotRegion) => snapshotRegion.snapshot, {
    cascade: true,
    onDelete: 'CASCADE',
  })
  snapshotRegions: SnapshotRegion[]
}

================================================
FILE: apps/api/src/sandbox/entities/ssh-access.entity.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import {
  Column,
  CreateDateColumn,
  Entity,
  Generated,
  JoinColumn,
  ManyToOne,
  PrimaryColumn,
  UpdateDateColumn,
} from 'typeorm'
import { Sandbox } from './sandbox.entity'

/**
 * Time-limited SSH access token for a sandbox; rows are removed when the
 * sandbox is deleted (onDelete CASCADE).
 */
@Entity()
export class SshAccess {
  @PrimaryColumn()
  @Generated('uuid')
  id: string

  @Column({
    type: 'uuid',
  })
  sandboxId: string

  @Column({
    type: 'text',
  })
  token: string

  @Column({
    type: 'timestamp',
  })
  expiresAt: Date

  @CreateDateColumn()
  createdAt: Date

  @UpdateDateColumn()
  updatedAt: Date

  @ManyToOne(() => Sandbox, { onDelete: 'CASCADE' })
  @JoinColumn({ name: 'sandboxId' })
  sandbox: Sandbox
}

================================================
FILE: apps/api/src/sandbox/entities/volume.entity.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Column, CreateDateColumn, Entity, PrimaryGeneratedColumn, Unique, UpdateDateColumn } from 'typeorm' import { VolumeState } from '../enums/volume-state.enum' @Entity() @Unique(['organizationId', 'name']) export class Volume { @PrimaryGeneratedColumn('uuid') id: string @Column({ nullable: true, type: 'uuid', }) organizationId?: string @Column() name: string @Column({ type: 'enum', enum: VolumeState, default: VolumeState.PENDING_CREATE, }) state: VolumeState @Column({ nullable: true }) errorReason?: string @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @UpdateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date @Column({ nullable: true }) lastUsedAt?: Date public getBucketName(): string { return `daytona-volume-${this.id}` } } ================================================ FILE: apps/api/src/sandbox/entities/warm-pool.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Column, CreateDateColumn, Entity, Index, PrimaryGeneratedColumn, UpdateDateColumn } from 'typeorm' import { SandboxClass } from '../enums/sandbox-class.enum' @Entity() @Index('warm_pool_find_idx', ['snapshot', 'target', 'class', 'cpu', 'mem', 'disk', 'gpu', 'osUser', 'env']) export class WarmPool { @PrimaryGeneratedColumn('uuid') id: string @Column() pool: number @Column() snapshot: string @Column() target: string @Column() cpu: number @Column() mem: number @Column() disk: number @Column() gpu: number @Column() gpuType: string @Column({ type: 'enum', enum: SandboxClass, default: SandboxClass.SMALL, }) class: SandboxClass @Column() osUser: string @Column({ nullable: true }) errorReason?: string @Column({ type: 'simple-json', default: {}, }) env: { [key: string]: string } @CreateDateColumn({ type: 'timestamp with time zone', }) createdAt: Date @UpdateDateColumn({ type: 'timestamp with time zone', }) updatedAt: Date } ================================================ FILE: apps/api/src/sandbox/enums/backup-state.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export enum BackupState { NONE = 'None', PENDING = 'Pending', IN_PROGRESS = 'InProgress', COMPLETED = 'Completed', ERROR = 'Error', } ================================================ FILE: apps/api/src/sandbox/enums/job-status.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export enum JobStatus { PENDING = 'PENDING', IN_PROGRESS = 'IN_PROGRESS', COMPLETED = 'COMPLETED', FAILED = 'FAILED', } ================================================ FILE: apps/api/src/sandbox/enums/job-type.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export enum JobType { CREATE_SANDBOX = 'CREATE_SANDBOX', START_SANDBOX = 'START_SANDBOX', STOP_SANDBOX = 'STOP_SANDBOX', DESTROY_SANDBOX = 'DESTROY_SANDBOX', RESIZE_SANDBOX = 'RESIZE_SANDBOX', CREATE_BACKUP = 'CREATE_BACKUP', BUILD_SNAPSHOT = 'BUILD_SNAPSHOT', PULL_SNAPSHOT = 'PULL_SNAPSHOT', RECOVER_SANDBOX = 'RECOVER_SANDBOX', INSPECT_SNAPSHOT_IN_REGISTRY = 'INSPECT_SNAPSHOT_IN_REGISTRY', REMOVE_SNAPSHOT = 'REMOVE_SNAPSHOT', UPDATE_SANDBOX_NETWORK_SETTINGS = 'UPDATE_SANDBOX_NETWORK_SETTINGS', } ================================================ FILE: apps/api/src/sandbox/enums/resource-type.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export enum ResourceType { SANDBOX = 'SANDBOX', SNAPSHOT = 'SNAPSHOT', BACKUP = 'BACKUP', } ================================================ FILE: apps/api/src/sandbox/enums/runner-state.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export enum RunnerState { INITIALIZING = 'initializing', READY = 'ready', DISABLED = 'disabled', DECOMMISSIONED = 'decommissioned', UNRESPONSIVE = 'unresponsive', } ================================================ FILE: apps/api/src/sandbox/enums/sandbox-class.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export enum SandboxClass { SMALL = 'small', MEDIUM = 'medium', LARGE = 'large', } export const SandboxClassData = { [SandboxClass.SMALL]: { cpu: 4, memory: 8, disk: 30, }, [SandboxClass.MEDIUM]: { cpu: 8, memory: 16, disk: 60, }, [SandboxClass.LARGE]: { cpu: 12, memory: 24, disk: 90, }, } ================================================ FILE: apps/api/src/sandbox/enums/sandbox-desired-state.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export enum SandboxDesiredState { DESTROYED = 'destroyed', STARTED = 'started', STOPPED = 'stopped', RESIZED = 'resized', ARCHIVED = 'archived', } ================================================ FILE: apps/api/src/sandbox/enums/sandbox-state.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export enum SandboxState { CREATING = 'creating', RESTORING = 'restoring', DESTROYED = 'destroyed', DESTROYING = 'destroying', STARTED = 'started', STOPPED = 'stopped', STARTING = 'starting', STOPPING = 'stopping', ERROR = 'error', BUILD_FAILED = 'build_failed', PENDING_BUILD = 'pending_build', BUILDING_SNAPSHOT = 'building_snapshot', UNKNOWN = 'unknown', PULLING_SNAPSHOT = 'pulling_snapshot', ARCHIVED = 'archived', ARCHIVING = 'archiving', RESIZING = 'resizing', } ================================================ FILE: apps/api/src/sandbox/enums/snapshot-runner-state.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export enum SnapshotRunnerState { PULLING_SNAPSHOT = 'pulling_snapshot', BUILDING_SNAPSHOT = 'building_snapshot', READY = 'ready', ERROR = 'error', REMOVING = 'removing', } ================================================ FILE: apps/api/src/sandbox/enums/snapshot-state.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export enum SnapshotState { BUILDING = 'building', PENDING = 'pending', PULLING = 'pulling', ACTIVE = 'active', INACTIVE = 'inactive', ERROR = 'error', BUILD_FAILED = 'build_failed', REMOVING = 'removing', } ================================================ FILE: apps/api/src/sandbox/enums/volume-state.enum.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export enum VolumeState { CREATING = 'creating', READY = 'ready', PENDING_CREATE = 'pending_create', PENDING_DELETE = 'pending_delete', DELETING = 'deleting', DELETED = 'deleted', ERROR = 'error', } ================================================ FILE: apps/api/src/sandbox/errors/runner-api-error.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export class RunnerApiError extends Error { constructor( message: string, public readonly statusCode?: number, public readonly code?: string, ) { super(message) this.name = 'RunnerApiError' } } ================================================ FILE: apps/api/src/sandbox/errors/runner-not-ready.error.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export class RunnerNotReadyError extends Error { constructor(message: string) { super(message) this.name = 'RunnerNotReadyError' } } ================================================ FILE: apps/api/src/sandbox/errors/snapshot-state-error.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export class SnapshotStateError extends Error { constructor(public readonly errorReason: string) { super(errorReason) this.name = 'SnapshotStateError' } } ================================================ FILE: apps/api/src/sandbox/events/runner-created.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Runner } from '../entities/runner.entity' export class RunnerCreatedEvent { constructor(public readonly runner: Runner) {} } ================================================ FILE: apps/api/src/sandbox/events/runner-deleted.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { EntityManager } from 'typeorm/entity-manager/EntityManager.js' export class RunnerDeletedEvent { constructor( public readonly entityManager: EntityManager, public readonly runnerId: string, ) {} } ================================================ FILE: apps/api/src/sandbox/events/runner-state-updated.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Runner } from '../entities/runner.entity' import { RunnerState } from '../enums/runner-state.enum' export class RunnerStateUpdatedEvent { constructor( public readonly runner: Runner, public readonly oldState: RunnerState, public readonly newState: RunnerState, ) {} } ================================================ FILE: apps/api/src/sandbox/events/runner-unschedulable-updated.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Runner } from '../entities/runner.entity' export class RunnerUnschedulableUpdatedEvent { constructor( public readonly runner: Runner, public readonly oldUnschedulable: boolean, public readonly newUnschedulable: boolean, ) {} } ================================================ FILE: apps/api/src/sandbox/events/sandbox-archived.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Sandbox } from '../entities/sandbox.entity' export class SandboxArchivedEvent { constructor(public readonly sandbox: Sandbox) {} } ================================================ FILE: apps/api/src/sandbox/events/sandbox-backup-created.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Sandbox } from '../entities/sandbox.entity' export class SandboxBackupCreatedEvent { constructor(public readonly sandbox: Sandbox) {} } ================================================ FILE: apps/api/src/sandbox/events/sandbox-create.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Sandbox } from '../entities/sandbox.entity' export class SandboxCreatedEvent { constructor(public readonly sandbox: Sandbox) {} } ================================================ FILE: apps/api/src/sandbox/events/sandbox-desired-state-updated.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Sandbox } from '../entities/sandbox.entity' import { SandboxDesiredState } from '../enums/sandbox-desired-state.enum' export class SandboxDesiredStateUpdatedEvent { constructor( public readonly sandbox: Sandbox, public readonly oldDesiredState: SandboxDesiredState, public readonly newDesiredState: SandboxDesiredState, ) {} } ================================================ FILE: apps/api/src/sandbox/events/sandbox-destroyed.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Sandbox } from '../entities/sandbox.entity' export class SandboxDestroyedEvent { constructor(public readonly sandbox: Sandbox) {} } ================================================ FILE: apps/api/src/sandbox/events/sandbox-organization-updated.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Sandbox } from '../entities/sandbox.entity' export class SandboxOrganizationUpdatedEvent { constructor( public readonly sandbox: Sandbox, public readonly oldOrganizationId: string, public readonly newOrganizationId: string, ) {} } ================================================ FILE: apps/api/src/sandbox/events/sandbox-public-status-updated.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Sandbox } from '../entities/sandbox.entity' export class SandboxPublicStatusUpdatedEvent { constructor( public readonly sandbox: Sandbox, public readonly oldStatus: boolean, public readonly newStatus: boolean, ) {} } ================================================ FILE: apps/api/src/sandbox/events/sandbox-started.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Sandbox } from '../entities/sandbox.entity' export class SandboxStartedEvent { constructor(public readonly sandbox: Sandbox) {} } ================================================ FILE: apps/api/src/sandbox/events/sandbox-state-updated.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Sandbox } from '../entities/sandbox.entity' import { SandboxState } from '../enums/sandbox-state.enum' export class SandboxStateUpdatedEvent { constructor( public readonly sandbox: Sandbox, public readonly oldState: SandboxState, public readonly newState: SandboxState, ) {} } ================================================ FILE: apps/api/src/sandbox/events/sandbox-stopped.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Sandbox } from '../entities/sandbox.entity' export class SandboxStoppedEvent { constructor(public readonly sandbox: Sandbox) {} } ================================================ FILE: apps/api/src/sandbox/events/snapshot-activated.event.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Snapshot } from '../entities/snapshot.entity' export class SnapshotActivatedEvent { constructor(public readonly snapshot: Snapshot) {} } ================================================ FILE: apps/api/src/sandbox/events/snapshot-created.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Snapshot } from '../entities/snapshot.entity' export class SnapshotCreatedEvent { constructor(public readonly snapshot: Snapshot) {} } ================================================ FILE: apps/api/src/sandbox/events/snapshot-removed.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Snapshot } from '../entities/snapshot.entity' export class SnapshotRemovedEvent { constructor(public readonly snapshot: Snapshot) {} } ================================================ FILE: apps/api/src/sandbox/events/snapshot-state-updated.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Snapshot } from '../entities/snapshot.entity' import { SnapshotState } from '../enums/snapshot-state.enum' export class SnapshotStateUpdatedEvent { constructor( public readonly snapshot: Snapshot, public readonly oldState: SnapshotState, public readonly newState: SnapshotState, ) {} } ================================================ FILE: apps/api/src/sandbox/events/volume-created.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Volume } from '../entities/volume.entity' export class VolumeCreatedEvent { constructor(public readonly volume: Volume) {} } ================================================ FILE: apps/api/src/sandbox/events/volume-last-used-at-updated.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Volume } from '../entities/volume.entity' export class VolumeLastUsedAtUpdatedEvent { constructor(public readonly volume: Volume) {} } ================================================ FILE: apps/api/src/sandbox/events/volume-state-updated.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Volume } from '../entities/volume.entity' import { VolumeState } from '../enums/volume-state.enum' export class VolumeStateUpdatedEvent { constructor( public readonly volume: Volume, public readonly oldState: VolumeState, public readonly newState: VolumeState, ) {} } ================================================ FILE: apps/api/src/sandbox/events/warmpool-topup-requested.event.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { WarmPool } from '../entities/warm-pool.entity' export class WarmPoolTopUpRequested { constructor(public readonly warmPool: WarmPool) {} } ================================================ FILE: apps/api/src/sandbox/guards/job-access.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, CanActivate, ExecutionContext, NotFoundException, ForbiddenException, Logger, } from '@nestjs/common' import { BaseAuthContext } from '../../common/interfaces/auth-context.interface' import { JobService } from '../services/job.service' import { isRunnerContext, RunnerContext } from '../../common/interfaces/runner-context.interface' @Injectable() export class JobAccessGuard implements CanActivate { private readonly logger = new Logger(JobAccessGuard.name) constructor(private readonly jobService: JobService) {} async canActivate(context: ExecutionContext): Promise { const request = context.switchToHttp().getRequest() const jobId: string = request.params.jobId || request.params.id // TODO: initialize authContext safely const authContext: BaseAuthContext = request.user try { const job = await this.jobService.findOne(jobId) if (!job) { throw new NotFoundException('Job not found') } if (!isRunnerContext(authContext)) { throw new ForbiddenException('User is not a runner') } const runnerContext = authContext as RunnerContext if (runnerContext.runnerId !== job.runnerId) { throw new ForbiddenException('Runner ID does not match job runner ID') } return true } catch (error) { if (!(error instanceof NotFoundException)) { this.logger.error(error) } throw new NotFoundException('Job not found') } } } ================================================ FILE: apps/api/src/sandbox/guards/proxy.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, ExecutionContext, Logger, CanActivate } from '@nestjs/common' import { getAuthContext } from '../../auth/get-auth-context' import { isProxyContext } from '../../common/interfaces/proxy-context.interface' @Injectable() export class ProxyGuard implements CanActivate { protected readonly logger = new Logger(ProxyGuard.name) async canActivate(context: ExecutionContext): Promise { // Throws if not proxy context getAuthContext(context, isProxyContext) return true } } ================================================ FILE: apps/api/src/sandbox/guards/region-runner-access.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, CanActivate, ExecutionContext, NotFoundException, ForbiddenException, Logger, } from '@nestjs/common' import { RunnerService } from '../services/runner.service' import { BaseAuthContext } from '../../common/interfaces/auth-context.interface' import { isRegionProxyContext, RegionProxyContext } from '../../common/interfaces/region-proxy.interface' import { isRegionSSHGatewayContext, RegionSSHGatewayContext, } from '../../common/interfaces/region-ssh-gateway.interface' @Injectable() export class RegionRunnerAccessGuard implements CanActivate { private readonly logger = new Logger(RegionRunnerAccessGuard.name) constructor(private readonly runnerService: RunnerService) {} async canActivate(context: ExecutionContext): Promise { const request = context.switchToHttp().getRequest() const runnerId: string = request.params.runnerId || request.params.id const authContext: BaseAuthContext = request.user if (!isRegionProxyContext(authContext) && !isRegionSSHGatewayContext(authContext)) { return false } try { const regionContext = authContext as RegionProxyContext | RegionSSHGatewayContext const runner = await this.runnerService.findOneOrFail(runnerId) if (regionContext.regionId !== runner.region) { 
throw new ForbiddenException('Region ID does not match runner region ID') } return true } catch (error) { if (!(error instanceof NotFoundException)) { this.logger.error(error) } throw new NotFoundException('Runner not found') } } } ================================================ FILE: apps/api/src/sandbox/guards/region-sandbox-access.guard.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, CanActivate, ExecutionContext, NotFoundException, ForbiddenException } from '@nestjs/common' import { SandboxService } from '../services/sandbox.service' import { BaseAuthContext } from '../../common/interfaces/auth-context.interface' import { isRegionProxyContext, RegionProxyContext } from '../../common/interfaces/region-proxy.interface' import { isRegionSSHGatewayContext, RegionSSHGatewayContext, } from '../../common/interfaces/region-ssh-gateway.interface' @Injectable() export class RegionSandboxAccessGuard implements CanActivate { constructor(private readonly sandboxService: SandboxService) {} async canActivate(context: ExecutionContext): Promise { const request = context.switchToHttp().getRequest() const sandboxId: string = request.params.sandboxId || request.params.id const authContext: BaseAuthContext = request.user if (!isRegionProxyContext(authContext) && !isRegionSSHGatewayContext(authContext)) { return false } try { const regionContext = authContext as RegionProxyContext | RegionSSHGatewayContext const sandboxRegionId = await this.sandboxService.getRegionId(sandboxId) if (sandboxRegionId !== regionContext.regionId) { throw new ForbiddenException(`Sandbox region ID does not match region ${regionContext.role} region ID`) } return true } catch (error) { if (!(error instanceof NotFoundException)) { console.error(error) } throw new NotFoundException(`Sandbox with ID or name ${sandboxId} not found`) } } } ================================================ FILE: 
apps/api/src/sandbox/guards/runner-access.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, CanActivate, ExecutionContext, NotFoundException, ForbiddenException, Logger, } from '@nestjs/common' import { RegionService } from '../../region/services/region.service' import { RunnerService } from '../services/runner.service' import { BaseAuthContext, OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { SystemRole } from '../../user/enums/system-role.enum' import { RegionType } from '../../region/enums/region-type.enum' import { isRegionProxyContext } from '../../common/interfaces/region-proxy.interface' import { isRegionSSHGatewayContext } from '../../common/interfaces/region-ssh-gateway.interface' import { isProxyContext } from '../../common/interfaces/proxy-context.interface' import { isSshGatewayContext } from '../../common/interfaces/ssh-gateway-context.interface' @Injectable() export class RunnerAccessGuard implements CanActivate { private readonly logger = new Logger(RunnerAccessGuard.name) constructor( private readonly runnerService: RunnerService, private readonly regionService: RegionService, ) {} async canActivate(context: ExecutionContext): Promise { const request = context.switchToHttp().getRequest() const runnerId: string = request.params.runnerId || request.params.id // TODO: initialize authContext safely const authContext: BaseAuthContext = request.user try { const runner = await this.runnerService.findOneOrFail(runnerId) switch (true) { case isRegionProxyContext(authContext): case isRegionSSHGatewayContext(authContext): { // Use RunnerRegionAccessGuard to check access instead return false } case isProxyContext(authContext): case isSshGatewayContext(authContext): return true default: { const orgAuthContext = authContext as OrganizationAuthContext if (orgAuthContext.role !== SystemRole.ADMIN) { const region = 
await this.regionService.findOne(runner.region) if (!region) { throw new NotFoundException('Region not found') } if (region.organizationId !== orgAuthContext.organizationId) { throw new ForbiddenException('Request organization ID does not match resource organization ID') } if (region.regionType !== RegionType.CUSTOM) { throw new ForbiddenException('Runner is not in a custom region') } } return true } } } catch (error) { if (!(error instanceof NotFoundException)) { this.logger.error(error) } throw new NotFoundException('Runner not found') } } } ================================================ FILE: apps/api/src/sandbox/guards/sandbox-access.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, CanActivate, ExecutionContext, NotFoundException, ForbiddenException } from '@nestjs/common' import { SandboxService } from '../services/sandbox.service' import { OrganizationAuthContext, BaseAuthContext } from '../../common/interfaces/auth-context.interface' import { isRunnerContext, RunnerContext } from '../../common/interfaces/runner-context.interface' import { SystemRole } from '../../user/enums/system-role.enum' import { isProxyContext } from '../../common/interfaces/proxy-context.interface' import { isSshGatewayContext } from '../../common/interfaces/ssh-gateway-context.interface' import { isRegionProxyContext } from '../../common/interfaces/region-proxy.interface' import { isRegionSSHGatewayContext } from '../../common/interfaces/region-ssh-gateway.interface' @Injectable() export class SandboxAccessGuard implements CanActivate { constructor(private readonly sandboxService: SandboxService) {} async canActivate(context: ExecutionContext): Promise { const request = context.switchToHttp().getRequest() // TODO: remove deprecated request.params.workspaceId param once we remove the deprecated workspace controller const sandboxIdOrName: string = 
request.params.sandboxIdOrName || request.params.sandboxId || request.params.id || request.params.workspaceId // TODO: initialize authContext safely const authContext: BaseAuthContext = request.user try { switch (true) { case isRunnerContext(authContext): { // For runner authentication, verify that the runner ID matches the sandbox's runner ID const runnerContext = authContext as RunnerContext const sandboxRunnerId = await this.sandboxService.getRunnerId(sandboxIdOrName) if (sandboxRunnerId !== runnerContext.runnerId) { throw new ForbiddenException('Runner ID does not match sandbox runner ID') } break } case isRegionProxyContext(authContext): case isRegionSSHGatewayContext(authContext): { // Use RegionSandboxAccessGuard to check access instead return false } case isProxyContext(authContext): case isSshGatewayContext(authContext): return true default: { // For user/organization authentication, check organization access const orgAuthContext = authContext as OrganizationAuthContext const sandboxOrganizationId = await this.sandboxService.getOrganizationId( sandboxIdOrName, orgAuthContext.organizationId, ) if (orgAuthContext.role !== SystemRole.ADMIN && sandboxOrganizationId !== orgAuthContext.organizationId) { throw new ForbiddenException('Request organization ID does not match resource organization ID') } } } return true } catch (error) { if (!(error instanceof NotFoundException)) { console.error(error) } throw new NotFoundException(`Sandbox with ID or name ${sandboxIdOrName} not found`) } } } ================================================ FILE: apps/api/src/sandbox/guards/snapshot-access.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, CanActivate, ExecutionContext, ForbiddenException, NotFoundException } from '@nestjs/common' import { SnapshotService } from '../services/snapshot.service' import { BaseAuthContext, isOrganizationAuthContext, OrganizationAuthContext, } from '../../common/interfaces/auth-context.interface' import { SystemRole } from '../../user/enums/system-role.enum' import { Snapshot } from '../entities/snapshot.entity' import { isSshGatewayContext } from '../../common/interfaces/ssh-gateway-context.interface' import { isProxyContext } from '../../common/interfaces/proxy-context.interface' import { isRegionProxyContext, RegionProxyContext } from '../../common/interfaces/region-proxy.interface' import { isRegionSSHGatewayContext, RegionSSHGatewayContext, } from '../../common/interfaces/region-ssh-gateway.interface' @Injectable() export class SnapshotAccessGuard implements CanActivate { constructor(private readonly snapshotService: SnapshotService) {} async canActivate(context: ExecutionContext): Promise { const request = context.switchToHttp().getRequest() const snapshotId: string = request.params.snapshotId || request.params.id let snapshot: Snapshot // TODO: initialize authContext safely const authContext: BaseAuthContext = request.user try { snapshot = await this.snapshotService.getSnapshot(snapshotId) } catch { if (!isOrganizationAuthContext(authContext)) { throw new NotFoundException(`Snapshot with ID ${snapshotId} not found`) } // If not found by ID, try by name snapshot = await this.snapshotService.getSnapshotByName(snapshotId, authContext.organizationId) } try { switch (true) { case isRegionProxyContext(authContext): case isRegionSSHGatewayContext(authContext): { // For region proxy/ssh gateway authentication, verify that the runner's region ID matches the region ID const regionContext = authContext as RegionProxyContext | RegionSSHGatewayContext const isAvailable = await 
this.snapshotService.isAvailableInRegion(snapshot.id, regionContext.regionId) if (!isAvailable) { throw new NotFoundException(`Snapshot is not available in region ${regionContext.regionId}`) } break } case isProxyContext(authContext): case isSshGatewayContext(authContext): break default: { // For user/organization authentication, check organization access const orgAuthContext = authContext as OrganizationAuthContext if (orgAuthContext.role !== SystemRole.ADMIN && snapshot.organizationId !== orgAuthContext.organizationId) { throw new ForbiddenException('Request organization ID does not match resource organization ID') } } } request.snapshot = snapshot return true } catch (error) { if (!(error instanceof NotFoundException)) { console.error(error) } throw new NotFoundException(`Snapshot with ID or name ${snapshotId} not found`) } } } ================================================ FILE: apps/api/src/sandbox/guards/snapshot-read-access.guard.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, CanActivate, ExecutionContext, ForbiddenException, NotFoundException } from '@nestjs/common' import { SnapshotService } from '../services/snapshot.service' import { BaseAuthContext, isOrganizationAuthContext, OrganizationAuthContext, } from '../../common/interfaces/auth-context.interface' import { SystemRole } from '../../user/enums/system-role.enum' import { Snapshot } from '../entities/snapshot.entity' import { isSshGatewayContext } from '../../common/interfaces/ssh-gateway-context.interface' import { isProxyContext } from '../../common/interfaces/proxy-context.interface' import { isRegionProxyContext, RegionProxyContext } from '../../common/interfaces/region-proxy.interface' import { isRegionSSHGatewayContext, RegionSSHGatewayContext, } from '../../common/interfaces/region-ssh-gateway.interface' @Injectable() export class SnapshotReadAccessGuard implements CanActivate { constructor(private readonly snapshotService: SnapshotService) {} async canActivate(context: ExecutionContext): Promise { const request = context.switchToHttp().getRequest() const snapshotId: string = request.params.snapshotId || request.params.id let snapshot: Snapshot const authContext: BaseAuthContext = request.user try { snapshot = await this.snapshotService.getSnapshot(snapshotId) } catch { if (!isOrganizationAuthContext(authContext)) { throw new NotFoundException(`Snapshot with ID ${snapshotId} not found`) } snapshot = await this.snapshotService.getSnapshotByName(snapshotId, authContext.organizationId) } try { switch (true) { case isRegionProxyContext(authContext): case isRegionSSHGatewayContext(authContext): { const regionContext = authContext as RegionProxyContext | RegionSSHGatewayContext const isAvailable = await this.snapshotService.isAvailableInRegion(snapshot.id, regionContext.regionId) if (!isAvailable) { throw new NotFoundException(`Snapshot is not available in region ${regionContext.regionId}`) } break } case 
isProxyContext(authContext): case isSshGatewayContext(authContext): break default: { const orgAuthContext = authContext as OrganizationAuthContext if ( orgAuthContext.role !== SystemRole.ADMIN && snapshot.organizationId !== orgAuthContext.organizationId && !snapshot.general ) { throw new ForbiddenException('Request organization ID does not match resource organization ID') } } } request.snapshot = snapshot return true } catch (error) { if (!(error instanceof NotFoundException)) { console.error(error) } throw new NotFoundException(`Snapshot with ID or name ${snapshotId} not found`) } } } ================================================ FILE: apps/api/src/sandbox/guards/ssh-gateway.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, ExecutionContext, Logger, CanActivate } from '@nestjs/common' import { getAuthContext } from '../../auth/get-auth-context' import { isSshGatewayContext } from '../../common/interfaces/ssh-gateway-context.interface' @Injectable() export class SshGatewayGuard implements CanActivate { protected readonly logger = new Logger(SshGatewayGuard.name) async canActivate(context: ExecutionContext): Promise { // Throws if not ssh gateway context getAuthContext(context, isSshGatewayContext) return true } } ================================================ FILE: apps/api/src/sandbox/guards/volume-access.guard.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, CanActivate, ExecutionContext, ForbiddenException, NotFoundException } from '@nestjs/common' import { OrganizationAuthContext } from '../../common/interfaces/auth-context.interface' import { SystemRole } from '../../user/enums/system-role.enum' import { VolumeService } from '../services/volume.service' @Injectable() export class VolumeAccessGuard implements CanActivate { constructor(private readonly volumeService: VolumeService) {} async canActivate(context: ExecutionContext): Promise { const request = context.switchToHttp().getRequest() const volumeId = request.params.volumeId || request.params.id const volumeName = request.params.name if (!volumeId && !volumeName) { throw new NotFoundException(`Volume not found`) } const authContext: OrganizationAuthContext = request.user try { const params = volumeId ? { id: volumeId } : { name: volumeName, organizationId: authContext.organizationId } const volumeOrganizationId = await this.volumeService.getOrganizationId(params) if (authContext.role !== SystemRole.ADMIN && volumeOrganizationId !== authContext.organizationId) { throw new ForbiddenException('Request organization ID does not match resource organization ID') } } catch { throw new NotFoundException(`Volume with ${volumeId ? 'ID' : 'name'} ${volumeId || volumeName} not found`) } return true } } ================================================ FILE: apps/api/src/sandbox/managers/backup.manager.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, OnApplicationShutdown } from '@nestjs/common' import { Cron, CronExpression } from '@nestjs/schedule' import { In, IsNull, LessThan, Not, Or } from 'typeorm' import { Sandbox } from '../entities/sandbox.entity' import { SandboxState } from '../enums/sandbox-state.enum' import { RunnerService } from '../services/runner.service' import { RunnerState } from '../enums/runner-state.enum' import { BadRequestError } from '../../exceptions/bad-request.exception' import { DockerRegistryService } from '../../docker-registry/services/docker-registry.service' import { BackupState } from '../enums/backup-state.enum' import { InjectRedis } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION } from '../constants/sandbox.constants' import { fromAxiosError } from '../../common/utils/from-axios-error' import { RedisLockProvider } from '../common/redis-lock.provider' import { OnEvent } from '@nestjs/event-emitter' import { SandboxEvents } from '../constants/sandbox-events.constants' import { SandboxDestroyedEvent } from '../events/sandbox-destroyed.event' import { SandboxBackupCreatedEvent } from '../events/sandbox-backup-created.event' import { SandboxArchivedEvent } from '../events/sandbox-archived.event' import { RunnerAdapterFactory } from '../runner-adapter/runnerAdapter' import { TypedConfigService } from '../../config/typed-config.service' import { TrackJobExecution } from '../../common/decorators/track-job-execution.decorator' import { TrackableJobExecutions } from '../../common/interfaces/trackable-job-executions' import { setTimeout } from 'timers/promises' import { LogExecution } from '../../common/decorators/log-execution.decorator' import { WithInstrumentation } from '../../common/decorators/otel.decorator' import { DockerRegistry } from '../../docker-registry/entities/docker-registry.entity' import { SandboxService } from 
'../services/sandbox.service' import { SandboxRepository } from '../repositories/sandbox.repository' @Injectable() export class BackupManager implements TrackableJobExecutions, OnApplicationShutdown { activeJobs = new Set() private readonly logger = new Logger(BackupManager.name) constructor( private readonly sandboxRepository: SandboxRepository, private readonly sandboxService: SandboxService, private readonly runnerService: RunnerService, private readonly runnerAdapterFactory: RunnerAdapterFactory, private readonly dockerRegistryService: DockerRegistryService, @InjectRedis() private readonly redis: Redis, private readonly redisLockProvider: RedisLockProvider, private readonly configService: TypedConfigService, ) {} // on init async onApplicationBootstrap() { await this.adHocBackupCheck() } async onApplicationShutdown() { // wait for all active jobs to finish while (this.activeJobs.size > 0) { this.logger.log(`Waiting for ${this.activeJobs.size} active jobs to finish`) await setTimeout(1000) } } // todo: make frequency configurable or more efficient @Cron(CronExpression.EVERY_5_MINUTES, { name: 'ad-hoc-backup-check' }) @TrackJobExecution() @LogExecution('ad-hoc-backup-check') @WithInstrumentation() async adHocBackupCheck(): Promise { const lockKey = 'ad-hoc-backup-check' const hasLock = await this.redisLockProvider.lock(lockKey, 5 * 60) if (!hasLock) { return } // Get all ready runners const readyRunners = await this.runnerService.findAllReady() try { // Process all runners in parallel await Promise.all( readyRunners.map(async (runner) => { const sandboxes = await this.sandboxRepository.find({ where: { runnerId: runner.id, organizationId: Not(SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION), state: SandboxState.STARTED, backupState: In([BackupState.NONE, BackupState.COMPLETED]), lastBackupAt: Or(IsNull(), LessThan(new Date(Date.now() - 1 * 60 * 60 * 1000))), autoDeleteInterval: Not(0), }, order: { lastBackupAt: 'ASC', }, // todo: increase this number when backup is 
stable take: 10, }) await Promise.all( sandboxes.map(async (sandbox) => { const lockKey = `sandbox-backup-${sandbox.id}` const hasLock = await this.redisLockProvider.lock(lockKey, 60) if (!hasLock) { return } try { // todo: remove the catch handler asap await this.setBackupPending(sandbox).catch((error) => { if (error instanceof BadRequestError && error.message === 'A backup is already in progress') { return } this.logger.error(`Failed to create backup for sandbox ${sandbox.id}:`, fromAxiosError(error)) }) } catch (error) { this.logger.error(`Error processing stop state for sandbox ${sandbox.id}:`, fromAxiosError(error)) } finally { await this.redisLockProvider.unlock(lockKey) } }), ) }), ) } catch (error) { this.logger.error(`Error processing backups: `, error) } finally { await this.redisLockProvider.unlock(lockKey) } } @Cron(CronExpression.EVERY_10_SECONDS, { name: 'check-backup-states' }) @TrackJobExecution() @LogExecution('check-backup-states') @WithInstrumentation() async checkBackupStates(): Promise { // lock the sync to only run one instance at a time const lockKey = 'check-backup-states' const hasLock = await this.redisLockProvider.lock(lockKey, 10) if (!hasLock) { return } try { const sandboxes = await this.sandboxRepository .createQueryBuilder('sandbox') .innerJoin('runner', 'r', 'r.id = sandbox.runnerId') .where('sandbox.state IN (:...states)', { states: [SandboxState.ARCHIVING, SandboxState.STARTED, SandboxState.STOPPED], }) .andWhere('sandbox.backupState IN (:...backupStates)', { backupStates: [BackupState.PENDING, BackupState.IN_PROGRESS], }) .andWhere('r.state = :ready', { ready: RunnerState.READY }) // Prioritize manual archival action, then auto-archive poller, then ad-hoc backup poller .addSelect( ` CASE sandbox.state WHEN :archiving THEN 1 WHEN :stopped THEN 2 WHEN :started THEN 3 ELSE 999 END `, 'state_priority', ) .setParameters({ archiving: SandboxState.ARCHIVING, stopped: SandboxState.STOPPED, started: SandboxState.STARTED, }) 
.orderBy('state_priority', 'ASC') .addOrderBy('sandbox.lastBackupAt', 'ASC', 'NULLS FIRST') // Process sandboxes with no backups first .addOrderBy('sandbox.createdAt', 'ASC') // For equal lastBackupAt, process older sandboxes first .take(100) .getMany() await Promise.allSettled( sandboxes.map(async (s) => { const lockKey = `sandbox-backup-${s.id}` const hasLock = await this.redisLockProvider.lock(lockKey, 60) if (!hasLock) { return } try { // get the latest sandbox state const sandbox = await this.sandboxRepository.findOneByOrFail({ id: s.id, }) try { switch (sandbox.backupState) { case BackupState.PENDING: { await this.handlePendingBackup(sandbox) break } case BackupState.IN_PROGRESS: { await this.checkBackupProgress(sandbox) break } } } catch (error) { // if error, retry 10 times const errorRetryKey = `${lockKey}-error-retry` const errorRetryCount = await this.redis.get(errorRetryKey) if (!errorRetryCount) { await this.redis.setex(errorRetryKey, 300, '1') } else if (parseInt(errorRetryCount) > 10) { this.logger.error(`Error processing backup for sandbox ${sandbox.id}:`, fromAxiosError(error)) await this.sandboxService.updateSandboxBackupState( sandbox.id, BackupState.ERROR, undefined, undefined, fromAxiosError(error).message, ) } else { await this.redis.setex(errorRetryKey, 300, errorRetryCount + 1) } } } catch (error) { this.logger.error(`Error processing backup for sandbox ${s.id}:`, error) } finally { await this.redisLockProvider.unlock(lockKey) } }), ) } catch (error) { this.logger.error(`Error processing backups: `, error) } finally { await this.redisLockProvider.unlock(lockKey) } } @Cron(CronExpression.EVERY_10_SECONDS, { name: 'check-backup-states-errored-draining' }) @TrackJobExecution() @LogExecution('check-backup-states-errored-draining') @WithInstrumentation() async checkBackupStatesForErroredDraining(): Promise { const lockKey = 'check-backup-states-errored-draining' const hasLock = await this.redisLockProvider.lock(lockKey, 10) if (!hasLock) { return 
} try { const sandboxes = await this.sandboxRepository .createQueryBuilder('sandbox') .innerJoin('runner', 'r', 'r.id = sandbox.runnerId') .where('sandbox.state = :error', { error: SandboxState.ERROR }) .andWhere('sandbox.backupState IN (:...backupStates)', { backupStates: [BackupState.PENDING, BackupState.IN_PROGRESS], }) .andWhere('r.state = :ready', { ready: RunnerState.READY }) .andWhere('r."draining" = true') .addOrderBy('sandbox.lastBackupAt', 'ASC', 'NULLS FIRST') .addOrderBy('sandbox.createdAt', 'ASC') .take(100) .getMany() await Promise.allSettled( sandboxes.map(async (s) => { const lockKey = `sandbox-backup-${s.id}` const hasLock = await this.redisLockProvider.lock(lockKey, 60) if (!hasLock) { return } try { const sandbox = await this.sandboxRepository.findOneByOrFail({ id: s.id, }) try { switch (sandbox.backupState) { case BackupState.PENDING: { await this.handlePendingBackup(sandbox) break } case BackupState.IN_PROGRESS: { await this.checkBackupProgress(sandbox) break } } } catch (error) { const errorRetryKey = `${lockKey}-error-retry` const errorRetryCount = await this.redis.get(errorRetryKey) if (!errorRetryCount) { await this.redis.setex(errorRetryKey, 300, '1') } else if (parseInt(errorRetryCount) > 10) { this.logger.error( `Error processing backup for errored sandbox ${sandbox.id} on draining runner:`, fromAxiosError(error), ) await this.sandboxService.updateSandboxBackupState( sandbox.id, BackupState.ERROR, undefined, undefined, fromAxiosError(error).message, ) } else { await this.redis.setex(errorRetryKey, 300, errorRetryCount + 1) } } } catch (error) { this.logger.error(`Error processing backup for errored sandbox ${s.id} on draining runner:`, error) } finally { await this.redisLockProvider.unlock(lockKey) } }), ) } catch (error) { this.logger.error(`Error processing backups for errored sandboxes on draining runners: `, error) } finally { await this.redisLockProvider.unlock(lockKey) } } @Cron(CronExpression.EVERY_10_SECONDS, { name: 
'sync-stop-state-create-backups' }) @TrackJobExecution() @LogExecution('sync-stop-state-create-backups') @WithInstrumentation() async syncStopStateCreateBackups(): Promise { const lockKey = 'sync-stop-state-create-backups' const hasLock = await this.redisLockProvider.lock(lockKey, 10) if (!hasLock) { return } try { const sandboxes = await this.sandboxRepository .createQueryBuilder('sandbox') .innerJoin('runner', 'r', 'r.id = sandbox.runnerId') .where('sandbox.state IN (:...states)', { states: [SandboxState.ARCHIVING, SandboxState.STOPPED] }) .andWhere('sandbox.backupState = :none', { none: BackupState.NONE }) .andWhere('r.state = :ready', { ready: RunnerState.READY }) .take(100) .getMany() await Promise.allSettled( sandboxes .filter((sandbox) => sandbox.runnerId !== null) .map(async (sandbox) => { const lockKey = `sandbox-backup-${sandbox.id}` const hasLock = await this.redisLockProvider.lock(lockKey, 30) if (!hasLock) { return } try { await this.setBackupPending(sandbox) } catch (error) { this.logger.error(`Error processing backup for sandbox ${sandbox.id}:`, error) } finally { await this.redisLockProvider.unlock(lockKey) } }), ) } catch (error) { this.logger.error(`Error processing backups: `, error) } finally { await this.redisLockProvider.unlock(lockKey) } } async setBackupPending(sandbox: Sandbox): Promise { if (sandbox.backupState === BackupState.COMPLETED) { return } // Allow backups for STARTED sandboxes, STOPPED/ERROR sandboxes with runnerId, or ARCHIVING sandboxes if ( !( sandbox.state === SandboxState.STARTED || sandbox.state === SandboxState.ARCHIVING || (sandbox.state === SandboxState.STOPPED && sandbox.runnerId) || (sandbox.state === SandboxState.ERROR && sandbox.runnerId) ) ) { throw new BadRequestError('Sandbox must be started, stopped, or errored with assigned runner to create a backup') } if (sandbox.backupState === BackupState.IN_PROGRESS || sandbox.backupState === BackupState.PENDING) { return } let registry: DockerRegistry | null = null if 
(sandbox.backupRegistryId) { registry = await this.dockerRegistryService.findOne(sandbox.backupRegistryId) } else { registry = await this.dockerRegistryService.getAvailableBackupRegistry(sandbox.region) } if (!registry) { throw new BadRequestError('No backup registry configured') } // Generate backup snapshot name const timestamp = new Date().toISOString().replace(/[:.]/g, '-') const backupSnapshot = `${registry.url.replace('https://', '').replace('http://', '')}/${registry.project || 'daytona'}/backup-${sandbox.id}:${timestamp}` await this.sandboxService.updateSandboxBackupState(sandbox.id, BackupState.PENDING, backupSnapshot, registry.id) } private async checkBackupProgress(sandbox: Sandbox): Promise { try { const runner = await this.runnerService.findOneOrFail(sandbox.runnerId) const runnerAdapter = await this.runnerAdapterFactory.create(runner) // Get sandbox info from runner const sandboxInfo = await runnerAdapter.sandboxInfo(sandbox.id) switch (sandboxInfo.backupState) { case BackupState.COMPLETED: { await this.sandboxService.updateSandboxBackupState(sandbox.id, BackupState.COMPLETED) break } case BackupState.ERROR: { await this.sandboxService.updateSandboxBackupState( sandbox.id, BackupState.ERROR, undefined, undefined, sandboxInfo.backupErrorReason, ) break } // If backup state is none, retry the backup process by setting the backup state to pending // This can happen if the runner is restarted or the operation is cancelled case BackupState.NONE: { await this.sandboxService.updateSandboxBackupState(sandbox.id, BackupState.PENDING) break } // If still in progress or any other state, do nothing and wait for next sync } } catch (error) { await this.sandboxService.updateSandboxBackupState( sandbox.id, BackupState.ERROR, undefined, undefined, fromAxiosError(error).message, ) throw error } } private async deleteSandboxBackupRepositoryFromRegistry(sandbox: Sandbox): Promise { const registry = await this.dockerRegistryService.findOne(sandbox.backupRegistryId) try { 
await this.dockerRegistryService.deleteSandboxRepository(sandbox.id, registry) } catch (error) { this.logger.error( `Failed to delete backup repository ${sandbox.id} from registry ${registry.id}:`, fromAxiosError(error), ) } } private async handlePendingBackup(sandbox: Sandbox): Promise { const lockKey = `runner-${sandbox.runnerId}-backup-lock` try { await this.redisLockProvider.waitForLock(lockKey, 10) const backupsInProgress = await this.sandboxRepository.count({ where: { runnerId: sandbox.runnerId, backupState: BackupState.IN_PROGRESS, }, }) if (backupsInProgress >= this.configService.getOrThrow('maxConcurrentBackupsPerRunner')) { return } const registry = await this.dockerRegistryService.findOne(sandbox.backupRegistryId) if (!registry) { throw new Error('Registry not found') } const runner = await this.runnerService.findOneOrFail(sandbox.runnerId) const runnerAdapter = await this.runnerAdapterFactory.create(runner) // check if backup is already in progress on the runner const runnerSandbox = await runnerAdapter.sandboxInfo(sandbox.id) if (runnerSandbox.backupState === BackupState.IN_PROGRESS) { await this.sandboxService.updateSandboxBackupState(sandbox.id, BackupState.IN_PROGRESS) return } // Initiate backup on runner await runnerAdapter.createBackup(sandbox, sandbox.backupSnapshot, registry) await this.sandboxService.updateSandboxBackupState(sandbox.id, BackupState.IN_PROGRESS) } catch (error) { if (error.response?.status === 400 && error.response?.data?.message.includes('A backup is already in progress')) { await this.sandboxService.updateSandboxBackupState(sandbox.id, BackupState.IN_PROGRESS) return } await this.sandboxService.updateSandboxBackupState( sandbox.id, BackupState.ERROR, undefined, undefined, fromAxiosError(error).message, ) throw error } finally { await this.redisLockProvider.unlock(lockKey) } } @OnEvent(SandboxEvents.ARCHIVED) @TrackJobExecution() private async handleSandboxArchivedEvent(event: SandboxArchivedEvent) { 
this.setBackupPending(event.sandbox) } @OnEvent(SandboxEvents.DESTROYED) @TrackJobExecution() private async handleSandboxDestroyedEvent(event: SandboxDestroyedEvent) { this.deleteSandboxBackupRepositoryFromRegistry(event.sandbox) } @OnEvent(SandboxEvents.BACKUP_CREATED) @TrackJobExecution() private async handleSandboxBackupCreatedEvent(event: SandboxBackupCreatedEvent) { this.setBackupPending(event.sandbox) } } ================================================ FILE: apps/api/src/sandbox/managers/sandbox-actions/sandbox-archive.action.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable } from '@nestjs/common' import { Sandbox } from '../../entities/sandbox.entity' import { SandboxState } from '../../enums/sandbox-state.enum' import { DONT_SYNC_AGAIN, SandboxAction, SyncState, SYNC_AGAIN } from './sandbox.action' import { BackupState } from '../../enums/backup-state.enum' import { LockCode, RedisLockProvider } from '../../common/redis-lock.provider' import { RunnerService } from '../../services/runner.service' import { SandboxRepository } from '../../repositories/sandbox.repository' import { InjectRedis } from '@nestjs-modules/ioredis' import Redis from 'ioredis' import { RunnerAdapterFactory } from '../../runner-adapter/runnerAdapter' import { EventEmitter2 } from '@nestjs/event-emitter' import { SandboxEvents } from '../../constants/sandbox-events.constants' import { SandboxBackupCreatedEvent } from '../../events/sandbox-backup-created.event' import { WithSpan } from '../../../common/decorators/otel.decorator' @Injectable() export class SandboxArchiveAction extends SandboxAction { constructor( protected runnerService: RunnerService, protected runnerAdapterFactory: RunnerAdapterFactory, protected sandboxRepository: SandboxRepository, protected readonly redisLockProvider: RedisLockProvider, @InjectRedis() private readonly redis: Redis, private readonly eventEmitter: 
EventEmitter2, ) { super(runnerService, runnerAdapterFactory, sandboxRepository, redisLockProvider) } @WithSpan() async run(sandbox: Sandbox, lockCode: LockCode): Promise { // Only proceed with archiving if the sandbox is in STOPPED, ARCHIVING or ERROR (runner draining) state. // For all other states, do not proceed with archiving. if ( sandbox.state !== SandboxState.STOPPED && sandbox.state !== SandboxState.ARCHIVING && sandbox.state !== SandboxState.ERROR ) { return DONT_SYNC_AGAIN } const lockKey = 'archive-lock-' + sandbox.runnerId if (!(await this.redisLockProvider.lock(lockKey, 10))) { return DONT_SYNC_AGAIN } const isFromErrorState = sandbox.state === SandboxState.ERROR await this.redisLockProvider.unlock(lockKey) // if the backup state is error, we need to retry the backup if (sandbox.backupState === BackupState.ERROR) { const archiveErrorRetryKey = 'archive-error-retry-' + sandbox.id const archiveErrorRetryCountRaw = await this.redis.get(archiveErrorRetryKey) const archiveErrorRetryCount = archiveErrorRetryCountRaw ? 
parseInt(archiveErrorRetryCountRaw) : 0 // if the archive error retry count is greater than 3, we need to mark the sandbox as error if (archiveErrorRetryCount > 3) { // Only transition to ERROR if not already in ERROR state if (!isFromErrorState) { await this.updateSandboxState( sandbox, SandboxState.ERROR, lockCode, undefined, 'Failed to archive sandbox after 3 retries', ) } await this.redis.del(archiveErrorRetryKey) return DONT_SYNC_AGAIN } await this.redis.setex('archive-error-retry-' + sandbox.id, 720, String(archiveErrorRetryCount + 1)) // recreate the backup to retry this.eventEmitter.emit(SandboxEvents.BACKUP_CREATED, new SandboxBackupCreatedEvent(sandbox)) return DONT_SYNC_AGAIN } if (sandbox.backupState !== BackupState.COMPLETED) { return DONT_SYNC_AGAIN } // when the backup is completed, destroy the sandbox on the runner // and deassociate the sandbox from the runner const runner = await this.runnerService.findOneOrFail(sandbox.runnerId) const runnerAdapter = await this.runnerAdapterFactory.create(runner) try { const sandboxInfo = await runnerAdapter.sandboxInfo(sandbox.id) this.logger.log(`sandbox info from runner: state: ${sandboxInfo.state}, backupState: ${sandboxInfo.backupState}`) if (sandboxInfo.state === SandboxState.DESTROYED) { if (isFromErrorState) { this.logger.warn(`Transitioning sandbox ${sandbox.id} from ERROR to ARCHIVED state (runner draining)`) } await this.updateSandboxState(sandbox, SandboxState.ARCHIVED, lockCode, null) return DONT_SYNC_AGAIN } if (sandboxInfo.state !== SandboxState.DESTROYING) { await runnerAdapter.destroySandbox(sandbox.id) } return SYNC_AGAIN } catch (error) { // fail for errors other than sandbox not found or sandbox already destroyed if ( (error.response?.data?.statusCode === 400 && error.response?.data?.message.includes('Sandbox already destroyed')) || error.response?.status === 404 || error.statusCode === 404 ) { // if the sandbox is already destroyed, do nothing if (isFromErrorState) { 
this.logger.warn(`Transitioning sandbox ${sandbox.id} from ERROR to ARCHIVED state (runner draining)`) } await this.updateSandboxState(sandbox, SandboxState.ARCHIVED, lockCode, null) return DONT_SYNC_AGAIN } throw error } } } ================================================ FILE: apps/api/src/sandbox/managers/sandbox-actions/sandbox-destroy.action.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable } from '@nestjs/common' import { Sandbox } from '../../entities/sandbox.entity' import { SandboxState } from '../../enums/sandbox-state.enum' import { DONT_SYNC_AGAIN, SandboxAction, SyncState, SYNC_AGAIN } from './sandbox.action' import { RunnerState } from '../../enums/runner-state.enum' import { RunnerService } from '../../services/runner.service' import { RunnerAdapterFactory } from '../../runner-adapter/runnerAdapter' import { SandboxRepository } from '../../repositories/sandbox.repository' import { LockCode, RedisLockProvider } from '../../common/redis-lock.provider' import { WithSpan } from '../../../common/decorators/otel.decorator' @Injectable() export class SandboxDestroyAction extends SandboxAction { constructor( protected runnerService: RunnerService, protected runnerAdapterFactory: RunnerAdapterFactory, protected sandboxRepository: SandboxRepository, protected redisLockProvider: RedisLockProvider, ) { super(runnerService, runnerAdapterFactory, sandboxRepository, redisLockProvider) } @WithSpan() async run(sandbox: Sandbox, lockCode: LockCode): Promise { if (sandbox.state === SandboxState.DESTROYED) { return DONT_SYNC_AGAIN } if (sandbox.state === SandboxState.ARCHIVED || sandbox.state === SandboxState.PENDING_BUILD) { await this.updateSandboxState(sandbox, SandboxState.DESTROYED, lockCode) return DONT_SYNC_AGAIN } const runner = await this.runnerService.findOneOrFail(sandbox.runnerId) if (runner.state !== RunnerState.READY) { return DONT_SYNC_AGAIN } const 
runnerAdapter = await this.runnerAdapterFactory.create(runner) try { const sandboxInfo = await runnerAdapter.sandboxInfo(sandbox.id) if (sandboxInfo.state === SandboxState.DESTROYED) { await this.updateSandboxState(sandbox, SandboxState.DESTROYED, lockCode) return DONT_SYNC_AGAIN } if (sandbox.state !== SandboxState.DESTROYING) { await runnerAdapter.destroySandbox(sandbox.id) await this.updateSandboxState(sandbox, SandboxState.DESTROYING, lockCode) } return SYNC_AGAIN } catch (error) { // if the sandbox is not found on runner, it is already destroyed if (error.response?.status === 404 || error.statusCode === 404) { await this.updateSandboxState(sandbox, SandboxState.DESTROYED, lockCode) return DONT_SYNC_AGAIN } throw error } } } ================================================ FILE: apps/api/src/sandbox/managers/sandbox-actions/sandbox-start.action.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, NotFoundException } from '@nestjs/common' import { SandboxRepository } from '../../repositories/sandbox.repository' import { RECOVERY_ERROR_SUBSTRINGS } from '../../constants/errors-for-recovery' import { Sandbox } from '../../entities/sandbox.entity' import { SandboxState } from '../../enums/sandbox-state.enum' import { DONT_SYNC_AGAIN, SandboxAction, SYNC_AGAIN, SyncState } from './sandbox.action' import { SANDBOX_BUILD_INFO_CACHE_TTL_MS } from '../../utils/sandbox-lookup-cache.util' import { SnapshotRunnerState } from '../../enums/snapshot-runner-state.enum' import { BackupState } from '../../enums/backup-state.enum' import { RunnerState } from '../../enums/runner-state.enum' import { BuildInfo } from '../../entities/build-info.entity' import { SnapshotService } from '../../services/snapshot.service' import { DockerRegistryService } from '../../../docker-registry/services/docker-registry.service' import { DockerRegistry } from 
'../../../docker-registry/entities/docker-registry.entity' import { RunnerService } from '../../services/runner.service' import { RunnerAdapterFactory } from '../../runner-adapter/runnerAdapter' import { SnapshotStateError } from '../../errors/snapshot-state-error' import { Snapshot } from '../../entities/snapshot.entity' import { OrganizationService } from '../../../organization/services/organization.service' import { TypedConfigService } from '../../../config/typed-config.service' import { Runner } from '../../entities/runner.entity' import { Organization } from '../../../organization/entities/organization.entity' import { LockCode, RedisLockProvider } from '../../common/redis-lock.provider' import { InjectRedis } from '@nestjs-modules/ioredis' import Redis from 'ioredis' import { WithSpan } from '../../../common/decorators/otel.decorator' @Injectable() export class SandboxStartAction extends SandboxAction { protected readonly logger = new Logger(SandboxStartAction.name) constructor( protected runnerService: RunnerService, protected runnerAdapterFactory: RunnerAdapterFactory, protected sandboxRepository: SandboxRepository, protected readonly snapshotService: SnapshotService, protected readonly dockerRegistryService: DockerRegistryService, protected readonly organizationService: OrganizationService, protected readonly configService: TypedConfigService, protected readonly redisLockProvider: RedisLockProvider, @InjectRedis() private readonly redis: Redis, ) { super(runnerService, runnerAdapterFactory, sandboxRepository, redisLockProvider) } @WithSpan() async run(sandbox: Sandbox, lockCode: LockCode): Promise { // Load buildInfo only for states that need it — avoids a JOIN+DISTINCT in the // shared syncInstanceState query that stop/destroy/archive paths never use. 
if ( sandbox.snapshot === null && [SandboxState.PENDING_BUILD, SandboxState.BUILDING_SNAPSHOT, SandboxState.UNKNOWN].includes(sandbox.state) ) { await this.loadBuildInfo(sandbox) } switch (sandbox.state) { case SandboxState.PULLING_SNAPSHOT: { if (!sandbox.runnerId) { // Using the PULLING_SNAPSHOT state for the case where the runner isn't assigned yet as well return this.handleUnassignedRunnerSandbox(sandbox, lockCode) } else { return this.handleRunnerSandboxStartedStateCheck(sandbox, lockCode) } } case SandboxState.PENDING_BUILD: { return this.handleUnassignedRunnerSandbox(sandbox, lockCode, true) } case SandboxState.BUILDING_SNAPSHOT: { return this.handleRunnerSandboxBuildingSnapshotStateOnDesiredStateStart(sandbox, lockCode) } case SandboxState.UNKNOWN: { return this.handleRunnerSandboxUnknownStateOnDesiredStateStart(sandbox, lockCode) } case SandboxState.ARCHIVED: case SandboxState.ARCHIVING: case SandboxState.STOPPED: { return this.handleRunnerSandboxStoppedOrArchivedStateOnDesiredStateStart(sandbox, lockCode) } case SandboxState.RESTORING: case SandboxState.CREATING: case SandboxState.STARTING: { return this.handleRunnerSandboxStartedStateCheck(sandbox, lockCode) } case SandboxState.ERROR: { this.logger.error(`Sandbox ${sandbox.id} is in error state on desired state start`) return DONT_SYNC_AGAIN } } return DONT_SYNC_AGAIN } /** * Loads the buildInfo relation for a sandbox. * Uses QueryBuilder with getMany() to avoid the SELECT DISTINCT subquery * that TypeORM generates when combining relations with findOne/LIMIT. * Since sandbox.id is a PK and BuildInfo is @ManyToOne, at most one row is returned. */ private async loadBuildInfo(sandbox: Sandbox): Promise { const [result] = await this.sandboxRepository .createQueryBuilder('sandbox') .leftJoinAndSelect('sandbox.buildInfo', 'buildInfo') .where('sandbox.id = :id', { id: sandbox.id }) .cache(`sandbox:buildInfo:${sandbox.id}`, SANDBOX_BUILD_INFO_CACHE_TTL_MS) .getMany() sandbox.buildInfo = result?.buildInfo ?? 
null } private async handleRunnerSandboxBuildingSnapshotStateOnDesiredStateStart( sandbox: Sandbox, lockCode: LockCode, ): Promise { // Check for timeout - allow up to 60 minutes since the last sandbox update const timeoutMinutes = 60 const timeoutMs = timeoutMinutes * 60 * 1000 if (sandbox.updatedAt && Date.now() - sandbox.updatedAt.getTime() > timeoutMs) { await this.updateSandboxState( sandbox, SandboxState.BUILD_FAILED, lockCode, undefined, 'Timeout while building snapshot on runner', ) return DONT_SYNC_AGAIN } const snapshotRunner = await this.runnerService.getSnapshotRunner(sandbox.runnerId, sandbox.buildInfo.snapshotRef) if (snapshotRunner) { switch (snapshotRunner.state) { case SnapshotRunnerState.READY: { // TODO: "UNKNOWN" should probably be changed to something else await this.updateSandboxState(sandbox, SandboxState.UNKNOWN, lockCode) return SYNC_AGAIN } case SnapshotRunnerState.ERROR: { await this.updateSandboxState( sandbox, SandboxState.BUILD_FAILED, lockCode, undefined, snapshotRunner.errorReason, ) return DONT_SYNC_AGAIN } } } if (!snapshotRunner || snapshotRunner.state === SnapshotRunnerState.BUILDING_SNAPSHOT) { // Sleep for a second and go back to syncing instance state await new Promise((resolve) => setTimeout(resolve, 1000)) return SYNC_AGAIN } return DONT_SYNC_AGAIN } private async handleUnassignedRunnerSandbox( sandbox: Sandbox, lockCode: LockCode, isBuild = false, ): Promise { // Get snapshot reference based on whether it's a pull or build operation let snapshotRef: string if (isBuild) { snapshotRef = sandbox.buildInfo.snapshotRef } else { const snapshot = await this.snapshotService.getSnapshotByName(sandbox.snapshot, sandbox.organizationId) snapshotRef = snapshot.ref } const declarativeBuildScoreThreshold = this.configService.get('runnerScore.thresholds.declarativeBuild') // Try to assign an available runner with the snapshot already available try { const runner = await this.runnerService.getRandomAvailableRunner({ regions: 
[sandbox.region], sandboxClass: sandbox.class, snapshotRef: snapshotRef, ...(isBuild && declarativeBuildScoreThreshold !== undefined && { availabilityScoreThreshold: declarativeBuildScoreThreshold, }), }) if (runner) { await this.updateSandboxState(sandbox, SandboxState.UNKNOWN, lockCode, runner.id) return SYNC_AGAIN } } catch { // Continue to next assignment method } // Try to assign an available runner that is currently processing the snapshot const snapshotRunners = await this.runnerService.getSnapshotRunners(snapshotRef) const targetState = isBuild ? SnapshotRunnerState.BUILDING_SNAPSHOT : SnapshotRunnerState.PULLING_SNAPSHOT const targetSandboxState = isBuild ? SandboxState.BUILDING_SNAPSHOT : SandboxState.PULLING_SNAPSHOT const errorSandboxState = isBuild ? SandboxState.BUILD_FAILED : SandboxState.ERROR for (const snapshotRunner of snapshotRunners) { // Consider removing the runner usage rate check or improving it const runner = await this.runnerService.findOneOrFail(snapshotRunner.runnerId) if (snapshotRunner.state === SnapshotRunnerState.ERROR) { await this.updateSandboxState(sandbox, errorSandboxState, lockCode, runner.id, snapshotRunner.errorReason) return DONT_SYNC_AGAIN } if (runner.unschedulable || runner.draining || runner.state !== RunnerState.READY) { continue } if (declarativeBuildScoreThreshold === undefined || runner.availabilityScore >= declarativeBuildScoreThreshold) { if (snapshotRunner.state === targetState) { await this.updateSandboxState(sandbox, targetSandboxState, lockCode, runner.id) return SYNC_AGAIN } } } // Get excluded runner IDs based on operation type const excludedRunnerIds = await (isBuild ? 
this.runnerService.getRunnersWithMultipleSnapshotsBuilding() : this.runnerService.getRunnersWithMultipleSnapshotsPulling()) // Try to assign an available runner to start processing the snapshot let runner: Runner try { runner = await this.runnerService.getRandomAvailableRunner({ regions: [sandbox.region], sandboxClass: sandbox.class, excludedRunnerIds: excludedRunnerIds, ...(isBuild && declarativeBuildScoreThreshold !== undefined && { availabilityScoreThreshold: declarativeBuildScoreThreshold, }), }) } catch { // TODO: reconsider the timeout here // No runners available, wait for 3 seconds and retry await new Promise((resolve) => setTimeout(resolve, 3000)) return SYNC_AGAIN } if (isBuild) { this.buildOnRunner(sandbox.buildInfo, runner, sandbox.organizationId) await this.updateSandboxState(sandbox, SandboxState.BUILDING_SNAPSHOT, lockCode, runner.id) } else { const snapshot = await this.snapshotService.getSnapshotByName(sandbox.snapshot, sandbox.organizationId) await this.runnerService.createSnapshotRunnerEntry(runner.id, snapshot.ref, SnapshotRunnerState.PULLING_SNAPSHOT) this.pullSnapshotToRunner(snapshot, runner) await this.updateSandboxState(sandbox, SandboxState.PULLING_SNAPSHOT, lockCode, runner.id) } return SYNC_AGAIN } async pullSnapshotToRunner(snapshot: Snapshot, runner: Runner) { const internalRegistry = await this.dockerRegistryService.findInternalRegistryBySnapshotRef( snapshot.ref, runner.region, ) if (!internalRegistry) { throw new Error('No internal registry found for sandbox snapshot') } const runnerAdapter = await this.runnerAdapterFactory.create(runner) // Fire the pull request (runner returns 202 immediately) await runnerAdapter.pullSnapshot(snapshot.ref, internalRegistry) const pollTimeoutMs = 60 * 60 * 1_000 // 1 hour const pollIntervalMs = 5 * 1_000 // 5 seconds const startTime = Date.now() while (Date.now() - startTime < pollTimeoutMs) { try { await runnerAdapter.getSnapshotInfo(snapshot.ref) return } catch (err) { if (err instanceof 
SnapshotStateError) { throw err } } await new Promise((resolve) => setTimeout(resolve, pollIntervalMs)) } } // Initiates the snapshot build on the runner and creates an SnapshotRunner depending on the result async buildOnRunner(buildInfo: BuildInfo, runner: Runner, organizationId: string) { const runnerAdapter = await this.runnerAdapterFactory.create(runner) const sourceRegistries = await this.dockerRegistryService.getSourceRegistriesForDockerfile( buildInfo.dockerfileContent, organizationId, ) // Fire build request (runner returns 202 immediately) await runnerAdapter.buildSnapshot( buildInfo, organizationId, sourceRegistries.length > 0 ? sourceRegistries : undefined, ) const pollTimeoutMs = 60 * 60 * 1_000 // 1 hour const pollIntervalMs = 5 * 1_000 // 5 seconds const startTime = Date.now() while (Date.now() - startTime < pollTimeoutMs) { try { await runnerAdapter.getSnapshotInfo(buildInfo.snapshotRef) break } catch (err) { if (err instanceof SnapshotStateError) { await this.runnerService.createSnapshotRunnerEntry( runner.id, buildInfo.snapshotRef, SnapshotRunnerState.ERROR, err.message, ) return } await new Promise((resolve) => setTimeout(resolve, pollIntervalMs)) } } if (Date.now() - startTime >= pollTimeoutMs) { await this.runnerService.createSnapshotRunnerEntry( runner.id, buildInfo.snapshotRef, SnapshotRunnerState.ERROR, 'Timeout while building', ) return } const exists = await runnerAdapter.snapshotExists(buildInfo.snapshotRef) let state = SnapshotRunnerState.BUILDING_SNAPSHOT if (exists) { state = SnapshotRunnerState.READY } await this.runnerService.createSnapshotRunnerEntry(runner.id, buildInfo.snapshotRef, state) } private async handleRunnerSandboxUnknownStateOnDesiredStateStart( sandbox: Sandbox, lockCode: LockCode, ): Promise { const runner = await this.runnerService.findOneOrFail(sandbox.runnerId) if (runner.state !== RunnerState.READY) { return DONT_SYNC_AGAIN } const organization = await this.organizationService.findOne(sandbox.organizationId) const 
runnerAdapter = await this.runnerAdapterFactory.create(runner) let internalRegistry: DockerRegistry let entrypoint: string[] if (!sandbox.buildInfo) { // get internal snapshot name const snapshot = await this.snapshotService.getSnapshotByName(sandbox.snapshot, sandbox.organizationId) const snapshotRef = snapshot.ref internalRegistry = await this.dockerRegistryService.findInternalRegistryBySnapshotRef(snapshotRef, runner.region) if (!internalRegistry) { throw new Error('No registry found for snapshot') } sandbox.snapshot = snapshotRef entrypoint = snapshot.entrypoint } else { sandbox.snapshot = sandbox.buildInfo.snapshotRef entrypoint = this.snapshotService.getEntrypointFromDockerfile(sandbox.buildInfo.dockerfileContent) } const metadata = { ...organization?.sandboxMetadata, sandboxName: sandbox.name, } const result = await runnerAdapter.createSandbox( sandbox, internalRegistry, entrypoint, metadata, this.configService.get('sandboxOtel.endpointUrl'), ) await this.updateSandboxState(sandbox, SandboxState.CREATING, lockCode, undefined, undefined, result?.daemonVersion) // sync states again immediately for sandbox return SYNC_AGAIN } private async handleRunnerSandboxStoppedOrArchivedStateOnDesiredStateStart( sandbox: Sandbox, lockCode: LockCode, ): Promise { const organization = await this.organizationService.findOne(sandbox.organizationId) // check if sandbox is assigned to a runner and if that runner is unschedulable // if it is, move sandbox to prevRunnerId, and set runnerId to null // this will assign a new runner to the sandbox and restore the sandbox from the latest backup if (sandbox.runnerId) { const runner = await this.runnerService.findOneOrFail(sandbox.runnerId) const originalRunnerId = sandbox.runnerId // Store original value const startScoreThreshold = this.configService.get('runnerScore.thresholds.start') || 0 const shouldMoveToNewRunner = (runner.unschedulable || runner.state != RunnerState.READY || runner.availabilityScore < startScoreThreshold) && 
sandbox.backupState === BackupState.COMPLETED // if the runner is unschedulable/not ready and sandbox has a valid backup, move sandbox to a new runner if (shouldMoveToNewRunner) { sandbox.prevRunnerId = originalRunnerId sandbox.runnerId = null await this.sandboxRepository.update( sandbox.id, { updateData: { prevRunnerId: originalRunnerId, runnerId: null, }, }, true, ) } // If the sandbox is on a runner and its backupState is COMPLETED // but there are too many running sandboxes on that runner, move it to a less used runner if (sandbox.backupState === BackupState.COMPLETED) { if (runner.availabilityScore < this.configService.getOrThrow('runnerScore.thresholds.availability')) { const availableRunners = await this.runnerService.findAvailableRunners({ regions: [sandbox.region], sandboxClass: sandbox.class, }) const lessUsedRunners = availableRunners.filter((runner) => runner.id !== originalRunnerId) // temp workaround to move sandboxes to less used runner if (lessUsedRunners.length > 0) { sandbox.prevRunnerId = originalRunnerId sandbox.runnerId = null await this.sandboxRepository.update( sandbox.id, { updateData: { prevRunnerId: originalRunnerId, runnerId: null, }, }, true, ) try { const runnerAdapter = await this.runnerAdapterFactory.create(runner) await runnerAdapter.destroySandbox(sandbox.id) } catch (e) { if (e.response?.status !== 404 && e.statusCode !== 404) { this.logger.error(`Failed to cleanup sandbox ${sandbox.id} on previous runner ${runner.id}:`, e) } } } } } } if (sandbox.runnerId === null) { // if sandbox has no runner, check if backup is completed // if not, set sandbox to error // if backup is completed, get random available runner and start sandbox // use the backup to start the sandbox if (sandbox.backupState !== BackupState.COMPLETED) { await this.updateSandboxState( sandbox, SandboxState.ERROR, lockCode, undefined, 'Sandbox has no runner and backup is not completed', ) return DONT_SYNC_AGAIN } const syncCheck = await 
this.restoreSandboxOnNewRunner(sandbox, lockCode, organization, sandbox.prevRunnerId) if (syncCheck !== null) { return syncCheck } } else { // if sandbox has runner, start sandbox const runner = await this.runnerService.findOneOrFail(sandbox.runnerId) if (runner.state !== RunnerState.READY) { return DONT_SYNC_AGAIN } const runnerAdapter = await this.runnerAdapterFactory.create(runner) const metadata: { [key: string]: string } = { ...organization?.sandboxMetadata } if (sandbox.volumes?.length) { metadata['volumes'] = JSON.stringify( sandbox.volumes.map((v) => ({ volumeId: v.volumeId, mountPath: v.mountPath, subpath: v.subpath })), ) } try { await runnerAdapter.startSandbox(sandbox.id, sandbox.authToken, metadata) } catch (error) { // Check against a list of substrings that should trigger an automatic recovery if (error?.message) { const matchesRecovery = RECOVERY_ERROR_SUBSTRINGS.some((substring) => error.message.toLowerCase().includes(substring.toLowerCase()), ) if (matchesRecovery) { try { await this.restoreSandboxOnNewRunner(sandbox, lockCode, organization, sandbox.runnerId, true) this.logger.warn(`Sandbox ${sandbox.id} transferred to a new runner`) return SYNC_AGAIN } catch (restoreError) { this.logger.warn(`Sandbox ${sandbox.id} recovery attempt failed:`, restoreError.message) } } } throw error } await this.updateSandboxState(sandbox, SandboxState.STARTING, lockCode) return SYNC_AGAIN } return SYNC_AGAIN } // used to check if sandbox is started on runner and update sandbox state accordingly // also used to handle the case where a sandbox is started on a runner and then transferred to a new runner private async handleRunnerSandboxStartedStateCheck(sandbox: Sandbox, lockCode: LockCode): Promise { // edge case when sandbox is being transferred to a new runner if (!sandbox.runnerId) { return SYNC_AGAIN } const runner = await this.runnerService.findOneOrFail(sandbox.runnerId) const runnerAdapter = await this.runnerAdapterFactory.create(runner) const sandboxInfo = 
await runnerAdapter.sandboxInfo(sandbox.id) switch (sandboxInfo.state) { case SandboxState.STARTED: { // if previous backup state is error or completed, set backup state to none if ([BackupState.ERROR, BackupState.COMPLETED].includes(sandbox.backupState)) { await this.updateSandboxState( sandbox, SandboxState.STARTED, lockCode, undefined, undefined, sandboxInfo.daemonVersion, BackupState.NONE, ) return DONT_SYNC_AGAIN } else { await this.updateSandboxState( sandbox, SandboxState.STARTED, lockCode, undefined, undefined, sandboxInfo.daemonVersion, ) // if sandbox was transferred to a new runner, remove it from the old runner if (sandbox.prevRunnerId) { await this.removeSandboxFromPreviousRunner(sandbox) } return DONT_SYNC_AGAIN } } case SandboxState.STARTING: if (await this.checkTimeoutError(sandbox, 5, 'Timeout while starting sandbox')) { return DONT_SYNC_AGAIN } break case SandboxState.RESTORING: if (await this.checkTimeoutError(sandbox, 30, 'Timeout while starting sandbox')) { return DONT_SYNC_AGAIN } break case SandboxState.CREATING: { if (await this.checkTimeoutError(sandbox, 15, 'Timeout while creating sandbox')) { return DONT_SYNC_AGAIN } break } case SandboxState.UNKNOWN: { await this.updateSandboxState(sandbox, SandboxState.UNKNOWN, lockCode) break } case SandboxState.ERROR: { await this.updateSandboxState( sandbox, SandboxState.ERROR, lockCode, undefined, 'Sandbox entered error state on runner during startup wait loop', ) break } case SandboxState.PULLING_SNAPSHOT: { if (await this.checkTimeoutError(sandbox, 30, 'Timeout while pulling snapshot')) { return DONT_SYNC_AGAIN } await this.updateSandboxState(sandbox, SandboxState.PULLING_SNAPSHOT, lockCode) break } case SandboxState.DESTROYED: { this.logger.warn( `Sandbox ${sandbox.id} is in destroyed state while starting on runner ${sandbox.runnerId}, prev runner ${sandbox.prevRunnerId}`, ) await this.checkTimeoutError( sandbox, 15, 'Timeout while starting sandbox: Sandbox is in unknown state on runner', ) 
return DONT_SYNC_AGAIN } // also any other state that is not STARTED default: { this.logger.error(`Sandbox ${sandbox.id} is in unexpected state ${sandboxInfo.state}`) await this.updateSandboxState( sandbox, SandboxState.ERROR, lockCode, undefined, `Sandbox is in unexpected state: ${sandboxInfo.state}`, ) break } } return SYNC_AGAIN } private async checkTimeoutError(sandbox: Sandbox, timeoutMinutes: number, errorReason: string): Promise { if ( sandbox.lastActivityAt && new Date(sandbox.lastActivityAt).getTime() < Date.now() - 1000 * 60 * timeoutMinutes ) { const updateData: Partial = { state: SandboxState.ERROR, errorReason, recoverable: false, } await this.sandboxRepository.update(sandbox.id, { updateData, entity: sandbox }) return true } return false } private async restoreSandboxOnNewRunner( sandbox: Sandbox, lockCode: LockCode, organization: Organization, excludedRunnerId: string, isRecovery?: boolean, ): Promise { let lockKey: string | null = null // Recovery lock to prevent frequent automatic restore attempts if (isRecovery) { lockKey = `sandbox-${sandbox.id}-restored-cooldown` const sixHoursInSeconds = 6 * 60 * 60 const acquired = await this.redisLockProvider.lock(lockKey, sixHoursInSeconds) if (!acquired) { return null } } if (!sandbox.backupRegistryId) { throw new Error('No registry found for backup') } const registry = await this.dockerRegistryService.findOne(sandbox.backupRegistryId) if (!registry) { throw new Error('No registry found for backup') } // make sure we pick a runner that has the base snapshot let baseSnapshot: Snapshot | null = null if (sandbox.snapshot) { try { baseSnapshot = await this.snapshotService.getSnapshotByName(sandbox.snapshot, sandbox.organizationId) } catch (e) { if (e instanceof NotFoundException) { // if the base snapshot is not found, we'll use any available runner later } else { if (isRecovery) { return SYNC_AGAIN } // for all other errors, throw them throw e } } } const snapshotRef = baseSnapshot ? 
baseSnapshot.ref : null let availableRunners: Runner[] = [] const excludedRunnerIds: string[] = excludedRunnerId ? [excludedRunnerId] : [] const runnersWithBaseSnapshot: Runner[] = snapshotRef ? await this.runnerService.findAvailableRunners({ regions: [sandbox.region], sandboxClass: sandbox.class, snapshotRef, excludedRunnerIds, }) : [] if (runnersWithBaseSnapshot.length > 0) { availableRunners = runnersWithBaseSnapshot } else { // if no runner has the base snapshot, get all available runners availableRunners = await this.runnerService.findAvailableRunners({ regions: [sandbox.region], excludedRunnerIds, }) } // check if we have any available runners after filtering if (availableRunners.length === 0) { // Sync state again later. Runners are unavailable if (isRecovery) { await this.redisLockProvider.unlock(lockKey) } return DONT_SYNC_AGAIN } // get random runner from available runners const randomRunnerIndex = (min: number, max: number) => Math.floor(Math.random() * (max - min + 1) + min) const runner = availableRunners[randomRunnerIndex(0, availableRunners.length - 1)] // verify the runner is still available and ready if (!runner || runner.state !== RunnerState.READY || runner.unschedulable) { this.logger.warn(`Selected runner ${runner?.id || 'null'} is no longer available, retrying sandbox assignment`) if (isRecovery) { await this.redisLockProvider.unlock(lockKey) } return SYNC_AGAIN } const runnerAdapter = await this.runnerAdapterFactory.create(runner) const existingBackups = sandbox.existingBackupSnapshots .sort((a, b) => new Date(b.createdAt).getTime() - new Date(a.createdAt).getTime()) .map((existingSnapshot) => existingSnapshot.snapshotName) let validBackup: string | null = null let exists = false for (const existingBackup of existingBackups) { try { if (!validBackup && sandbox.backupSnapshot) { // last snapshot is the current snapshot, so we don't need to check it // just in case, we'll use the value from the backupSnapshot property validBackup = 
sandbox.backupSnapshot } else { validBackup = existingBackup } if (!validBackup) { continue } await runnerAdapter.inspectSnapshotInRegistry(validBackup, registry) exists = true break } catch (error) { this.logger.error(`Failed to check if backup snapshot ${validBackup} exists in registry ${registry.id}:`, error) } } const restoreBackupSnapshotRetryKey = `restore-backup-snapshot-retry-${sandbox.id}` if (!exists) { if (!isRecovery) { // Check retry count - allow up to 3 attempts for transient issues const retryCountRaw = await this.redis.get(restoreBackupSnapshotRetryKey) const retryCount = retryCountRaw ? parseInt(retryCountRaw) : 0 if (retryCount < 3) { // Increment retry count with 10 minute TTL, let syncStates cron pick up the retry later await this.redis.setex(restoreBackupSnapshotRetryKey, 600, String(retryCount + 1)) this.logger.warn( `No valid backup snapshot found for sandbox ${sandbox.id}, retry attempt ${retryCount + 1}/3`, ) return DONT_SYNC_AGAIN } // After 3 retries, error out and clear the retry counter await this.redis.del(restoreBackupSnapshotRetryKey) await this.updateSandboxState( sandbox, SandboxState.ERROR, lockCode, undefined, 'No valid backup snapshot found', ) } else { throw new Error('No valid backup snapshot found') } return SYNC_AGAIN } // Clear the retry counter on success await this.redis.del(restoreBackupSnapshotRetryKey) await this.updateSandboxState(sandbox, SandboxState.RESTORING, lockCode, runner.id) sandbox.snapshot = validBackup const metadata = { ...organization?.sandboxMetadata, sandboxName: sandbox.name, } await runnerAdapter.createSandbox( sandbox, registry, undefined, metadata, this.configService.get('sandboxOtel.endpointUrl'), ) return null } private async removeSandboxFromPreviousRunner(sandbox: Sandbox): Promise { const runner = await this.runnerService.findOne(sandbox.prevRunnerId) if (!runner) { this.logger.warn(`Previously assigned runner ${sandbox.prevRunnerId} for sandbox ${sandbox.id} not found`) await 
this.sandboxRepository.update(sandbox.id, { updateData: { prevRunnerId: null } }, true) return } const runnerAdapter = await this.runnerAdapterFactory.create(runner) try { // First try to destroy the sandbox await runnerAdapter.destroySandbox(sandbox.id) } catch (error) { if (error.response?.status !== 404 && error.statusCode !== 404) { this.logger.error(`Failed to cleanup sandbox ${sandbox.id} on previous runner ${runner.id}:`, error) throw error } } await this.sandboxRepository.update(sandbox.id, { updateData: { prevRunnerId: null } }, true) } } ================================================ FILE: apps/api/src/sandbox/managers/sandbox-actions/sandbox-stop.action.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable } from '@nestjs/common' import { Sandbox } from '../../entities/sandbox.entity' import { SandboxState } from '../../enums/sandbox-state.enum' import { DONT_SYNC_AGAIN, SandboxAction, SyncState, SYNC_AGAIN } from './sandbox.action' import { BackupState } from '../../enums/backup-state.enum' import { RunnerState } from '../../enums/runner-state.enum' import { RunnerService } from '../../services/runner.service' import { RunnerAdapterFactory } from '../../runner-adapter/runnerAdapter' import { SandboxRepository } from '../../repositories/sandbox.repository' import { LockCode, RedisLockProvider } from '../../common/redis-lock.provider' import { WithSpan } from '../../../common/decorators/otel.decorator' @Injectable() export class SandboxStopAction extends SandboxAction { constructor( protected runnerService: RunnerService, protected runnerAdapterFactory: RunnerAdapterFactory, protected sandboxRepository: SandboxRepository, protected redisLockProvider: RedisLockProvider, ) { super(runnerService, runnerAdapterFactory, sandboxRepository, redisLockProvider) } @WithSpan() async run(sandbox: Sandbox, lockCode: LockCode): Promise { const runner = await 
this.runnerService.findOneOrFail(sandbox.runnerId) if (runner.state !== RunnerState.READY) { return DONT_SYNC_AGAIN } const runnerAdapter = await this.runnerAdapterFactory.create(runner) if (sandbox.state === SandboxState.STARTED) { // stop sandbox await runnerAdapter.stopSandbox(sandbox.id) await this.updateSandboxState(sandbox, SandboxState.STOPPING, lockCode) // sync states again immediately for sandbox return SYNC_AGAIN } if (sandbox.state !== SandboxState.STOPPING && sandbox.state !== SandboxState.ERROR) { return DONT_SYNC_AGAIN } const sandboxInfo = await runnerAdapter.sandboxInfo(sandbox.id) if (sandboxInfo.state === SandboxState.STOPPED) { await this.updateSandboxState( sandbox, SandboxState.STOPPED, lockCode, undefined, undefined, undefined, BackupState.NONE, ) return DONT_SYNC_AGAIN } else if (sandboxInfo.state === SandboxState.ERROR) { await this.updateSandboxState( sandbox, SandboxState.ERROR, lockCode, undefined, 'Sandbox is in error state on runner', ) return DONT_SYNC_AGAIN } return SYNC_AGAIN } } ================================================ FILE: apps/api/src/sandbox/managers/sandbox-actions/sandbox.action.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger } from '@nestjs/common' import { RunnerService } from '../../services/runner.service' import { RunnerAdapterFactory } from '../../runner-adapter/runnerAdapter' import { Sandbox } from '../../entities/sandbox.entity' import { SandboxRepository } from '../../repositories/sandbox.repository' import { SandboxState } from '../../enums/sandbox-state.enum' import { BackupState } from '../../enums/backup-state.enum' import { getStateChangeLockKey } from '../../utils/lock-key.util' import { LockCode, RedisLockProvider } from '../../common/redis-lock.provider' export const SYNC_AGAIN = 'sync-again' export const DONT_SYNC_AGAIN = 'dont-sync-again' export type SyncState = typeof SYNC_AGAIN | typeof DONT_SYNC_AGAIN @Injectable() export abstract class SandboxAction { protected readonly logger = new Logger(SandboxAction.name) constructor( protected readonly runnerService: RunnerService, protected runnerAdapterFactory: RunnerAdapterFactory, protected readonly sandboxRepository: SandboxRepository, protected readonly redisLockProvider: RedisLockProvider, ) {} abstract run(sandbox: Sandbox, lockCode: LockCode): Promise protected async updateSandboxState( sandbox: Sandbox, state: SandboxState, expectedLockCode: LockCode, runnerId?: string | null | undefined, errorReason?: string, daemonVersion?: string, backupState?: BackupState, recoverable?: boolean, ) { // check if the lock code is still valid const lockKey = getStateChangeLockKey(sandbox.id) const currentLockCode = await this.redisLockProvider.getCode(lockKey) if (currentLockCode === null) { this.logger.warn( `no lock code found - state update action expired - skipping - sandboxId: ${sandbox.id} - state: ${state}`, ) return } if (expectedLockCode.getCode() !== currentLockCode.getCode()) { this.logger.warn( `lock code mismatch - state update action expired - skipping - sandboxId: ${sandbox.id} - state: ${state}`, ) return } if (state !== SandboxState.ARCHIVED && 
!sandbox.pending) { const err = new Error(`sandbox ${sandbox.id} is not in a pending state`) this.logger.error(err) return } const updateData: Partial = { state, } if (runnerId !== undefined) { updateData.runnerId = runnerId } if (errorReason !== undefined) { updateData.errorReason = errorReason if (state === SandboxState.ERROR) { updateData.recoverable = recoverable ?? false } } if (sandbox.state === SandboxState.ERROR && !sandbox.errorReason) { updateData.errorReason = 'Sandbox is in error state during update' updateData.recoverable = false } if (daemonVersion !== undefined) { updateData.daemonVersion = daemonVersion } if (state == SandboxState.DESTROYED) { updateData.backupState = BackupState.NONE } if (backupState !== undefined) { Object.assign(updateData, Sandbox.getBackupStateUpdate(sandbox, backupState)) } if (recoverable !== undefined) { updateData.recoverable = recoverable } await this.sandboxRepository.update(sandbox.id, { updateData, entity: sandbox }) } } ================================================ FILE: apps/api/src/sandbox/managers/sandbox.manager.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, OnApplicationShutdown } from '@nestjs/common' import { Cron, CronExpression } from '@nestjs/schedule' import { In, IsNull, MoreThanOrEqual, Not, Raw } from 'typeorm' import { randomUUID } from 'crypto' import { SandboxState } from '../enums/sandbox-state.enum' import { SandboxDesiredState } from '../enums/sandbox-desired-state.enum' import { RunnerService } from '../services/runner.service' import { RedisLockProvider, LockCode } from '../common/redis-lock.provider' import { SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION } from '../constants/sandbox.constants' import { SandboxEvents } from '../constants/sandbox-events.constants' import { SandboxStoppedEvent } from '../events/sandbox-stopped.event' import { SandboxStartedEvent } from '../events/sandbox-started.event' import { SandboxArchivedEvent } from '../events/sandbox-archived.event' import { SandboxDestroyedEvent } from '../events/sandbox-destroyed.event' import { SandboxCreatedEvent } from '../events/sandbox-create.event' import { WithInstrumentation, WithSpan } from '../../common/decorators/otel.decorator' import { SandboxStartAction } from './sandbox-actions/sandbox-start.action' import { SandboxStopAction } from './sandbox-actions/sandbox-stop.action' import { SandboxDestroyAction } from './sandbox-actions/sandbox-destroy.action' import { SandboxArchiveAction } from './sandbox-actions/sandbox-archive.action' import { SYNC_AGAIN, DONT_SYNC_AGAIN } from './sandbox-actions/sandbox.action' import { TrackJobExecution } from '../../common/decorators/track-job-execution.decorator' import { TrackableJobExecutions } from '../../common/interfaces/trackable-job-executions' import { setTimeout } from 'timers/promises' import { LogExecution } from '../../common/decorators/log-execution.decorator' import { SandboxRepository } from '../repositories/sandbox.repository' import { getStateChangeLockKey } from '../utils/lock-key.util' import { BackupState } from 
'../enums/backup-state.enum' import { OnAsyncEvent } from '../../common/decorators/on-async-event.decorator' import { sanitizeSandboxError } from '../utils/sanitize-error.util' import { Sandbox } from '../entities/sandbox.entity' import { RunnerAdapterFactory } from '../runner-adapter/runnerAdapter' import { DockerRegistryService } from '../../docker-registry/services/docker-registry.service' import { OrganizationService } from '../../organization/services/organization.service' import { TypedConfigService } from '../../config/typed-config.service' import { BackupManager } from './backup.manager' import { InjectRedis } from '@nestjs-modules/ioredis' import Redis from 'ioredis' @Injectable() export class SandboxManager implements TrackableJobExecutions, OnApplicationShutdown { activeJobs = new Set() private readonly logger = new Logger(SandboxManager.name) constructor( private readonly sandboxRepository: SandboxRepository, private readonly runnerService: RunnerService, private readonly redisLockProvider: RedisLockProvider, private readonly sandboxStartAction: SandboxStartAction, private readonly sandboxStopAction: SandboxStopAction, private readonly sandboxDestroyAction: SandboxDestroyAction, private readonly sandboxArchiveAction: SandboxArchiveAction, private readonly configService: TypedConfigService, private readonly dockerRegistryService: DockerRegistryService, private readonly organizationService: OrganizationService, private readonly runnerAdapterFactory: RunnerAdapterFactory, private readonly backupManager: BackupManager, @InjectRedis() private readonly redis: Redis, ) {} async onApplicationShutdown() { // wait for all active jobs to finish while (this.activeJobs.size > 0) { this.logger.log(`Waiting for ${this.activeJobs.size} active jobs to finish`) await setTimeout(1000) } } @Cron(CronExpression.EVERY_10_SECONDS, { name: 'auto-stop-check' }) @TrackJobExecution() @WithInstrumentation() @LogExecution('auto-stop-check') @WithInstrumentation() async 
autostopCheck(): Promise { const lockKey = 'auto-stop-check-worker-selected' // lock the sync to only run one instance at a time if (!(await this.redisLockProvider.lock(lockKey, 60))) { return } try { const readyRunners = await this.runnerService.findAllReady() // Process all runners in parallel await Promise.all( readyRunners.map(async (runner) => { const sandboxes = await this.sandboxRepository.find({ where: { runnerId: runner.id, organizationId: Not(SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION), state: SandboxState.STARTED, desiredState: SandboxDesiredState.STARTED, pending: Not(true), autoStopInterval: Not(0), lastActivityAt: Raw((alias) => `${alias} < NOW() - INTERVAL '1 minute' * "autoStopInterval"`), }, order: { lastBackupAt: 'ASC', }, take: 100, }) await Promise.all( sandboxes.map(async (sandbox) => { const lockKey = getStateChangeLockKey(sandbox.id) const acquired = await this.redisLockProvider.lock(lockKey, 30) if (!acquired) { return } let updateData: Partial = {} // if auto-delete interval is 0, delete the sandbox immediately if (sandbox.autoDeleteInterval === 0) { updateData = Sandbox.getSoftDeleteUpdate(sandbox) } else { updateData.pending = true updateData.desiredState = SandboxDesiredState.STOPPED } try { await this.sandboxRepository.updateWhere(sandbox.id, { updateData, whereCondition: { pending: false, state: sandbox.state }, }) this.syncInstanceState(sandbox.id).catch(this.logger.error) } catch (error) { this.logger.error(`Error processing auto-stop state for sandbox ${sandbox.id}:`, error) } finally { await this.redisLockProvider.unlock(lockKey) } }), ) }), ) } finally { await this.redisLockProvider.unlock(lockKey) } } @Cron(CronExpression.EVERY_10_SECONDS, { name: 'auto-archive-check' }) @TrackJobExecution() @LogExecution('auto-archive-check') @WithInstrumentation() async autoArchiveCheck(): Promise { const lockKey = 'auto-archive-check-worker-selected' // lock the sync to only run one instance at a time if (!(await 
this.redisLockProvider.lock(lockKey, 60))) { return } try { const sandboxes = await this.sandboxRepository.find({ where: { organizationId: Not(SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION), state: SandboxState.STOPPED, desiredState: SandboxDesiredState.STOPPED, pending: Not(true), lastActivityAt: Raw((alias) => `${alias} < NOW() - INTERVAL '1 minute' * "autoArchiveInterval"`), }, order: { lastBackupAt: 'ASC', }, take: 100, }) await Promise.all( sandboxes.map(async (sandbox) => { const lockKey = getStateChangeLockKey(sandbox.id) const acquired = await this.redisLockProvider.lock(lockKey, 30) if (!acquired) { return } try { const updateData: Partial = { desiredState: SandboxDesiredState.ARCHIVED, } await this.sandboxRepository.updateWhere(sandbox.id, { updateData, whereCondition: { pending: false, state: sandbox.state }, }) this.syncInstanceState(sandbox.id).catch(this.logger.error) } catch (error) { this.logger.error(`Error processing auto-archive state for sandbox ${sandbox.id}:`, error) } finally { await this.redisLockProvider.unlock(lockKey) } }), ) } finally { await this.redisLockProvider.unlock(lockKey) } } @Cron(CronExpression.EVERY_10_SECONDS, { name: 'auto-delete-check' }) @TrackJobExecution() @LogExecution('auto-delete-check') @WithInstrumentation() async autoDeleteCheck(): Promise { const lockKey = 'auto-delete-check-worker-selected' // lock the sync to only run one instance at a time if (!(await this.redisLockProvider.lock(lockKey, 60))) { return } try { const readyRunners = await this.runnerService.findAllReady() // Process all runners in parallel await Promise.all( readyRunners.map(async (runner) => { const sandboxes = await this.sandboxRepository.find({ where: { runnerId: runner.id, organizationId: Not(SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION), state: SandboxState.STOPPED, desiredState: SandboxDesiredState.STOPPED, pending: Not(true), autoDeleteInterval: MoreThanOrEqual(0), lastActivityAt: Raw((alias) => `${alias} < NOW() - INTERVAL '1 minute' * 
"autoDeleteInterval"`), }, order: { lastActivityAt: 'ASC', }, take: 100, }) await Promise.all( sandboxes.map(async (sandbox) => { const lockKey = getStateChangeLockKey(sandbox.id) const acquired = await this.redisLockProvider.lock(lockKey, 30) if (!acquired) { return } try { const updateData = Sandbox.getSoftDeleteUpdate(sandbox) await this.sandboxRepository.updateWhere(sandbox.id, { updateData, whereCondition: { pending: false, state: sandbox.state }, }) this.syncInstanceState(sandbox.id).catch(this.logger.error) } catch (error) { this.logger.error(`Error processing auto-delete state for sandbox ${sandbox.id}:`, error) } finally { await this.redisLockProvider.unlock(lockKey) } }), ) }), ) } finally { await this.redisLockProvider.unlock(lockKey) } } @Cron(CronExpression.EVERY_10_SECONDS, { name: 'draining-runner-sandboxes-check' }) @TrackJobExecution() @LogExecution('draining-runner-sandboxes-check') @WithInstrumentation() async drainingRunnerSandboxesCheck(): Promise { const lockKey = 'draining-runner-sandboxes-check' const lockTtl = 10 * 60 // seconds (10 min) if (!(await this.redisLockProvider.lock(lockKey, lockTtl))) { return } try { const skip = (await this.redis.get('draining-runner-sandboxes-skip')) || 0 const drainingRunners = await this.runnerService.findDrainingPaginated(Number(skip), 10) this.logger.debug(`Checking ${drainingRunners.length} draining runners for sandbox migration (offset: ${skip})`) if (drainingRunners.length === 0) { await this.redis.set('draining-runner-sandboxes-skip', 0) return } await this.redis.set('draining-runner-sandboxes-skip', Number(skip) + drainingRunners.length) await Promise.allSettled( drainingRunners.map(async (runner) => { try { const sandboxes = await this.sandboxRepository.find({ where: { runnerId: runner.id, state: SandboxState.STOPPED, desiredState: SandboxDesiredState.STOPPED, backupState: BackupState.COMPLETED, backupSnapshot: Not(IsNull()), }, take: 100, }) this.logger.debug( `Found ${sandboxes.length} eligible 
sandboxes on draining runner ${runner.id} for migration`, ) await Promise.allSettled( sandboxes.map(async (sandbox) => { const sandboxLockKey = getStateChangeLockKey(sandbox.id) const hasSandboxLock = await this.redisLockProvider.lock(sandboxLockKey, 60) if (!hasSandboxLock) { return } try { const startScoreThreshold = this.configService.get('runnerScore.thresholds.start') || 0 const targetRunner = await this.runnerService.getRandomAvailableRunner({ snapshotRef: sandbox.backupSnapshot, excludedRunnerIds: [runner.id], availabilityScoreThreshold: startScoreThreshold, }) await this.reassignSandbox(sandbox, runner.id, targetRunner.id) } catch (e) { this.logger.error(`Error migrating sandbox ${sandbox.id} from draining runner ${runner.id}`, e) } finally { await this.redisLockProvider.unlock(sandboxLockKey) } }), ) // Archive ERROR sandboxes that have completed backups on this draining runner await this.archiveErroredSandboxesOnDrainingRunner(runner.id) // Recover recoverable ERROR sandboxes in-place (expand disk) so they become STOPPED await this.recoverRecoverableSandboxesOnDrainingRunner(runner.id) // Retry backups for non-started sandboxes with errored backup state await this.retryErroredBackupsOnDrainingRunner(runner.id) } catch (e) { this.logger.error(`Error processing draining runner ${runner.id} for sandbox migration`, e) } }), ) } finally { await this.redisLockProvider.unlock(lockKey) } } private async archiveErroredSandboxesOnDrainingRunner(runnerId: string): Promise { const erroredSandboxes = await this.sandboxRepository.find({ where: { runnerId, state: SandboxState.ERROR, recoverable: false, desiredState: Not(In([SandboxDesiredState.DESTROYED, SandboxDesiredState.ARCHIVED])), backupState: BackupState.COMPLETED, backupSnapshot: Not(IsNull()), }, take: 100, }) if (erroredSandboxes.length === 0) { return } this.logger.debug( `Found ${erroredSandboxes.length} errored sandboxes with completed backups on draining runner ${runnerId}`, ) await Promise.allSettled( 
erroredSandboxes.map(async (sandbox) => { const sandboxLockKey = getStateChangeLockKey(sandbox.id) const acquired = await this.redisLockProvider.lock(sandboxLockKey, 30) if (!acquired) { return } try { this.logger.warn( `Setting desired state to ARCHIVED for errored sandbox ${sandbox.id} on draining runner ${runnerId} (previous desired state: ${sandbox.desiredState})`, ) const updateData: Partial = { desiredState: SandboxDesiredState.ARCHIVED, } await this.sandboxRepository.updateWhere(sandbox.id, { updateData, whereCondition: { state: SandboxState.ERROR }, }) } catch (e) { this.logger.error( `Failed to set desired state to ARCHIVED for errored sandbox ${sandbox.id} on draining runner ${runnerId}`, e, ) } finally { await this.redisLockProvider.unlock(sandboxLockKey) } }), ) } private static readonly DRAINING_BACKUP_RETRY_TTL_SECONDS = 12 * 60 * 60 // 12 hours private static readonly DRAINING_RECOVER_TTL_SECONDS = 12 * 60 * 60 // 12 hours private async retryErroredBackupsOnDrainingRunner(runnerId: string): Promise { const erroredSandboxes = await this.sandboxRepository.find({ where: [ { runnerId, state: SandboxState.STOPPED, recoverable: false, desiredState: SandboxDesiredState.STOPPED, backupState: BackupState.ERROR, }, { runnerId, state: SandboxState.ERROR, recoverable: false, backupState: In([BackupState.ERROR, BackupState.NONE]), desiredState: Not(SandboxDesiredState.DESTROYED), }, ], take: 100, }) if (erroredSandboxes.length === 0) { return } this.logger.debug(`Found ${erroredSandboxes.length} sandboxes with errored backups on draining runner ${runnerId}`) await Promise.allSettled( erroredSandboxes.map(async (sandbox) => { const redisKey = `draining:backup-retry:${sandbox.id}` // Check if we've already retried within the last 12 hours const alreadyRetried = await this.redis.exists(redisKey) if (alreadyRetried) { this.logger.debug( `Skipping backup retry for sandbox ${sandbox.id} on draining runner ${runnerId} — already retried within 12 hours`, ) return } try { 
await this.backupManager.setBackupPending(sandbox) await this.redis.set(redisKey, '1', 'EX', SandboxManager.DRAINING_BACKUP_RETRY_TTL_SECONDS) this.logger.log(`Retried backup for sandbox ${sandbox.id} on draining runner ${runnerId}`) } catch (e) { this.logger.error(`Failed to retry backup for sandbox ${sandbox.id} on draining runner ${runnerId}`, e) } }), ) } private async recoverRecoverableSandboxesOnDrainingRunner(runnerId: string): Promise { const recoverableSandboxes = await this.sandboxRepository.find({ where: { runnerId, recoverable: true, desiredState: Not(In([SandboxDesiredState.DESTROYED])), backupSnapshot: Not(IsNull()), }, take: 100, }) if (recoverableSandboxes.length === 0) { return } this.logger.debug(`Found ${recoverableSandboxes.length} recoverable sandboxes on draining runner ${runnerId}`) const runner = await this.runnerService.findOneOrFail(runnerId) if (runner.apiVersion === '2') { this.logger.debug( `Skipping recovery for sandboxes on draining runner ${runnerId} — not supported for runner API v2`, ) return } const runnerAdapter = await this.runnerAdapterFactory.create(runner) await Promise.allSettled( recoverableSandboxes.map(async (sandbox) => { const redisKey = `draining:recover:${sandbox.id}` // Check if we've already attempted recovery within the last 12 hours const alreadyAttempted = await this.redis.exists(redisKey) if (alreadyAttempted) { this.logger.debug( `Skipping recovery for sandbox ${sandbox.id} on draining runner ${runnerId} — already attempted within 12 hours`, ) return } const sandboxLockKey = getStateChangeLockKey(sandbox.id) const acquired = await this.redisLockProvider.lock(sandboxLockKey, 60) if (!acquired) { return } try { await runnerAdapter.recoverSandbox(sandbox) const updateData: Partial = { state: SandboxState.STOPPED, desiredState: SandboxDesiredState.STOPPED, errorReason: null, recoverable: false, backupState: BackupState.NONE, } await this.sandboxRepository.updateWhere(sandbox.id, { updateData, whereCondition: { 
pending: false, state: sandbox.state }, })
this.logger.log(`Recovered sandbox ${sandbox.id} on draining runner ${runnerId}`)
} catch (e) {
// Remember the failed attempt in redis so the cron backs off until the TTL expires.
await this.redis.set(redisKey, '1', 'EX', SandboxManager.DRAINING_RECOVER_TTL_SECONDS)
this.logger.error(`Failed to recover sandbox ${sandbox.id} on draining runner ${runnerId}`, e)
} finally {
await this.redisLockProvider.unlock(sandboxLockKey)
}
}),
)
}

/**
 * Migrates a sandbox off a draining runner: re-creates it on the new runner from its
 * backup snapshot, re-checks freshness, flips runnerId in the DB (rolling back the new
 * runner's copy on failure), then best-effort deletes the copy on the old runner.
 *
 * Order matters: create-on-new must succeed and the DB must point at the new runner
 * before the old copy is touched, so a failure at any step leaves a usable sandbox.
 */
private async reassignSandbox(sandbox: Sandbox, oldRunnerId: string, newRunnerId: string): Promise {
  this.logger.debug(
    `Starting sandbox reassignment for ${sandbox.id} from runner ${oldRunnerId} to runner ${newRunnerId}`,
  )
  // Safety check: ensure sandbox is not pending
  if (sandbox.pending) {
    this.logger.warn(
      `Sandbox ${sandbox.id} is pending, skipping reassignment from runner ${oldRunnerId} to runner ${newRunnerId}`,
    )
    return
  }
  if (!sandbox.backupRegistryId) {
    throw new Error(`Sandbox ${sandbox.id} has no backup registry`)
  }
  const registry = await this.dockerRegistryService.findOne(sandbox.backupRegistryId)
  if (!registry) {
    throw new Error(`Registry ${sandbox.backupRegistryId} not found for sandbox ${sandbox.id}`)
  }
  const organization = await this.organizationService.findOne(sandbox.organizationId)
  const metadata = {
    ...organization?.sandboxMetadata,
    sandboxName: sandbox.name,
  }
  const newRunner = await this.runnerService.findOneOrFail(newRunnerId)
  const newRunnerAdapter = await this.runnerAdapterFactory.create(newRunner)
  // Temporarily point the entity at the backup snapshot so createSandbox pulls from it;
  // restored on failure below.
  const originalSnapshot = sandbox.snapshot
  sandbox.snapshot = sandbox.backupSnapshot
  try {
    // Pass undefined for entrypoint as the backup snapshot already has it baked in and use skipStart
    await newRunnerAdapter.createSandbox(sandbox, registry, undefined, metadata, undefined, true)
    this.logger.debug(`Created sandbox ${sandbox.id} on new runner ${newRunnerId} with skipStart`)
  } catch (e) {
    // Restore original snapshot on failure
    sandbox.snapshot = originalSnapshot
    this.logger.error(`Failed to create sandbox ${sandbox.id} on new runner ${newRunnerId}`, e)
    throw e
  }
  // Re-fetch sandbox from DB to get fresh state (the in-memory entity may be stale)
  const freshSandbox = await this.sandboxRepository.findOne({ where: { id: sandbox.id } })
  if (!freshSandbox || freshSandbox.pending) {
    this.logger.warn(
      `Sandbox ${sandbox.id} is pending or missing, aborting reassignment from runner ${oldRunnerId} to runner ${newRunnerId}`,
    )
    // Roll back: remove the sandbox from the new runner since we won't complete the migration
    try {
      await newRunnerAdapter.destroySandbox(sandbox.id)
      this.logger.debug(`Rolled back sandbox ${sandbox.id} creation on new runner ${newRunnerId}`)
    } catch (rollbackErr) {
      this.logger.error(
        `Failed to roll back sandbox ${sandbox.id} on new runner ${newRunnerId} after pending check`,
        rollbackErr,
      )
    }
    return
  }
  // Update the sandbox to use the new runner; roll back on failure
  try {
    const updateData: Partial = {
      prevRunnerId: sandbox.runnerId,
      runnerId: newRunnerId,
    }
    // NOTE(review): third argument `true` — presumably forces/flushes the write; confirm
    // against SandboxRepository.update.
    await this.sandboxRepository.update(
      sandbox.id,
      {
        updateData,
      },
      true,
    )
  } catch (e) {
    this.logger.error(`Failed to update sandbox ${sandbox.id} runnerId to ${newRunnerId}, rolling back`, e)
    // Roll back: remove the sandbox from the new runner
    try {
      await newRunnerAdapter.destroySandbox(sandbox.id)
      this.logger.debug(`Rolled back sandbox ${sandbox.id} creation on new runner ${newRunnerId}`)
    } catch (rollbackErr) {
      this.logger.error(
        `Failed to roll back sandbox ${sandbox.id} on new runner ${newRunnerId} after DB update failure`,
        rollbackErr,
      )
    }
    throw e
  }
  this.logger.log(`Migrated sandbox ${sandbox.id} from draining runner ${oldRunnerId} to runner ${newRunnerId}`)
  // Best effort deletion of the sandbox on the old runner
  try {
    const oldRunner = await this.runnerService.findOne(oldRunnerId)
    if (oldRunner) {
      const oldRunnerAdapter = await this.runnerAdapterFactory.create(oldRunner)
      await oldRunnerAdapter.destroySandbox(sandbox.id)
      this.logger.debug(`Deleted sandbox ${sandbox.id} from old runner ${oldRunnerId}`)
    }
  } catch (e) {
    this.logger.warn(`Best effort deletion failed for sandbox ${sandbox.id} on old runner ${oldRunnerId}`, e)
  }
}

/**
 * Cron: streams sandboxes whose state differs from desiredState (excluding terminal
 * states and the ARCHIVED desired state, which has its own crons) and kicks off
 * syncInstanceState for each, with bounded concurrency and a per-run cap.
 */
@Cron(CronExpression.EVERY_10_SECONDS, { name: 'sync-states' })
@TrackJobExecution()
@WithInstrumentation()
@LogExecution('sync-states')
async syncStates(): Promise {
  const globalLockKey = 'sync-states'
  const lockTtl = 10 * 60 // seconds (10 min)
  if (!(await this.redisLockProvider.lock(globalLockKey, lockTtl))) {
    return
  }
  try {
    const queryBuilder = this.sandboxRepository
      .createQueryBuilder('sandbox')
      .select(['sandbox.id'])
      .where('sandbox.state NOT IN (:...excludedStates)', {
        excludedStates: [
          SandboxState.DESTROYED,
          SandboxState.ERROR,
          SandboxState.BUILD_FAILED,
          SandboxState.RESIZING,
        ],
      })
      .andWhere('sandbox."desiredState"::text != sandbox.state::text')
      .andWhere('sandbox."desiredState"::text != :archived', { archived: SandboxDesiredState.ARCHIVED })
      .orderBy('sandbox."lastActivityAt"', 'DESC')
    // Stream rather than load everything: the candidate set can be large.
    const stream = await queryBuilder.stream()
    let processedCount = 0
    const maxProcessPerRun = 1000
    const pendingProcesses: Promise[] = []
    try {
      await new Promise((resolve, reject) => {
        stream.on('data', async (row: any) => {
          if (processedCount >= maxProcessPerRun) {
            resolve()
            return
          }
          const lockKey = getStateChangeLockKey(row.sandbox_id)
          if (await this.redisLockProvider.isLocked(lockKey)) {
            // Sandbox is already being processed, skip it
            return
          }
          // Process sandbox asynchronously but track the promise
          const processPromise = this.syncInstanceState(row.sandbox_id)
          pendingProcesses.push(processPromise)
          processedCount++
          // Limit concurrent processing to avoid overwhelming the system
          if (pendingProcesses.length >= 10) {
            stream.pause()
            Promise.allSettled(pendingProcesses.splice(0, pendingProcesses.length))
              .then(() => stream.resume())
              .catch(reject)
          }
        })
        stream.on('end', () => {
          // Wait for any still-running syncs before finishing the run.
          Promise.all(pendingProcesses)
            .then(() => {
              resolve()
            })
            .catch(reject)
        })
        stream.on('error', reject)
      })
    } finally {
      if (!stream.destroyed) {
        stream.destroy()
      }
    }
  } finally {
    await this.redisLockProvider.unlock(globalLockKey)
  }
} @Cron(CronExpression.EVERY_10_SECONDS, { name: 'sync-archived-desired-states' }) @TrackJobExecution() @LogExecution('sync-archived-desired-states') @WithInstrumentation() async syncArchivedDesiredStates(): Promise { const lockKey = 'sync-archived-desired-states' if (!(await this.redisLockProvider.lock(lockKey, 30))) { return } const sandboxes = await this.sandboxRepository.find({ where: { state: In([SandboxState.ARCHIVING, SandboxState.STOPPED, SandboxState.ERROR]), desiredState: SandboxDesiredState.ARCHIVED, }, take: 100, order: { updatedAt: 'ASC', }, }) await Promise.all( sandboxes.map(async (sandbox) => { this.syncInstanceState(sandbox.id) }), ) await this.redisLockProvider.unlock(lockKey) } @Cron(CronExpression.EVERY_10_SECONDS, { name: 'sync-archived-completed-states' }) @TrackJobExecution() @LogExecution('sync-archived-completed-states') async syncArchivedCompletedStates(): Promise { const lockKey = 'sync-archived-completed-states' if (!(await this.redisLockProvider.lock(lockKey, 30))) { return } const sandboxes = await this.sandboxRepository.find({ where: { state: In([SandboxState.ARCHIVING, SandboxState.STOPPED, SandboxState.ERROR]), desiredState: SandboxDesiredState.ARCHIVED, backupState: BackupState.COMPLETED, }, take: 100, order: { updatedAt: 'ASC', }, }) await Promise.allSettled( sandboxes.map(async (sandbox) => { await this.syncInstanceState(sandbox.id) }), ) await this.redisLockProvider.unlock(lockKey) } /** * Sync the state of a sandbox. * * Loop to handle SYNC_AGAIN without releasing the lock or re-fetching. * The sandbox entity is mutated in-place by repository.update() on each iteration, * and the lock guarantees no concurrent modification. */ async syncInstanceState(sandboxId: string): Promise { // Track the start time of the sync operation. const startedAt = new Date() // Generate a random lock code to prevent race condition if sandbox action continues after the lock expires. 
const lockCode = new LockCode(randomUUID())
// Prevent syncState cron from running multiple instances of the same sandbox.
const lockKey = getStateChangeLockKey(sandboxId)
const acquired = await this.redisLockProvider.lock(lockKey, 30, lockCode)
if (!acquired) {
  return
}
try {
  const sandbox = await this.sandboxRepository.findOneOrFail({
    where: { id: sandboxId },
  })
  // Keep driving transitions for at most ~10s under one lock acquisition;
  // a later cron tick picks up where this one left off.
  while (new Date().getTime() - startedAt.getTime() <= 10000) {
    if (
      [SandboxState.DESTROYED, SandboxState.BUILD_FAILED, SandboxState.RESIZING].includes(sandbox.state) ||
      (sandbox.state === SandboxState.ERROR && sandbox.desiredState !== SandboxDesiredState.ARCHIVED)
    ) {
      // Break sync loop if sandbox reaches a terminal state.
      // However, should allow ERROR → ARCHIVED transition (e.g., during runner draining).
      break
    }
    // String() compares the two enum types by their shared textual values.
    if (String(sandbox.state) === String(sandbox.desiredState)) {
      this.logger.warn(
        `Sandbox ${sandboxId} is already in the desired state ${sandbox.desiredState}, skipping sync`,
      )
      // Break sync loop if sandbox is already in the desired state.
      break
    }
    // Rely on the sandbox action to return SYNC_AGAIN or DONT_SYNC_AGAIN to continue/break the sync loop.
let syncState = DONT_SYNC_AGAIN try { switch (sandbox.desiredState) { case SandboxDesiredState.STARTED: { syncState = await this.sandboxStartAction.run(sandbox, lockCode) break } case SandboxDesiredState.STOPPED: { syncState = await this.sandboxStopAction.run(sandbox, lockCode) break } case SandboxDesiredState.DESTROYED: { syncState = await this.sandboxDestroyAction.run(sandbox, lockCode) break } case SandboxDesiredState.ARCHIVED: { syncState = await this.sandboxArchiveAction.run(sandbox, lockCode) break } } } catch (error) { this.logger.error(`Error processing desired state for sandbox ${sandboxId}:`, error) const { recoverable, errorReason } = sanitizeSandboxError(error) const updateData: Partial = { state: SandboxState.ERROR, errorReason, recoverable, } await this.sandboxRepository.update(sandboxId, { updateData, entity: sandbox }) // Break sync loop since sandbox is in error state. break } // Do not sync again for v2 runners // Job completion will update the sandbox state if (sandbox.runnerId && (await this.runnerService.getRunnerApiVersion(sandbox.runnerId)) === '2') { break } // Break sync loop if sandbox action returned DONT_SYNC_AGAIN. 
if (syncState !== SYNC_AGAIN) {
  break
}
}
} finally {
  await this.redisLockProvider.unlock(lockKey)
}
}

// Lifecycle event handlers: each sandbox event immediately triggers a state sync
// for the affected sandbox instead of waiting for the next cron tick.
@OnAsyncEvent({
  event: SandboxEvents.ARCHIVED,
})
@TrackJobExecution()
@WithSpan()
private async handleSandboxArchivedEvent(event: SandboxArchivedEvent) {
  await this.syncInstanceState(event.sandbox.id)
}

@OnAsyncEvent({
  event: SandboxEvents.DESTROYED,
})
@TrackJobExecution()
@WithSpan()
private async handleSandboxDestroyedEvent(event: SandboxDestroyedEvent) {
  await this.syncInstanceState(event.sandbox.id)
}

@OnAsyncEvent({
  event: SandboxEvents.STARTED,
})
@TrackJobExecution()
@WithSpan()
private async handleSandboxStartedEvent(event: SandboxStartedEvent) {
  await this.syncInstanceState(event.sandbox.id)
}

@OnAsyncEvent({
  event: SandboxEvents.STOPPED,
})
@TrackJobExecution()
@WithSpan()
private async handleSandboxStoppedEvent(event: SandboxStoppedEvent) {
  await this.syncInstanceState(event.sandbox.id)
}

@OnAsyncEvent({
  event: SandboxEvents.CREATED,
})
@TrackJobExecution()
@WithSpan()
private async handleSandboxCreatedEvent(event: SandboxCreatedEvent) {
  await this.syncInstanceState(event.sandbox.id)
}
}

================================================ FILE: apps/api/src/sandbox/managers/snapshot.manager.ts ================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, NotFoundException, OnApplicationShutdown } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { Cron, CronExpression } from '@nestjs/schedule' import { In, IsNull, LessThan, Not, Repository } from 'typeorm' import { DockerRegistryService } from '../../docker-registry/services/docker-registry.service' import { Snapshot } from '../entities/snapshot.entity' import { SnapshotState } from '../enums/snapshot-state.enum' import { SnapshotRunner } from '../entities/snapshot-runner.entity' import { Runner } from '../entities/runner.entity' import { DockerRegistry } from '../../docker-registry/entities/docker-registry.entity' import { RunnerState } from '../enums/runner-state.enum' import { SnapshotRunnerState } from '../enums/snapshot-runner-state.enum' import { v4 as uuidv4 } from 'uuid' import { RunnerNotReadyError } from '../errors/runner-not-ready.error' import { RedisLockProvider } from '../common/redis-lock.provider' import { OrganizationService } from '../../organization/services/organization.service' import { BuildInfo } from '../entities/build-info.entity' import { fromAxiosError } from '../../common/utils/from-axios-error' import { InjectRedis } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { RunnerService } from '../services/runner.service' import { TrackableJobExecutions } from '../../common/interfaces/trackable-job-executions' import { TrackJobExecution } from '../../common/decorators/track-job-execution.decorator' import { setTimeout as sleep } from 'timers/promises' import { LogExecution } from '../../common/decorators/log-execution.decorator' import { WithInstrumentation } from '../../common/decorators/otel.decorator' import { RunnerAdapterFactory } from '../runner-adapter/runnerAdapter' import { SnapshotStateError } from '../errors/snapshot-state-error' import { SnapshotEvents } from '../constants/snapshot-events' import { 
SnapshotCreatedEvent } from '../events/snapshot-created.event' import { SnapshotService } from '../services/snapshot.service' import { OnAsyncEvent } from '../../common/decorators/on-async-event.decorator' import { parseDockerImage } from '../../common/utils/docker-image.util' import { SandboxState } from '../enums/sandbox-state.enum' import { SandboxDesiredState } from '../enums/sandbox-desired-state.enum' import { BackupState } from '../enums/backup-state.enum' import { BadRequestError } from '../../exceptions/bad-request.exception' import { SandboxRepository } from '../repositories/sandbox.repository' import { SnapshotInfoResponse } from '@daytonaio/runner-api-client' import { SnapshotActivatedEvent } from '../events/snapshot-activated.event' const SYNC_AGAIN = 'sync-again' const DONT_SYNC_AGAIN = 'dont-sync-again' const DEFAULT_SNAPSHOT_DEACTIVATION_TIMEOUT_MINUTES = 14 * 24 * 60 // 14 days type SyncState = typeof SYNC_AGAIN | typeof DONT_SYNC_AGAIN @Injectable() export class SnapshotManager implements TrackableJobExecutions, OnApplicationShutdown { activeJobs = new Set() private readonly logger = new Logger(SnapshotManager.name) // generate a unique instance id used to ensure only one instance of the worker is handing the // snapshot activation private readonly instanceId = uuidv4() constructor( @InjectRedis() private readonly redis: Redis, @InjectRepository(Snapshot) private readonly snapshotRepository: Repository, @InjectRepository(SnapshotRunner) private readonly snapshotRunnerRepository: Repository, @InjectRepository(Runner) private readonly runnerRepository: Repository, private readonly sandboxRepository: SandboxRepository, @InjectRepository(BuildInfo) private readonly buildInfoRepository: Repository, private readonly runnerService: RunnerService, private readonly dockerRegistryService: DockerRegistryService, private readonly runnerAdapterFactory: RunnerAdapterFactory, private readonly redisLockProvider: RedisLockProvider, private readonly 
organizationService: OrganizationService,
private readonly snapshotService: SnapshotService,
) {}

async onApplicationShutdown() {
  // wait for all active jobs to finish
  while (this.activeJobs.size > 0) {
    this.logger.log(`Waiting for ${this.activeJobs.size} active jobs to finish`)
    await sleep(1000)
  }
}

/**
 * Cron: pages through ACTIVE snapshots of non-suspended organizations (cursor kept
 * in redis under 'sync-runner-snapshots-skip') and propagates each to eligible runners.
 */
@Cron(CronExpression.EVERY_5_SECONDS, { name: 'sync-runner-snapshots', waitForCompletion: true })
@TrackJobExecution()
@LogExecution('sync-runner-snapshots')
@WithInstrumentation()
async syncRunnerSnapshots() {
  const lockKey = 'sync-runner-snapshots-lock'
  const lockTtl = 10 * 60 // seconds (10 min)
  if (!(await this.redisLockProvider.lock(lockKey, lockTtl))) {
    return
  }
  const skip = (await this.redis.get('sync-runner-snapshots-skip')) || 0
  const snapshots = await this.snapshotRepository
    .createQueryBuilder('snapshot')
    .innerJoin('organization', 'org', 'org.id = snapshot.organizationId')
    .where('snapshot.state = :snapshotState', { snapshotState: SnapshotState.ACTIVE })
    .andWhere('org.suspended = false')
    .orderBy('snapshot.createdAt', 'ASC')
    .take(100)
    .skip(Number(skip))
    .getMany()
  if (snapshots.length === 0) {
    // Reached the end of the list: reset the paging cursor for the next sweep.
    await this.redisLockProvider.unlock(lockKey)
    await this.redis.set('sync-runner-snapshots-skip', 0)
    return
  }
  // Advance the cursor before processing so the next tick works on the next page.
  await this.redis.set('sync-runner-snapshots-skip', Number(skip) + snapshots.length)
  const results = await Promise.allSettled(
    snapshots.map(async (snapshot) => {
      const regions = await this.snapshotService.getSnapshotRegions(snapshot.id)
      // Shared regions have no owning organization; org regions belong to the snapshot's org.
      const sharedRegionIds = regions.filter((r) => r.organizationId === null).map((r) => r.id)
      const organizationRegionIds = regions
        .filter((r) => r.organizationId === snapshot.organizationId)
        .map((r) => r.id)
      return this.propagateSnapshotToRunners(snapshot, sharedRegionIds, organizationRegionIds)
    }),
  )
  // Log all promise errors
  results.forEach((result) => {
    if (result.status === 'rejected') {
      this.logger.error(`Error propagating snapshot to runners: ${fromAxiosError(result.reason)}`)
    }
  })
  await
this.redisLockProvider.unlock(lockKey) } @Cron(CronExpression.EVERY_10_SECONDS, { name: 'sync-runner-snapshot-states', waitForCompletion: true }) @TrackJobExecution() @LogExecution('sync-runner-snapshot-states') @WithInstrumentation() async syncRunnerSnapshotStates() { // this approach is not ideal, as if the number of runners is large, this will take a long time // also, if some snapshots stuck in a "pulling" state, they will infest the queue // todo: find a better approach const lockKey = 'sync-runner-snapshot-states-lock' if (!(await this.redisLockProvider.lock(lockKey, 30))) { return } const runnerSnapshots = await this.snapshotRunnerRepository .createQueryBuilder('snapshotRunner') .where({ state: In([ SnapshotRunnerState.PULLING_SNAPSHOT, SnapshotRunnerState.BUILDING_SNAPSHOT, SnapshotRunnerState.REMOVING, ]), }) .orderBy('RANDOM()') .take(100) .getMany() await Promise.allSettled( runnerSnapshots.map((snapshotRunner) => { return this.syncRunnerSnapshotState(snapshotRunner).catch((err) => { if (err.code !== 'ECONNRESET') { if (err instanceof RunnerNotReadyError) { this.logger.debug( `Runner ${snapshotRunner.runnerId} is not ready while trying to sync snapshot runner ${snapshotRunner.id}: ${err}`, ) return } this.logger.error(`Error syncing runner snapshot state ${snapshotRunner.id}: ${fromAxiosError(err)}`) this.snapshotRunnerRepository.update(snapshotRunner.id, { state: SnapshotRunnerState.ERROR, errorReason: fromAxiosError(err).message, }) } }) }), ) await this.redisLockProvider.unlock(lockKey) } async syncRunnerSnapshotState(snapshotRunner: SnapshotRunner): Promise { const runner = await this.runnerService.findOne(snapshotRunner.runnerId) if (!runner) { // cleanup the snapshot runner record if the runner is not found // this can happen if the runner is deleted from the database without cleaning up the snapshot runners await this.snapshotRunnerRepository.delete(snapshotRunner.id) this.logger.warn( `Runner ${snapshotRunner.runnerId} not found while trying to 
process snapshot runner ${snapshotRunner.id}. Snapshot runner has been removed.`, ) return } if (runner.state !== RunnerState.READY) { // todo: handle timeout policy // for now just remove the snapshot runner record if the runner is not ready await this.snapshotRunnerRepository.delete(snapshotRunner.id) throw new RunnerNotReadyError(`Runner ${runner.id} is not ready`) } switch (snapshotRunner.state) { case SnapshotRunnerState.PULLING_SNAPSHOT: await this.handleSnapshotRunnerStatePullingSnapshot(snapshotRunner, runner) break case SnapshotRunnerState.BUILDING_SNAPSHOT: await this.handleSnapshotRunnerStateBuildingSnapshot(snapshotRunner, runner) break case SnapshotRunnerState.REMOVING: await this.handleSnapshotRunnerStateRemoving(snapshotRunner, runner) break } } async propagateSnapshotToRunners(snapshot: Snapshot, sharedRegionIds: string[], organizationRegionIds: string[]) { // todo: remove try catch block and implement error handling try { // get all runners in the regions to propagate to const runners = await this.runnerRepository.find({ where: { state: RunnerState.READY, unschedulable: Not(true), region: In([...sharedRegionIds, ...organizationRegionIds]), }, }) const sharedRunners = runners.filter((runner) => sharedRegionIds.includes(runner.region)) const sharedRunnerIds = sharedRunners.map((runner) => runner.id) const organizationRunners = runners.filter((runner) => organizationRegionIds.includes(runner.region)) const organizationRunnerIds = organizationRunners.map((runner) => runner.id) // get all runners where the snapshot is already propagated to (or in progress) const sharedSnapshotRunners = await this.snapshotRunnerRepository.find({ where: { snapshotRef: snapshot.ref, state: In([SnapshotRunnerState.READY, SnapshotRunnerState.PULLING_SNAPSHOT]), runnerId: In(sharedRunnerIds), }, }) const sharedSnapshotRunnersDistinctRunnersIds = new Set( sharedSnapshotRunners.map((snapshotRunner) => snapshotRunner.runnerId), ) const organizationSnapshotRunners = await 
this.snapshotRunnerRepository.find({
  where: {
    snapshotRef: snapshot.ref,
    state: In([SnapshotRunnerState.READY, SnapshotRunnerState.PULLING_SNAPSHOT]),
    runnerId: In(organizationRunnerIds),
  },
})
const organizationSnapshotRunnersDistinctRunnersIds = new Set(
  organizationSnapshotRunners.map((snapshotRunner) => snapshotRunner.runnerId),
)
// get all runners where the snapshot is not propagated to
const unallocatedSharedRunners = sharedRunners.filter(
  (runner) => !sharedSnapshotRunnersDistinctRunnersIds.has(runner.id),
)
const unallocatedOrganizationRunners = organizationRunners.filter(
  (runner) => !organizationSnapshotRunnersDistinctRunnersIds.has(runner.id),
)
const runnersToPropagateTo: Runner[] = []
// propagate the snapshot to all organization runners
runnersToPropagateTo.push(...unallocatedOrganizationRunners)
// respect the propagation limit for shared runners
const sharedRunnersPropagateLimit = Math.max(
  0,
  Math.ceil(sharedRunners.length / 3) - sharedSnapshotRunnersDistinctRunnersIds.size,
)
// Shuffle so the same shared runners are not always picked first.
runnersToPropagateTo.push(
  ...unallocatedSharedRunners.sort(() => Math.random() - 0.5).slice(0, sharedRunnersPropagateLimit),
)
if (runnersToPropagateTo.length === 0) {
  return
}
// regionId -> registry
const internalRegistriesMap = new Map<string, DockerRegistry>()
for (const regionId of [...sharedRegionIds, ...organizationRegionIds]) {
  const registry = await this.dockerRegistryService.findInternalRegistryBySnapshotRef(snapshot.ref, regionId)
  if (registry) {
    internalRegistriesMap.set(regionId, registry)
  }
}
const results = await Promise.allSettled(
  runnersToPropagateTo.map(async (runner) => {
    const internalRegistry = internalRegistriesMap.get(runner.region)
    if (!internalRegistry) {
      throw new Error(`No internal registry found for snapshot ${snapshot.ref} in region ${runner.region}`)
    }
    const snapshotRunner = await this.runnerService.getSnapshotRunner(runner.id, snapshot.ref)
    try {
      if (!snapshotRunner) {
        await this.runnerService.createSnapshotRunnerEntry(
          runner.id,
          snapshot.ref,
          SnapshotRunnerState.PULLING_SNAPSHOT,
        )
        await this.pullSnapshotRunner(runner, snapshot.ref, internalRegistry)
      } else if (snapshotRunner.state === SnapshotRunnerState.PULLING_SNAPSHOT) {
        await this.handleSnapshotRunnerStatePullingSnapshot(snapshotRunner, runner)
      }
    } catch (err) {
      this.logger.error(`Error propagating snapshot to runner ${runner.id}: ${fromAxiosError(err)}`)
      // FIX: snapshotRunner is null on the just-created-entry path above, so the
      // previous unconditional `snapshotRunner.state = …` threw a TypeError inside
      // the error handler. Guard, and fall back to updating by criteria for the
      // entry created via createSnapshotRunnerEntry.
      if (snapshotRunner) {
        snapshotRunner.state = SnapshotRunnerState.ERROR
        snapshotRunner.errorReason = err.message
        await this.snapshotRunnerRepository.update(snapshotRunner.id, snapshotRunner)
      } else {
        await this.snapshotRunnerRepository.update(
          { runnerId: runner.id, snapshotRef: snapshot.ref },
          { state: SnapshotRunnerState.ERROR, errorReason: err.message },
        )
      }
    }
  }),
)
results.forEach((result) => {
  if (result.status === 'rejected') {
    this.logger.error(result.reason)
  }
})
} catch (err) {
  this.logger.error(err)
}
}

/**
 * Asks the runner to pull a snapshot. Fire-and-poll: the runner acks immediately and
 * completion is observed by the sync-runner-snapshot-states cron.
 */
async pullSnapshotRunner(
  runner: Runner,
  snapshotRef: string,
  registry?: DockerRegistry,
  destinationRegistry?: DockerRegistry,
  destinationRef?: string,
) {
  const runnerAdapter = await this.runnerAdapterFactory.create(runner)
  // Runner returns immediately; polling for completion is handled by syncRunnerSnapshotStates cron
  await runnerAdapter.pullSnapshot(snapshotRef, registry, destinationRegistry, destinationRef)
}

/**
 * Polls a PULLING_SNAPSHOT entry: marks READY once the snapshot is visible on the
 * runner, ERROR on a definitive SnapshotStateError or after a 60-minute timeout,
 * and re-issues the pull if the entry has been pending for over 10 minutes.
 */
async handleSnapshotRunnerStatePullingSnapshot(snapshotRunner: SnapshotRunner, runner: Runner) {
  const runnerAdapter = await this.runnerAdapterFactory.create(runner)
  try {
    await runnerAdapter.getSnapshotInfo(snapshotRunner.snapshotRef)
    snapshotRunner.state = SnapshotRunnerState.READY
    await this.snapshotRunnerRepository.save(snapshotRunner)
    return
  } catch (err) {
    if (err instanceof SnapshotStateError) {
      snapshotRunner.state = SnapshotRunnerState.ERROR
      snapshotRunner.errorReason = err.errorReason
      await this.snapshotRunnerRepository.save(snapshotRunner)
      return
    }
  }
  const timeoutMinutes = 60
  const timeoutMs = timeoutMinutes * 60 * 1000
  if (Date.now() - snapshotRunner.updatedAt.getTime() > timeoutMs) {
    snapshotRunner.state = SnapshotRunnerState.ERROR
    snapshotRunner.errorReason = 'Timeout while pulling snapshot to runner'
    await
this.snapshotRunnerRepository.save(snapshotRunner)
return
}
// Re-issue the pull if the entry has been waiting longer than the retry window.
const retryTimeoutMinutes = 10
const retryTimeoutMs = retryTimeoutMinutes * 60 * 1000
if (Date.now() - snapshotRunner.createdAt.getTime() > retryTimeoutMs) {
  const internalRegistry = await this.dockerRegistryService.findInternalRegistryBySnapshotRef(
    snapshotRunner.snapshotRef,
    runner.region,
  )
  if (!internalRegistry) {
    throw new Error(
      `No internal registry found for snapshot ${snapshotRunner.snapshotRef} in region ${runner.region}`,
    )
  }
  await this.pullSnapshotRunner(runner, snapshotRunner.snapshotRef, internalRegistry)
  return
}
}

/**
 * Polls a BUILDING_SNAPSHOT entry: READY once the snapshot is visible on the runner,
 * ERROR on a definitive SnapshotStateError; other failures are left for the next poll.
 */
async handleSnapshotRunnerStateBuildingSnapshot(snapshotRunner: SnapshotRunner, runner: Runner) {
  const runnerAdapter = await this.runnerAdapterFactory.create(runner)
  try {
    await runnerAdapter.getSnapshotInfo(snapshotRunner.snapshotRef)
    snapshotRunner.state = SnapshotRunnerState.READY
    await this.snapshotRunnerRepository.save(snapshotRunner)
    return
  } catch (err) {
    if (err instanceof SnapshotStateError) {
      snapshotRunner.state = SnapshotRunnerState.ERROR
      snapshotRunner.errorReason = err.errorReason
      await this.snapshotRunnerRepository.save(snapshotRunner)
      return
    }
  }
}

// Pulls stopped sandboxes' backup snapshots to another runner to prepare for reassignment during draining
@Cron(CronExpression.EVERY_10_SECONDS, { name: 'migrate-draining-runner-snapshots', waitForCompletion: true })
@TrackJobExecution()
@LogExecution('migrate-draining-runner-snapshots')
@WithInstrumentation()
private async handleMigrateDrainingRunnerSnapshots() {
  const lockKey = 'migrate-draining-runner-snapshots'
  const hasLock = await this.redisLockProvider.lock(lockKey, 60)
  if (!hasLock) {
    return
  }
  try {
    const drainingRunners = await this.runnerRepository.find({
      where: {
        draining: true,
        state: RunnerState.READY,
      },
    })
    this.logger.debug(`Checking ${drainingRunners.length} draining runners for snapshot migration`)
    await Promise.allSettled(
      drainingRunners.map(async (runner) => {
        try {
          // Only stopped, fully backed-up sandboxes are eligible for migration.
          const sandboxes = await this.sandboxRepository.find({
            where: {
              runnerId: runner.id,
              state: SandboxState.STOPPED,
              desiredState: SandboxDesiredState.STOPPED,
              backupState: BackupState.COMPLETED,
              backupSnapshot: Not(IsNull()),
            },
            take: 100,
          })
          this.logger.debug(
            `Found ${sandboxes.length} eligible sandboxes on draining runner ${runner.id} for snapshot migration`,
          )
          await Promise.allSettled(
            sandboxes.map(async (sandbox) => {
              // Long (1h) per-sandbox lock: deliberately held across the whole pull
              // so the same sandbox is not migrated twice in parallel.
              const sandboxLockKey = `draining-runner-snapshot-migration:${sandbox.id}`
              const hasSandboxLock = await this.redisLockProvider.lock(sandboxLockKey, 3600)
              if (!hasSandboxLock) {
                return
              }
              try {
                // Get an available runner in the same region with the same class
                const targetRunner = await this.runnerService.getRandomAvailableRunner({
                  regions: [sandbox.region],
                  sandboxClass: sandbox.class,
                  excludedRunnerIds: [runner.id],
                })
                // Check if snapshot runner entry already exists
                const existingEntry = await this.runnerService.getSnapshotRunner(
                  targetRunner.id,
                  sandbox.backupSnapshot,
                )
                if (existingEntry) {
                  if (existingEntry.state === SnapshotRunnerState.ERROR) {
                    // Clean up the failed entry so we can retry
                    this.logger.warn(
                      `Removing ERROR snapshot runner entry ${existingEntry.id} for runner ${targetRunner.id} and snapshot ${sandbox.backupSnapshot} to allow retry`,
                    )
                    await this.snapshotRunnerRepository.delete(existingEntry.id)
                  } else {
                    this.logger.debug(
                      `Snapshot runner entry already exists for runner ${targetRunner.id} and snapshot ${sandbox.backupSnapshot} (state: ${existingEntry.state})`,
                    )
                    // Do not unlock to avoid duplicates
                    return
                  }
                }
                // Find the backup registry to use as source for the pull
                const registry = sandbox.backupRegistryId
                  ? await this.dockerRegistryService.findOne(sandbox.backupRegistryId)
                  : await this.dockerRegistryService.findInternalRegistryBySnapshotRef(
                      sandbox.backupSnapshot,
                      targetRunner.region,
                    )
                if (!registry) {
                  this.logger.warn(
                    `No registry found for backup snapshot ${sandbox.backupSnapshot} of sandbox ${sandbox.id}`,
                  )
                  await this.redisLockProvider.unlock(sandboxLockKey)
                  return
                }
                // Create snapshot runner entry on the target runner
                await this.runnerService.createSnapshotRunnerEntry(
                  targetRunner.id,
                  sandbox.backupSnapshot,
                  SnapshotRunnerState.PULLING_SNAPSHOT,
                )
                await this.pullSnapshotRunner(targetRunner, sandbox.backupSnapshot, registry)
                this.logger.log(
                  `Created snapshot runner entry for sandbox ${sandbox.id} backup ${sandbox.backupSnapshot} on runner ${targetRunner.id} (migrating from draining runner ${runner.id})`,
                )
                await this.redisLockProvider.unlock(sandboxLockKey)
              } catch (e) {
                if (e instanceof BadRequestError && e.message === 'No available runners') {
                  this.logger.warn(
                    `No available runners found in region ${sandbox.region} for sandbox ${sandbox.id} snapshot migration`,
                  )
                } else {
                  this.logger.error(`Error migrating snapshot for sandbox ${sandbox.id}`, e)
                }
                await this.redisLockProvider.unlock(sandboxLockKey)
              }
            }),
          )
        } catch (e) {
          this.logger.error(`Error processing draining runner ${runner.id} for snapshot migration`, e)
        }
      }),
    )
  } finally {
    await this.redisLockProvider.unlock(lockKey)
  }
}

/**
 * Cron: for each snapshot in REMOVING state, marks its runner entries REMOVING
 * (unless another ACTIVE snapshot shares the same ref) and deletes the DB row.
 */
@Cron(CronExpression.EVERY_10_SECONDS, { name: 'check-snapshot-cleanup' })
@TrackJobExecution()
@LogExecution('check-snapshot-cleanup')
@WithInstrumentation()
async checkSnapshotCleanup() {
  const lockKey = 'check-snapshot-cleanup-lock'
  if (!(await this.redisLockProvider.lock(lockKey, 30))) {
    return
  }
  const snapshots = await this.snapshotRepository.find({
    where: {
      state: SnapshotState.REMOVING,
    },
  })
  await Promise.all(
    snapshots.map(async (snapshot) => {
      const countActiveSnapshots = await this.snapshotRepository.count({
        where: {
          state: SnapshotState.ACTIVE,
          ref: snapshot.ref,
        },
      })
// Only remove snapshot runners if no other snapshots depend on them if (countActiveSnapshots === 0) { await this.snapshotRunnerRepository.update( { snapshotRef: snapshot.ref, }, { state: SnapshotRunnerState.REMOVING, }, ) } await this.snapshotRepository.remove(snapshot) }), ) await this.redisLockProvider.unlock(lockKey) } @Cron(CronExpression.EVERY_10_SECONDS, { name: 'check-snapshot-state' }) @TrackJobExecution() @LogExecution('check-snapshot-state') @WithInstrumentation() async checkSnapshotState() { // the first time the snapshot is created it needs to be pushed to the internal registry // before propagating to the runners // this cron job will process the snapshot states until the snapshot is active (or error) // get all snapshots const snapshots = await this.snapshotRepository.find({ where: { state: Not(In([SnapshotState.ACTIVE, SnapshotState.ERROR, SnapshotState.BUILD_FAILED, SnapshotState.INACTIVE])), }, }) await Promise.all( snapshots.map(async (snapshot) => { this.syncSnapshotState(snapshot.id) }), ) } async syncSnapshotState(snapshotId: string): Promise { const lockKey = `sync-snapshot-state-${snapshotId}` if (!(await this.redisLockProvider.lock(lockKey, 720))) { return } const snapshot = await this.snapshotRepository.findOne({ where: { id: snapshotId }, }) if ( !snapshot || [SnapshotState.ACTIVE, SnapshotState.ERROR, SnapshotState.BUILD_FAILED, SnapshotState.INACTIVE].includes( snapshot.state, ) ) { await this.redisLockProvider.unlock(lockKey) return } let syncState = DONT_SYNC_AGAIN try { switch (snapshot.state) { case SnapshotState.PENDING: syncState = await this.handleSnapshotStatePending(snapshot) break case SnapshotState.PULLING: case SnapshotState.BUILDING: syncState = await this.handleCheckInitialRunnerSnapshot(snapshot) break case SnapshotState.REMOVING: syncState = await this.handleSnapshotStateRemoving(snapshot) break } } catch (error) { if (error.code === 'ECONNRESET') { syncState = SYNC_AGAIN } else { const message = error.message || 
String(error)
await this.updateSnapshotState(snapshot.id, SnapshotState.ERROR, message)
}
}
await this.redisLockProvider.unlock(lockKey)
if (syncState === SYNC_AGAIN) {
  // Fire-and-forget re-entry after releasing the lock; the per-snapshot lock
  // serializes the next round. NOTE(review): a rejection here is unhandled —
  // confirm this is intentional.
  this.syncSnapshotState(snapshotId)
}
}

/**
 * Removes a snapshot from a runner. Missing runner or missing snapshotRef both
 * degrade to deleting the DB record; actual removal on the runner is best-effort.
 */
async handleSnapshotRunnerStateRemoving(snapshotRunner: SnapshotRunner, runner: Runner) {
  if (!runner) {
    // generally this should not happen
    // in case the runner has been deleted from the database, delete the snapshot runner record
    const errorMessage = `Runner not found while trying to remove snapshot ${snapshotRunner.snapshotRef} from runner ${snapshotRunner.runnerId}`
    this.logger.warn(errorMessage)
    this.snapshotRunnerRepository.delete(snapshotRunner.id).catch((err) => {
      this.logger.error(fromAxiosError(err))
    })
    return
  }
  if (!snapshotRunner.snapshotRef) {
    // this should never happen
    // remove the snapshot runner record (it will be recreated again by the snapshot propagation job)
    this.logger.warn(`Internal snapshot name not found for snapshot runner ${snapshotRunner.id}`)
    this.snapshotRunnerRepository.delete(snapshotRunner.id).catch((err) => {
      this.logger.error(fromAxiosError(err))
    })
    return
  }
  const runnerAdapter = await this.runnerAdapterFactory.create(runner)
  const exists = await runnerAdapter.snapshotExists(snapshotRunner.snapshotRef)
  if (!exists) {
    await this.snapshotRunnerRepository.delete(snapshotRunner.id)
  } else {
    // just in case the snapshot is still there
    runnerAdapter.removeSnapshot(snapshotRunner.snapshotRef).catch((err) => {
      // this should not happen, and is not critical
      // if the runner can not remove the snapshot, just delete the snapshot runner record
      this.snapshotRunnerRepository.delete(snapshotRunner.id).catch((err) => {
        this.logger.error(fromAxiosError(err))
      })
      // and log the error for tracking
      const errorMessage = `Failed to do just in case remove snapshot ${snapshotRunner.snapshotRef} from runner ${runner.id}: ${fromAxiosError(err)}`
      this.logger.warn(errorMessage)
    })
  }
}

async handleSnapshotStateRemoving(snapshot: Snapshot): Promise {
const snapshotRunnerItems = await this.snapshotRunnerRepository.find({ where: { snapshotRef: snapshot.ref, }, }) if (snapshotRunnerItems.length === 0) { await this.snapshotRepository.remove(snapshot) } return DONT_SYNC_AGAIN } async handleCheckInitialRunnerSnapshot(snapshot: Snapshot): Promise { // Check for timeout - allow up to 30 minutes const timeoutMinutes = 30 const timeoutMs = timeoutMinutes * 60 * 1000 if (Date.now() - snapshot.updatedAt.getTime() > timeoutMs) { await this.updateSnapshotState(snapshot.id, SnapshotState.ERROR, 'Timeout processing snapshot on initial runner') return DONT_SYNC_AGAIN } // Check if the snapshot ref is already set and it is already on the runner const snapshotRunner = await this.snapshotRunnerRepository.findOne({ where: { snapshotRef: snapshot.ref, runnerId: snapshot.initialRunnerId, }, }) if (snapshot.ref && snapshotRunner) { if (snapshotRunner.state === SnapshotRunnerState.READY) { await this.updateSnapshotState(snapshot.id, SnapshotState.ACTIVE) return DONT_SYNC_AGAIN } else if (snapshotRunner.state === SnapshotRunnerState.ERROR) { await this.snapshotRunnerRepository.delete(snapshotRunner.id) } } const runner = await this.runnerService.findOneOrFail(snapshot.initialRunnerId) const runnerAdapter = await this.runnerAdapterFactory.create(runner) const initialImageRefOnRunner = snapshot.buildInfo ? 
snapshot.buildInfo.snapshotRef : snapshot.ref let snapshotInfoResponse: SnapshotInfoResponse try { snapshotInfoResponse = await runnerAdapter.getSnapshotInfo(initialImageRefOnRunner) } catch (error) { if (error instanceof SnapshotStateError) { throw error } else { return DONT_SYNC_AGAIN } } const internalRegistry = await this.dockerRegistryService.getAvailableInternalRegistry(runner.region) if (!internalRegistry) { throw new Error('No internal registry found for snapshot') } await this.processSnapshotDigest( snapshot, internalRegistry, snapshotInfoResponse.hash, snapshotInfoResponse.sizeGB, snapshotInfoResponse.entrypoint, ) try { await runnerAdapter.inspectSnapshotInRegistry(snapshot.ref, internalRegistry) } catch (error) { this.logger.error(`Failed to inspect snapshot ${snapshot.ref} in registry: ${error}`) await this.snapshotRepository.save(snapshot) return DONT_SYNC_AGAIN } try { await runnerAdapter.removeSnapshot(initialImageRefOnRunner) } catch (error) { this.logger.error(`Failed to remove snapshot ${snapshot.imageName}: ${fromAxiosError(error)}`) } // For pull snapshots, best effort cleanup the original image now that we've computed the ref from it // Only cleanup if there's no other snapshot in processing state using the same image if (!snapshot.buildInfo) { try { const anotherSnapshot = await this.snapshotRepository.findOne({ where: { imageName: snapshot.imageName, id: Not(snapshot.id), state: Not(In([SnapshotState.ACTIVE, SnapshotState.INACTIVE])), }, }) if (!anotherSnapshot) { await runnerAdapter.removeSnapshot(snapshot.imageName) } } catch (err) { this.logger.error(`Failed to cleanup original image ${snapshot.imageName}: ${fromAxiosError(err)}`) } } if (snapshotRunner) { snapshotRunner.state = SnapshotRunnerState.READY await this.snapshotRunnerRepository.save(snapshotRunner) } else { await this.runnerService.createSnapshotRunnerEntry(runner.id, snapshot.ref, SnapshotRunnerState.READY) } await this.updateSnapshotState(snapshot.id, SnapshotState.ACTIVE) 
// Best effort removal of old snapshot from transient registry const transientRegistry = await this.dockerRegistryService.findTransientRegistryBySnapshotImageName( snapshot.imageName, runner.region, ) if (transientRegistry) { try { await this.dockerRegistryService.removeImage(snapshot.imageName, transientRegistry.id) } catch (error) { if (error.statusCode === 404) { // image not found, just return return DONT_SYNC_AGAIN } this.logger.error('Failed to remove transient image:', fromAxiosError(error)) } } return DONT_SYNC_AGAIN } async processPullOnInitialRunner(snapshot: Snapshot, runner: Runner) { // Check for timeout - allow up to 30 minutes const timeoutMinutes = 30 const timeoutMs = timeoutMinutes * 60 * 1000 if (Date.now() - snapshot.updatedAt.getTime() > timeoutMs) { await this.updateSnapshotState( snapshot.id, SnapshotState.ERROR, 'Timeout processing snapshot pull on initial runner', ) return DONT_SYNC_AGAIN } let sourceRegistry = await this.dockerRegistryService.findSourceRegistryBySnapshotImageName( snapshot.imageName, runner.region, snapshot.organizationId, ) if (!sourceRegistry) { sourceRegistry = await this.dockerRegistryService.getDefaultDockerHubRegistry() } const destinationRegistry = await this.dockerRegistryService.getAvailableInternalRegistry(runner.region) // Fire pull request (runner returns 202 immediately) // Post-processing (digest, cleanup) is handled by handleCheckInitialRunnerSnapshot on the next poll cycle try { await this.pullSnapshotRunner( runner, snapshot.imageName, sourceRegistry, destinationRegistry ?? undefined, snapshot.ref ? 
snapshot.ref : undefined, ) } catch (err) { // Validation errors are still returned synchronously await this.updateSnapshotState(snapshot.id, SnapshotState.ERROR, err.message) throw err } } async processBuildOnRunner(snapshot: Snapshot, runner: Runner) { try { const registry = await this.dockerRegistryService.getAvailableInternalRegistry(runner.region) const sourceRegistries = await this.dockerRegistryService.getSourceRegistriesForDockerfile( snapshot.buildInfo.dockerfileContent, snapshot.organizationId, ) const runnerAdapter = await this.runnerAdapterFactory.create(runner) registry.url = registry.url.replace(/^(https?:\/\/)/, '') // Runner returns immediately; polling for completion is handled by handleCheckInitialRunnerSnapshot await runnerAdapter.buildSnapshot( snapshot.buildInfo, snapshot.organizationId, sourceRegistries.length > 0 ? sourceRegistries : undefined, registry ?? undefined, true, ) } catch (err) { this.logger.error(`Error building snapshot ${snapshot.name}: ${fromAxiosError(err)}`) await this.updateSnapshotState(snapshot.id, SnapshotState.BUILD_FAILED, fromAxiosError(err).message) } } async handleSnapshotStatePending(snapshot: Snapshot): Promise { let initialRunner: Runner | undefined = undefined if (!snapshot.initialRunnerId) { // TODO: get only runners where the base snapshot is available (extract from buildInfo) const excludedRunnerIds = snapshot.buildInfo ? 
await this.runnerService.getRunnersWithMultipleSnapshotsBuilding() : await this.runnerService.getRunnersWithMultipleSnapshotsPulling() try { const regions = await this.snapshotService.getSnapshotRegions(snapshot.id) if (!regions.length) { throw new Error('No regions found for snapshot') } initialRunner = await this.runnerService.getRandomAvailableRunner({ regions: regions.map((region) => region.id), excludedRunnerIds: excludedRunnerIds, }) } catch (error) { this.logger.warn(`Failed to get initial runner: ${fromAxiosError(error)}`) } if (!initialRunner) { // No runners available, retry later return DONT_SYNC_AGAIN } snapshot.initialRunnerId = initialRunner.id await this.snapshotRepository.save(snapshot) } else { initialRunner = await this.runnerService.findOneOrFail(snapshot.initialRunnerId) } if (snapshot.buildInfo) { await this.updateSnapshotState(snapshot.id, SnapshotState.BUILDING) await this.runnerService.createSnapshotRunnerEntry( initialRunner.id, snapshot.buildInfo.snapshotRef, SnapshotRunnerState.BUILDING_SNAPSHOT, ) await this.processBuildOnRunner(snapshot, initialRunner) } else { if (!snapshot.ref) { const runnerAdapter = await this.runnerAdapterFactory.create(initialRunner) const registry = await this.dockerRegistryService.findRegistryByImageName( snapshot.imageName, initialRunner.region, snapshot.organizationId, ) const image = parseDockerImage(snapshot.imageName) if (registry && !image.registry) { image.registry = registry.url.replace(/^(https?:\/\/)/, '') } const imageName = image.getFullName() const internalRegistry = await this.dockerRegistryService.getAvailableInternalRegistry(initialRunner.region) if (!internalRegistry) { throw new Error('No internal registry found for snapshot') } const snapshotDigestResponse = await runnerAdapter.inspectSnapshotInRegistry(imageName, registry) await this.processSnapshotDigest( snapshot, internalRegistry, snapshotDigestResponse.hash, snapshotDigestResponse.sizeGB, ) await this.snapshotRepository.save(snapshot) } 
await this.updateSnapshotState(snapshot.id, SnapshotState.PULLING) await this.runnerService.createSnapshotRunnerEntry( initialRunner.id, snapshot.ref, SnapshotRunnerState.PULLING_SNAPSHOT, ) await this.processPullOnInitialRunner(snapshot, initialRunner) } return SYNC_AGAIN } private async updateSnapshotState(snapshotId: string, state: SnapshotState, errorReason?: string) { const partialUpdate: Partial = { state, } if (state === SnapshotState.ACTIVE) { partialUpdate.lastUsedAt = new Date() } if (errorReason !== undefined) { partialUpdate.errorReason = errorReason } const result = await this.snapshotRepository.update( { id: snapshotId, }, partialUpdate, ) if (!result.affected) { throw new NotFoundException(`Snapshot with ID ${snapshotId} not found`) } } @Cron(CronExpression.EVERY_HOUR, { name: 'cleanup-old-buildinfo-snapshot-runners' }) @TrackJobExecution() @LogExecution('cleanup-old-buildinfo-snapshot-runners') @WithInstrumentation() async cleanupOldBuildInfoSnapshotRunners() { const lockKey = 'cleanup-old-buildinfo-snapshots-lock' if (!(await this.redisLockProvider.lock(lockKey, 300))) { return } try { const oneDayAgo = new Date() oneDayAgo.setDate(oneDayAgo.getDate() - 1) // Find all BuildInfo entities that haven't been used in over a day const oldBuildInfos = await this.buildInfoRepository.find({ where: { lastUsedAt: LessThan(oneDayAgo), }, }) if (oldBuildInfos.length === 0) { return } const snapshotRefs = oldBuildInfos.map((buildInfo) => buildInfo.snapshotRef) const result = await this.snapshotRunnerRepository.update( { snapshotRef: In(snapshotRefs) }, { state: SnapshotRunnerState.REMOVING }, ) if (result.affected > 0) { this.logger.debug(`Marked ${result.affected} SnapshotRunners for removal due to unused BuildInfo`) } } catch (error) { this.logger.error(`Failed to mark old BuildInfo SnapshotRunners for removal: ${fromAxiosError(error)}`) } finally { await this.redisLockProvider.unlock(lockKey) } } @Cron(CronExpression.EVERY_10_MINUTES, { name: 
'deactivate-old-snapshots' }) @TrackJobExecution() @LogExecution('deactivate-old-snapshots') @WithInstrumentation() async deactivateOldSnapshots() { const lockKey = 'deactivate-old-snapshots-lock' if (!(await this.redisLockProvider.lock(lockKey, 300))) { return } try { const cutoff = `NOW() - INTERVAL '1 minute' * COALESCE(org."snapshot_deactivation_timeout_minutes", ${DEFAULT_SNAPSHOT_DEACTIVATION_TIMEOUT_MINUTES})` const oldSnapshots = await this.snapshotRepository .createQueryBuilder('snapshot') .leftJoin('organization', 'org', `org."id" = snapshot."organizationId"`) .where('snapshot.general = false') .andWhere('snapshot.state = :snapshotState', { snapshotState: SnapshotState.ACTIVE }) .andWhere(`(snapshot."lastUsedAt" IS NULL OR snapshot."lastUsedAt" < ${cutoff})`) .andWhere(`snapshot."createdAt" < ${cutoff}`) .andWhere( `NOT EXISTS ( SELECT 1 FROM snapshot s WHERE s."ref" = snapshot."ref" AND s.state = :activeState AND (s."lastUsedAt" >= ${cutoff} OR s."createdAt" >= ${cutoff}) )`, { activeState: SnapshotState.ACTIVE, }, ) .take(100) .getMany() if (oldSnapshots.length === 0) { return } // Deactivate the snapshots const snapshotIds = oldSnapshots.map((snapshot) => snapshot.id) await this.snapshotRepository.update({ id: In(snapshotIds) }, { state: SnapshotState.INACTIVE }) // Get internal names of deactivated snapshots const refs = oldSnapshots.map((snapshot) => snapshot.ref).filter((name) => name) // Filter out null/undefined values if (refs.length > 0) { // Set associated SnapshotRunner records to REMOVING state const result = await this.snapshotRunnerRepository.update( { snapshotRef: In(refs) }, { state: SnapshotRunnerState.REMOVING }, ) this.logger.debug( `Deactivated ${oldSnapshots.length} snapshots and marked ${result.affected} SnapshotRunners for removal`, ) } } catch (error) { this.logger.error(`Failed to deactivate old snapshots: ${fromAxiosError(error)}`) } finally { await this.redisLockProvider.unlock(lockKey) } } 
@Cron(CronExpression.EVERY_10_MINUTES, { name: 'cleanup-inactive-snapshots-from-runners' }) @TrackJobExecution() @LogExecution('cleanup-inactive-snapshots-from-runners') @WithInstrumentation() async cleanupInactiveSnapshotsFromRunners() { const lockKey = 'cleanup-inactive-snapshots-from-runners-lock' if (!(await this.redisLockProvider.lock(lockKey, 300))) { return } try { // Only fetch inactive snapshots that have associated snapshot runner entries const queryResult = await this.snapshotRepository .createQueryBuilder('snapshot') .select('snapshot."ref"') .where('snapshot.state = :snapshotState', { snapshotState: SnapshotState.INACTIVE }) .andWhere('snapshot."ref" IS NOT NULL') .andWhereExists( this.snapshotRunnerRepository .createQueryBuilder('snapshot_runner') .select('1') .where('snapshot_runner."snapshotRef" = snapshot."ref"') .andWhere('snapshot_runner.state != :snapshotRunnerState', { snapshotRunnerState: SnapshotRunnerState.REMOVING, }), ) .andWhere( () => { const query = this.snapshotRepository .createQueryBuilder('s') .select('1') .where('s."ref" = snapshot."ref"') .andWhere('s.state = :snapshotState') return `NOT EXISTS (${query.getQuery()})` }, { snapshotState: SnapshotState.ACTIVE, }, ) .take(100) .getRawMany() const inactiveSnapshotRefs = queryResult.map((result) => result.ref) if (inactiveSnapshotRefs.length > 0) { // Set associated SnapshotRunner records to REMOVING state const result = await this.snapshotRunnerRepository.update( { snapshotRef: In(inactiveSnapshotRefs) }, { state: SnapshotRunnerState.REMOVING }, ) this.logger.debug(`Marked ${result.affected} SnapshotRunners for removal`) } } catch (error) { this.logger.error(`Failed to cleanup inactive snapshots from runners: ${fromAxiosError(error)}`) } finally { await this.redisLockProvider.unlock(lockKey) } } private async processSnapshotDigest( snapshot: Snapshot, internalRegistry: DockerRegistry, hash: string, sizeGB: number, entrypoint?: string[] | string, ) { let shouldSave = false if 
(!snapshot.ref) { shouldSave = true const sanitizedUrl = internalRegistry.url.replace(/^https?:\/\//, '') snapshot.ref = `${sanitizedUrl}/${internalRegistry.project || 'daytona'}/daytona-${hash}:daytona` } if (!snapshot.size) { shouldSave = true const organization = await this.organizationService.findOne(snapshot.organizationId) if (!organization) { throw new NotFoundException(`Organization with ID ${snapshot.organizationId} not found`) } const MAX_SIZE_GB = organization.maxSnapshotSize if (sizeGB > MAX_SIZE_GB) { await this.updateSnapshotState( snapshot.id, SnapshotState.ERROR, `Snapshot size (${sizeGB.toFixed(2)}GB) exceeds maximum allowed size of ${MAX_SIZE_GB}GB`, ) return DONT_SYNC_AGAIN } snapshot.size = sizeGB } // If entrypoint is not explicitly set, set it from snapshotInfoResponse if (!snapshot.entrypoint) { if (entrypoint && entrypoint.length > 0) { shouldSave = true if (Array.isArray(entrypoint)) { snapshot.entrypoint = entrypoint } else { snapshot.entrypoint = [entrypoint] } } } if (shouldSave) { await this.snapshotRepository.save(snapshot) } } @OnAsyncEvent({ event: SnapshotEvents.CREATED, }) private async handleSnapshotCreatedEvent(event: SnapshotCreatedEvent) { await this.syncSnapshotState(event.snapshot.id) } @OnAsyncEvent({ event: SnapshotEvents.ACTIVATED, }) private async handleSnapshotActivatedEvent(event: SnapshotActivatedEvent) { await this.syncSnapshotState(event.snapshot.id) } } ================================================ FILE: apps/api/src/sandbox/managers/volume.manager.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, OnApplicationBootstrap, OnApplicationShutdown, OnModuleInit } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { Repository, In } from 'typeorm' import { Volume } from '../entities/volume.entity' import { VolumeState } from '../enums/volume-state.enum' import { Cron, CronExpression, SchedulerRegistry } from '@nestjs/schedule' import { S3Client, CreateBucketCommand, ListBucketsCommand, PutBucketTaggingCommand } from '@aws-sdk/client-s3' import { InjectRedis } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { RedisLockProvider } from '../common/redis-lock.provider' import { TypedConfigService } from '../../config/typed-config.service' import { deleteS3Bucket } from '../../common/utils/delete-s3-bucket' import { TrackableJobExecutions } from '../../common/interfaces/trackable-job-executions' import { TrackJobExecution } from '../../common/decorators/track-job-execution.decorator' import { setTimeout } from 'timers/promises' import { LogExecution } from '../../common/decorators/log-execution.decorator' import { WithInstrumentation } from '../../common/decorators/otel.decorator' const VOLUME_STATE_LOCK_KEY = 'volume-state-' @Injectable() export class VolumeManager implements OnModuleInit, TrackableJobExecutions, OnApplicationShutdown, OnApplicationBootstrap { activeJobs = new Set() private readonly logger = new Logger(VolumeManager.name) private processingVolumes: Set = new Set() private skipTestConnection = false private s3Client: S3Client | null = null constructor( @InjectRepository(Volume) private readonly volumeRepository: Repository, private readonly configService: TypedConfigService, @InjectRedis() private readonly redis: Redis, private readonly redisLockProvider: RedisLockProvider, private readonly schedulerRegistry: SchedulerRegistry, ) { if (!this.configService.get('s3.endpoint')) { return } const endpoint = 
this.configService.getOrThrow('s3.endpoint') const region = this.configService.getOrThrow('s3.region') const accessKeyId = this.configService.getOrThrow('s3.accessKey') const secretAccessKey = this.configService.getOrThrow('s3.secretKey') this.skipTestConnection = this.configService.get('skipConnections') this.s3Client = new S3Client({ endpoint: endpoint.startsWith('http') ? endpoint : `http://${endpoint}`, region, credentials: { accessKeyId, secretAccessKey, }, forcePathStyle: true, }) } async onModuleInit() { if (!this.s3Client) { return } if (this.skipTestConnection) { this.logger.debug('Skipping S3 connection test') return } await this.testConnection() } onApplicationBootstrap() { if (!this.s3Client) { return } this.schedulerRegistry.getCronJob('process-pending-volumes').start() } async onApplicationShutdown() { // wait for all active jobs to finish while (this.activeJobs.size > 0) { this.logger.log(`Waiting for ${this.activeJobs.size} active jobs to finish`) await setTimeout(1000) } } private async testConnection() { try { // Try a simple operation to test the connection const command = new ListBucketsCommand({}) await this.s3Client.send(command) this.logger.debug('Successfully connected to S3') } catch (error) { this.logger.error('Failed to connect to S3:', error) throw error } } @Cron(CronExpression.EVERY_5_SECONDS, { name: 'process-pending-volumes', waitForCompletion: true, disabled: true }) @TrackJobExecution() @LogExecution('process-pending-volumes') @WithInstrumentation() async processPendingVolumes() { if (!this.s3Client) { return } try { // Lock the entire process const lockKey = 'process-pending-volumes' if (!(await this.redisLockProvider.lock(lockKey, 30))) { return } const pendingVolumes = await this.volumeRepository.find({ where: { state: In([VolumeState.PENDING_CREATE, VolumeState.PENDING_DELETE]), }, }) await Promise.all( pendingVolumes.map(async (volume) => { if (this.processingVolumes.has(volume.id)) { return } // Get lock for this specific 
volume const volumeLockKey = `${VOLUME_STATE_LOCK_KEY}${volume.id}` const acquired = await this.redisLockProvider.lock(volumeLockKey, 30) if (!acquired) { return } try { this.processingVolumes.add(volume.id) await this.processVolumeState(volume) } finally { this.processingVolumes.delete(volume.id) await this.redisLockProvider.unlock(volumeLockKey) } }), ) await this.redisLockProvider.unlock(lockKey) } catch (error) { this.logger.error('Error processing pending volumes:', error) } } private async processVolumeState(volume: Volume): Promise { const volumeLockKey = `${VOLUME_STATE_LOCK_KEY}${volume.id}` try { switch (volume.state) { case VolumeState.PENDING_CREATE: await this.handlePendingCreate(volume, volumeLockKey) break case VolumeState.PENDING_DELETE: await this.handlePendingDelete(volume, volumeLockKey) break } } catch (error) { this.logger.error(`Error processing volume ${volume.id}:`, error) await this.volumeRepository.update(volume.id, { state: VolumeState.ERROR, errorReason: error.message, }) } } private async handlePendingCreate(volume: Volume, lockKey: string): Promise { try { // Refresh lock before state change await this.redis.setex(lockKey, 30, '1') // Update state to CREATING await this.volumeRepository.save({ ...volume, state: VolumeState.CREATING, }) // Refresh lock before S3 operation await this.redis.setex(lockKey, 30, '1') // Create bucket in Minio/S3 const createBucketCommand = new CreateBucketCommand({ Bucket: volume.getBucketName(), }) await this.s3Client.send(createBucketCommand) await this.s3Client.send( new PutBucketTaggingCommand({ Bucket: volume.getBucketName(), Tagging: { TagSet: [ { Key: 'VolumeId', Value: volume.id, }, { Key: 'OrganizationId', Value: volume.organizationId, }, { Key: 'Environment', Value: this.configService.get('environment'), }, ], }, }), ) // Refresh lock before final state update await this.redis.setex(lockKey, 30, '1') // Update volume state to READY await this.volumeRepository.save({ ...volume, state: 
VolumeState.READY, }) this.logger.debug(`Volume ${volume.id} created successfully`) } catch (error) { this.logger.error(`Error creating volume ${volume.id}:`, error) await this.volumeRepository.save({ ...volume, state: VolumeState.ERROR, errorReason: error.message, }) } } private async handlePendingDelete(volume: Volume, lockKey: string): Promise { try { // Refresh lock before state change await this.redis.setex(lockKey, 30, '1') // Update state to DELETING await this.volumeRepository.save({ ...volume, state: VolumeState.DELETING, }) // Refresh lock before S3 operation await this.redis.setex(lockKey, 30, '1') // Delete bucket from Minio/S3 try { await deleteS3Bucket(this.s3Client, volume.getBucketName()) } catch (error) { if (error.name === 'NoSuchBucket') { this.logger.warn(`Bucket for volume ${volume.id} does not exist, treating as already deleted`) } else if (error.name === 'BucketNotEmpty') { throw new Error('Volume deletion failed because the bucket is not empty. You may retry deletion.') } else { throw error } } // Refresh lock before final state update await this.redis.setex(lockKey, 30, '1') // Delete any existing volume record with the deleted state and the same name in the same organization await this.volumeRepository.delete({ organizationId: volume.organizationId, name: `${volume.name}-deleted`, state: VolumeState.DELETED, }) // Update volume state to DELETED and rename await this.volumeRepository.save({ ...volume, state: VolumeState.DELETED, name: `${volume.name}-deleted`, }) this.logger.debug(`Volume ${volume.id} deleted successfully`) } catch (error) { this.logger.error(`Error deleting volume ${volume.id}:`, error) await this.volumeRepository.save({ ...volume, state: VolumeState.ERROR, errorReason: error.message, }) } } } ================================================ FILE: apps/api/src/sandbox/proxy/log-proxy.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import { Logger } from '@nestjs/common'
import { createProxyMiddleware, fixRequestBody, Options } from 'http-proxy-middleware'
import { IncomingMessage, ServerResponse } from 'http'
import { NextFunction } from 'express'

/**
 * Proxies an incoming HTTP request to a runner's snapshot-logs endpoint.
 *
 * Every incoming path is rewritten to `/snapshots/logs?snapshotRef=...&follow=...`
 * on the target, and the runner auth token is injected as a Bearer header so the
 * client's own credentials are never forwarded.
 */
export class LogProxy {
  private readonly logger = new Logger(LogProxy.name)

  constructor(
    private readonly targetUrl: string, // base URL of the runner to proxy to
    private readonly snapshotRef: string, // snapshot whose logs are requested
    private readonly authToken: string, // runner-side bearer token
    private readonly follow: boolean, // stream (follow) logs vs. one-shot fetch
    private readonly req: IncomingMessage,
    private readonly res: ServerResponse,
    private readonly next: NextFunction,
  ) {}

  /**
   * Builds the proxy middleware and immediately invokes it on the captured
   * (req, res, next) triple, returning the middleware's result.
   */
  create() {
    const proxyOptions: Options = {
      target: this.targetUrl,
      secure: false, // runner may use self-signed TLS
      changeOrigin: true,
      autoRewrite: true,
      // Ignore the incoming path entirely; always hit the snapshot-logs endpoint
      pathRewrite: () => `/snapshots/logs?snapshotRef=${this.snapshotRef}&follow=${this.follow}`,
      on: {
        proxyReq: (proxyReq: any, req: any) => {
          proxyReq.setHeader('Authorization', `Bearer ${this.authToken}`)
          proxyReq.setHeader('Accept', 'application/octet-stream')
          fixRequestBody(proxyReq, req)
        },
      },
      // Generous timeout (5 minutes) to allow long-lived follow streams
      proxyTimeout: 5 * 60 * 1000,
    }

    return createProxyMiddleware(proxyOptions)(this.req, this.res, this.next)
  }
}

================================================
FILE: apps/api/src/sandbox/repositories/sandbox.repository.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { DataSource, FindOptionsWhere } from 'typeorm'
import { Sandbox } from '../entities/sandbox.entity'
import { ConflictException, Injectable, Logger, NotFoundException } from '@nestjs/common'
import { InjectDataSource } from '@nestjs/typeorm'
import { EventEmitter2 } from '@nestjs/event-emitter'
import { BaseRepository } from '../../common/repositories/base.repository'
import { SandboxEvents } from '../constants/sandbox-events.constants'
import { SandboxStateUpdatedEvent } from '../events/sandbox-state-updated.event'
import { SandboxDesiredStateUpdatedEvent } from '../events/sandbox-desired-state-updated.event'
import { SandboxPublicStatusUpdatedEvent } from '../events/sandbox-public-status-updated.event'
import { SandboxOrganizationUpdatedEvent } from '../events/sandbox-organization-updated.event'
import { SandboxLookupCacheInvalidationService } from '../services/sandbox-lookup-cache-invalidation.service'

/**
 * Repository wrapper for Sandbox entities: validates/normalizes entities before
 * writes, emits domain events when state-like fields change, and keeps the
 * sandbox lookup cache coherent on insert/update.
 */
// NOTE(review): generic type arguments (e.g. BaseRepository<Sandbox>, Partial<Sandbox>,
// Promise<...>) appear to have been stripped during extraction — restore from VCS
@Injectable()
export class SandboxRepository extends BaseRepository {
  private readonly logger = new Logger(SandboxRepository.name)

  constructor(
    @InjectDataSource() dataSource: DataSource,
    eventEmitter: EventEmitter2,
    private readonly sandboxLookupCacheInvalidationService: SandboxLookupCacheInvalidationService,
  ) {
    super(dataSource, eventEmitter, Sandbox)
  }

  /**
   * Inserts a new sandbox after stamping missing timestamps and validating it.
   * Invalidates the lookup cache for the new (id, organizationId, name) tuple.
   */
  async insert(sandbox: Sandbox): Promise {
    // Use a single shared "now" so all defaulted timestamps are consistent
    const now = new Date()
    if (!sandbox.createdAt) {
      sandbox.createdAt = now
    }
    if (!sandbox.updatedAt) {
      sandbox.updatedAt = now
    }
    if (!sandbox.lastActivityAt) {
      sandbox.lastActivityAt = now
    }

    // Validate and normalize before hitting the database
    sandbox.assertValid()
    sandbox.enforceInvariants()

    await this.repository.insert(sandbox)
    this.invalidateLookupCacheOnInsert(sandbox)
    return sandbox
  }

  /**
   * @param id - The ID of the sandbox to update.
   * @param params.updateData - The partial data to update.
   *
   * @returns `void` because a raw update is performed.
   */
  async update(id: string, params: { updateData: Partial }, raw: true): Promise
  /**
   * @param id - The ID of the sandbox to update.
   * @param params.updateData - The partial data to update.
   * @param params.entity - Optional pre-fetched sandbox to use instead of fetching from the database.
   *
   * @returns The updated sandbox.
   */
  async update(id: string, params: { updateData: Partial; entity?: Sandbox }, raw?: false): Promise
  async update(
    id: string,
    params: { updateData: Partial; entity?: Sandbox },
    raw = false,
  ): Promise {
    const { updateData, entity } = params

    // Any state transition counts as activity unless the caller set it explicitly
    if (updateData.state && !updateData.lastActivityAt) {
      updateData.lastActivityAt = new Date()
    }

    if (raw) {
      // Raw path: no validation, no events, no cache invalidation
      await this.repository.update(id, updateData)
      return
    }

    const sandbox = entity ?? (await this.findOneBy({ id }))
    if (!sandbox) {
      throw new NotFoundException('Sandbox not found')
    }

    // Keep a shallow copy of the previous values for event/cache diffing below
    const previousSandbox = { ...sandbox }

    Object.assign(sandbox, updateData)
    sandbox.assertValid()
    const invariantChanges = sandbox.enforceInvariants()

    // Persist the caller's changes plus any corrections made by enforceInvariants
    const result = await this.repository.update(id, { ...updateData, ...invariantChanges })
    if (!result.affected) {
      throw new NotFoundException('Sandbox not found after update')
    }
    sandbox.updatedAt = new Date()

    this.emitUpdateEvents(sandbox, previousSandbox)
    this.invalidateLookupCacheOnUpdate(sandbox, previousSandbox)

    return sandbox
  }

  /**
   * Partially updates a sandbox in the database and optionally emits a corresponding event based on the changes.
   *
   * Performs the update in a transaction with a pessimistic write lock to ensure consistency.
   *
   * @param id - The ID of the sandbox to update.
   * @param params.updateData - The partial data to update.
   * @param params.whereCondition - The where condition to use for the update.
   *
   * @throws {ConflictException} if the sandbox was modified by another operation
   */
  async updateWhere(
    id: string,
    params: {
      updateData: Partial
      whereCondition: FindOptionsWhere
    },
  ): Promise {
    const { updateData, whereCondition } = params

    if (updateData.state && !updateData.lastActivityAt) {
      updateData.lastActivityAt = new Date()
    }

    return this.manager.transaction(async (entityManager) => {
      const whereClause = {
        ...whereCondition,
        id,
      }

      // Lock the row for the transaction; a miss means the whereCondition no
      // longer matches, i.e. a concurrent modification won the race
      const sandbox = await entityManager.findOne(Sandbox, {
        where: whereClause,
        lock: { mode: 'pessimistic_write' },
        relations: [],
        loadEagerRelations: false,
      })

      if (!sandbox) {
        throw new ConflictException('Sandbox was modified by another operation, please try again')
      }

      const previousSandbox = { ...sandbox }

      Object.assign(sandbox, updateData)
      sandbox.assertValid()
      const invariantChanges = sandbox.enforceInvariants()

      await entityManager.update(Sandbox, id, { ...updateData, ...invariantChanges })
      sandbox.updatedAt = new Date()

      this.emitUpdateEvents(sandbox, previousSandbox)
      this.invalidateLookupCacheOnUpdate(sandbox, previousSandbox)

      return sandbox
    })
  }

  /**
   * Invalidates the sandbox lookup cache for the inserted sandbox.
   * Best-effort: a failed enqueue is logged and never fails the insert.
   */
  private invalidateLookupCacheOnInsert(sandbox: Sandbox): void {
    try {
      this.sandboxLookupCacheInvalidationService.invalidateOrgId({
        sandboxId: sandbox.id,
        organizationId: sandbox.organizationId,
        name: sandbox.name,
      })
    } catch (error) {
      this.logger.warn(
        `Failed to enqueue sandbox lookup cache invalidation on insert (id, organizationId, name) for ${sandbox.id}: ${error instanceof Error ? error.message : String(error)}`,
      )
    }
  }

  /**
   * Invalidates the sandbox lookup cache for the updated sandbox.
   */
  // NOTE(review): Pick's type arguments were stripped during extraction — restore from VCS
  private invalidateLookupCacheOnUpdate(
    updatedSandbox: Sandbox,
    previousSandbox: Pick,
  ): void {
    try {
      // Invalidate (id, organizationId, name) lookups; previous values are passed
      // so stale entries under the old org/name are evicted as well
      this.sandboxLookupCacheInvalidationService.invalidate({
        sandboxId: updatedSandbox.id,
        organizationId: updatedSandbox.organizationId,
        previousOrganizationId: previousSandbox.organizationId,
        name: updatedSandbox.name,
        previousName: previousSandbox.name,
      })
    } catch (error) {
      // Best-effort: log and continue rather than failing the update
      this.logger.warn(
        `Failed to enqueue sandbox lookup cache invalidation on update (id, organizationId, name) for ${updatedSandbox.id}: ${error instanceof Error ? error.message : String(error)}`,
      )
    }

    try {
      // Only invalidate the authToken lookup when the token actually changed
      if (updatedSandbox.authToken !== previousSandbox.authToken) {
        this.sandboxLookupCacheInvalidationService.invalidate({
          authToken: updatedSandbox.authToken,
        })
      }
    } catch (error) {
      this.logger.warn(
        `Failed to enqueue sandbox lookup cache invalidation on update (authToken) for ${updatedSandbox.id}: ${error instanceof Error ? error.message : String(error)}`,
      )
    }
  }

  /**
   * Emits events based on the changes made to a sandbox.
   * One event per changed field, each carrying (entity, previousValue, newValue).
   */
  // NOTE(review): Pick's type arguments were stripped during extraction — restore from VCS
  private emitUpdateEvents(
    updatedSandbox: Sandbox,
    previousSandbox: Pick,
  ): void {
    if (previousSandbox.state !== updatedSandbox.state) {
      this.eventEmitter.emit(
        SandboxEvents.STATE_UPDATED,
        new SandboxStateUpdatedEvent(updatedSandbox, previousSandbox.state, updatedSandbox.state),
      )
    }
    if (previousSandbox.desiredState !== updatedSandbox.desiredState) {
      this.eventEmitter.emit(
        SandboxEvents.DESIRED_STATE_UPDATED,
        new SandboxDesiredStateUpdatedEvent(updatedSandbox, previousSandbox.desiredState, updatedSandbox.desiredState),
      )
    }
    if (previousSandbox.public !== updatedSandbox.public) {
      this.eventEmitter.emit(
        SandboxEvents.PUBLIC_STATUS_UPDATED,
        new SandboxPublicStatusUpdatedEvent(updatedSandbox, previousSandbox.public, updatedSandbox.public),
      )
    }
    if (previousSandbox.organizationId !== updatedSandbox.organizationId) {
      this.eventEmitter.emit(
        SandboxEvents.ORGANIZATION_UPDATED,
        new SandboxOrganizationUpdatedEvent(
          updatedSandbox,
          previousSandbox.organizationId,
          updatedSandbox.organizationId,
        ),
      )
    }
  }
}

================================================
FILE: apps/api/src/sandbox/runner-adapter/runnerAdapter.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger } from '@nestjs/common' import { Runner } from '../entities/runner.entity' import { ModuleRef } from '@nestjs/core' import { RunnerAdapterV0 } from './runnerAdapter.v0' import { RunnerAdapterV2 } from './runnerAdapter.v2' import { BuildInfo } from '../entities/build-info.entity' import { DockerRegistry } from '../../docker-registry/entities/docker-registry.entity' import { Sandbox } from '../entities/sandbox.entity' import { SandboxState } from '../enums/sandbox-state.enum' import { BackupState } from '../enums/backup-state.enum' import { RunnerServiceInfo } from '../common/runner-service-info' export interface RunnerSandboxInfo { state: SandboxState daemonVersion?: string backupState?: BackupState backupErrorReason?: string } export interface RunnerSnapshotInfo { name: string sizeGB: number entrypoint: string[] cmd: string[] hash: string } export interface SnapshotDigestResponse { hash: string sizeGB: number } export interface RunnerMetrics { currentAllocatedCpu?: number currentAllocatedDiskGiB?: number currentAllocatedMemoryGiB?: number currentCpuUsagePercentage?: number currentDiskUsagePercentage?: number currentMemoryUsagePercentage?: number currentSnapshotCount?: number currentStartedSandboxes?: number } export interface RunnerInfo { serviceHealth?: RunnerServiceInfo[] metrics?: RunnerMetrics appVersion?: string } export interface StartSandboxResponse { daemonVersion: string } export interface RunnerAdapter { init(runner: Runner): Promise healthCheck(signal?: AbortSignal): Promise runnerInfo(signal?: AbortSignal): Promise sandboxInfo(sandboxId: string): Promise createSandbox( sandbox: Sandbox, registry?: DockerRegistry, entrypoint?: string[], metadata?: { [key: string]: string }, otelEndpoint?: string, skipStart?: boolean, ): Promise startSandbox( sandboxId: string, authToken: string, metadata?: { [key: string]: string }, skipStart?: boolean, ): Promise stopSandbox(sandboxId: string): Promise 
destroySandbox(sandboxId: string): Promise createBackup(sandbox: Sandbox, backupSnapshotName: string, registry?: DockerRegistry): Promise removeSnapshot(snapshotName: string): Promise buildSnapshot( buildInfo: BuildInfo, organizationId?: string, sourceRegistries?: DockerRegistry[], registry?: DockerRegistry, pushToInternalRegistry?: boolean, ): Promise pullSnapshot( snapshotName: string, registry?: DockerRegistry, destinationRegistry?: DockerRegistry, destinationRef?: string, newTag?: string, ): Promise snapshotExists(snapshotRef: string): Promise getSnapshotInfo(snapshotName: string): Promise inspectSnapshotInRegistry(snapshotName: string, registry?: DockerRegistry): Promise updateNetworkSettings( sandboxId: string, networkBlockAll?: boolean, networkAllowList?: string, networkLimitEgress?: boolean, ): Promise recoverSandbox(sandbox: Sandbox): Promise resizeSandbox(sandboxId: string, cpu?: number, memory?: number, disk?: number): Promise } @Injectable() export class RunnerAdapterFactory { private readonly logger = new Logger(RunnerAdapterFactory.name) constructor(private moduleRef: ModuleRef) {} async create(runner: Runner): Promise { switch (runner.apiVersion) { case '0': { const adapter = await this.moduleRef.create(RunnerAdapterV0) await adapter.init(runner) return adapter } case '2': { const adapter = await this.moduleRef.create(RunnerAdapterV2) await adapter.init(runner) return adapter } default: throw new Error(`Unsupported runner version: ${runner.apiVersion}`) } } } ================================================ FILE: apps/api/src/sandbox/runner-adapter/runnerAdapter.v0.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import axios, { AxiosError } from 'axios' import axiosDebug from 'axios-debug-log' import axiosRetry from 'axios-retry' import { Injectable, Logger } from '@nestjs/common' import { RunnerAdapter, RunnerInfo, RunnerSandboxInfo, RunnerSnapshotInfo, StartSandboxResponse, SnapshotDigestResponse, } from './runnerAdapter' import { SnapshotStateError } from '../errors/snapshot-state-error' import { Runner } from '../entities/runner.entity' import { Configuration, SandboxApi, EnumsSandboxState, SnapshotsApi, EnumsBackupState, DefaultApi, CreateSandboxDTO, BuildSnapshotRequestDTO, CreateBackupDTO, PullSnapshotRequestDTO, ToolboxApi, UpdateNetworkSettingsDTO, RecoverSandboxDTO, } from '@daytonaio/runner-api-client' import { Sandbox } from '../entities/sandbox.entity' import { BuildInfo } from '../entities/build-info.entity' import { DockerRegistry } from '../../docker-registry/entities/docker-registry.entity' import { SandboxState } from '../enums/sandbox-state.enum' import { BackupState } from '../enums/backup-state.enum' import { RunnerApiError } from '../errors/runner-api-error' const isDebugEnabled = process.env.DEBUG === 'true' // Network error codes that should trigger a retry const RETRYABLE_NETWORK_ERROR_CODES = ['ECONNRESET', 'ETIMEDOUT'] @Injectable() export class RunnerAdapterV0 implements RunnerAdapter { private readonly logger = new Logger(RunnerAdapterV0.name) private sandboxApiClient: SandboxApi private snapshotApiClient: SnapshotsApi private runnerApiClient: DefaultApi private toolboxApiClient: ToolboxApi private convertSandboxState(state: EnumsSandboxState): SandboxState { switch (state) { case EnumsSandboxState.SandboxStateCreating: return SandboxState.CREATING case EnumsSandboxState.SandboxStateRestoring: return SandboxState.RESTORING case EnumsSandboxState.SandboxStateDestroyed: return SandboxState.DESTROYED case EnumsSandboxState.SandboxStateDestroying: return SandboxState.DESTROYING case 
EnumsSandboxState.SandboxStateStarted: return SandboxState.STARTED case EnumsSandboxState.SandboxStateStopped: return SandboxState.STOPPED case EnumsSandboxState.SandboxStateStarting: return SandboxState.STARTING case EnumsSandboxState.SandboxStateStopping: return SandboxState.STOPPING case EnumsSandboxState.SandboxStateError: return SandboxState.ERROR case EnumsSandboxState.SandboxStatePullingSnapshot: return SandboxState.PULLING_SNAPSHOT default: return SandboxState.UNKNOWN } } private convertBackupState(state: EnumsBackupState): BackupState { switch (state) { case EnumsBackupState.BackupStatePending: return BackupState.PENDING case EnumsBackupState.BackupStateInProgress: return BackupState.IN_PROGRESS case EnumsBackupState.BackupStateCompleted: return BackupState.COMPLETED case EnumsBackupState.BackupStateFailed: return BackupState.ERROR default: return BackupState.NONE } } public async init(runner: Runner): Promise { if (!runner.apiUrl) { throw new Error('Runner API URL is required') } const axiosInstance = axios.create({ baseURL: runner.apiUrl, headers: { Authorization: `Bearer ${runner.apiKey}`, }, timeout: 1 * 60 * 60 * 1000, // 1 hour }) const retryErrorMap = new WeakMap() // Configure axios-retry to handle network errors axiosRetry(axiosInstance, { retries: 3, retryDelay: axiosRetry.exponentialDelay, retryCondition: (error) => { // Check if error code or message matches any retryable error const matchedErrorCode = RETRYABLE_NETWORK_ERROR_CODES.find( (code) => (error as any).code === code || error.message?.includes(code) || (error as any).cause?.code === code, ) if (matchedErrorCode) { retryErrorMap.set(error, matchedErrorCode) return true } return false }, onRetry: (retryCount, error, requestConfig) => { this.logger.warn( `Retrying request due to ${retryErrorMap.get(error)} (attempt ${retryCount}): ${requestConfig.method?.toUpperCase()} ${requestConfig.url}`, ) }, }) axiosInstance.interceptors.response.use( (response) => { return response }, (error) => { 
const errorMessage = error.response?.data?.message || error.response?.data || error.message || String(error) const statusCode = error.response?.data?.statusCode || error.response?.status || error.status const code = error.response?.data?.code || (error as any).code || (error as any).cause?.code || '' throw new RunnerApiError(String(errorMessage), statusCode, code) }, ) if (isDebugEnabled) { axiosDebug.addLogger(axiosInstance) } this.sandboxApiClient = new SandboxApi(new Configuration(), '', axiosInstance) this.snapshotApiClient = new SnapshotsApi(new Configuration(), '', axiosInstance) this.runnerApiClient = new DefaultApi(new Configuration(), '', axiosInstance) this.toolboxApiClient = new ToolboxApi(new Configuration(), '', axiosInstance) } async healthCheck(signal?: AbortSignal): Promise { const response = await this.runnerApiClient.healthCheck({ signal }) if (response.data.status !== 'ok') { throw new Error('Runner is not healthy') } } async runnerInfo(signal?: AbortSignal): Promise { const response = await this.runnerApiClient.runnerInfo({ signal }) return { serviceHealth: response.data.serviceHealth, metrics: response.data.metrics, appVersion: response.data.appVersion, } } async sandboxInfo(sandboxId: string): Promise { const sandboxInfo = await this.sandboxApiClient.info(sandboxId) return { state: this.convertSandboxState(sandboxInfo.data.state), backupState: this.convertBackupState(sandboxInfo.data.backupState), backupErrorReason: sandboxInfo.data.backupError, daemonVersion: sandboxInfo.data.daemonVersion, } } async createSandbox( sandbox: Sandbox, registry?: DockerRegistry, entrypoint?: string[], metadata?: { [key: string]: string }, otelEndpoint?: string, skipStart?: boolean, ): Promise { const createSandboxDto: CreateSandboxDTO = { id: sandbox.id, userId: sandbox.organizationId, snapshot: sandbox.snapshot, osUser: sandbox.osUser, cpuQuota: sandbox.cpu, gpuQuota: sandbox.gpu, memoryQuota: sandbox.mem, storageQuota: sandbox.disk, env: sandbox.env, registry: 
registry ? { project: registry.project, url: registry.url.replace(/^(https?:\/\/)/, ''), username: registry.username, password: registry.password, } : undefined, entrypoint: entrypoint, volumes: sandbox.volumes?.map((volume) => ({ volumeId: volume.volumeId, mountPath: volume.mountPath, subpath: volume.subpath, })), networkBlockAll: sandbox.networkBlockAll, networkAllowList: sandbox.networkAllowList, metadata: metadata, authToken: sandbox.authToken, otelEndpoint, skipStart: skipStart, organizationId: sandbox.organizationId, regionId: sandbox.region, } const response = await this.sandboxApiClient.create(createSandboxDto) if (!response?.data?.daemonVersion) { return undefined } return { daemonVersion: response.data.daemonVersion, } } async startSandbox( sandboxId: string, authToken: string, metadata?: { [key: string]: string }, ): Promise { const response = await this.sandboxApiClient.start(sandboxId, authToken, metadata) if (!response?.data?.daemonVersion) { return undefined } return { daemonVersion: response.data.daemonVersion, } } async stopSandbox(sandboxId: string): Promise { await this.sandboxApiClient.stop(sandboxId) } async destroySandbox(sandboxId: string): Promise { await this.sandboxApiClient.destroy(sandboxId) } async createBackup(sandbox: Sandbox, backupSnapshotName: string, registry?: DockerRegistry): Promise { const request: CreateBackupDTO = { snapshot: backupSnapshotName, registry: undefined, } if (registry) { request.registry = { project: registry.project, url: registry.url.replace(/^(https?:\/\/)/, ''), username: registry.username, password: registry.password, } } await this.sandboxApiClient.createBackup(sandbox.id, request) } async buildSnapshot( buildInfo: BuildInfo, organizationId?: string, sourceRegistries?: DockerRegistry[], registry?: DockerRegistry, pushToInternalRegistry?: boolean, ): Promise { const request: BuildSnapshotRequestDTO = { snapshot: buildInfo.snapshotRef, dockerfile: buildInfo.dockerfileContent, organizationId: organizationId, 
context: buildInfo.contextHashes, pushToInternalRegistry: pushToInternalRegistry, } if (sourceRegistries) { request.sourceRegistries = sourceRegistries.map((sourceRegistry) => ({ project: sourceRegistry.project, url: sourceRegistry.url.replace(/^(https?:\/\/)/, ''), username: sourceRegistry.username, password: sourceRegistry.password, })) } if (registry) { request.registry = { project: registry.project, url: registry.url.replace(/^(https?:\/\/)/, ''), username: registry.username, password: registry.password, } } await this.snapshotApiClient.buildSnapshot(request) } async removeSnapshot(snapshotName: string): Promise { await this.snapshotApiClient.removeSnapshot(snapshotName) } async pullSnapshot( snapshotName: string, registry?: DockerRegistry, destinationRegistry?: DockerRegistry, destinationRef?: string, newTag?: string, ): Promise { const request: PullSnapshotRequestDTO = { snapshot: snapshotName, newTag, } if (registry) { request.registry = { project: registry.project, url: registry.url.replace(/^(https?:\/\/)/, ''), username: registry.username, password: registry.password, } } if (destinationRegistry) { request.destinationRegistry = { project: destinationRegistry.project, url: destinationRegistry.url.replace(/^(https?:\/\/)/, ''), username: destinationRegistry.username, password: destinationRegistry.password, } } if (destinationRef) { request.destinationRef = destinationRef } await this.snapshotApiClient.pullSnapshot(request) } async snapshotExists(snapshotName: string): Promise { const response = await this.snapshotApiClient.snapshotExists(snapshotName) return response.data.exists } async getSnapshotInfo(snapshotName: string): Promise { try { const response = await this.snapshotApiClient.getSnapshotInfo(snapshotName) return { name: response.data.name || '', sizeGB: response.data.sizeGB, entrypoint: response.data.entrypoint, cmd: response.data.cmd, hash: response.data.hash, } } catch (err) { if (err instanceof RunnerApiError && err.statusCode === 422) { throw 
new SnapshotStateError(err.message) } throw err } } async inspectSnapshotInRegistry(snapshotName: string, registry?: DockerRegistry): Promise { const response = await this.snapshotApiClient.inspectSnapshotInRegistry({ snapshot: snapshotName, registry: registry ? { project: registry.project, url: registry.url.replace(/^(https?:\/\/)/, ''), username: registry.username, password: registry.password, } : undefined, }) return { hash: response.data.hash, sizeGB: response.data.sizeGB, } } async updateNetworkSettings( sandboxId: string, networkBlockAll?: boolean, networkAllowList?: string, networkLimitEgress?: boolean, ): Promise { const updateNetworkSettingsDto: UpdateNetworkSettingsDTO = { networkBlockAll: networkBlockAll, networkAllowList: networkAllowList, networkLimitEgress: networkLimitEgress, } await this.sandboxApiClient.updateNetworkSettings(sandboxId, updateNetworkSettingsDto) } async recoverSandbox(sandbox: Sandbox): Promise { const recoverSandboxDTO: RecoverSandboxDTO = { userId: sandbox.organizationId, snapshot: sandbox.snapshot, osUser: sandbox.osUser, cpuQuota: sandbox.cpu, gpuQuota: sandbox.gpu, memoryQuota: sandbox.mem, storageQuota: sandbox.disk, env: sandbox.env, volumes: sandbox.volumes?.map((volume) => ({ volumeId: volume.volumeId, mountPath: volume.mountPath, subpath: volume.subpath, })), networkBlockAll: sandbox.networkBlockAll, networkAllowList: sandbox.networkAllowList, errorReason: sandbox.errorReason, backupErrorReason: sandbox.backupErrorReason, } await this.sandboxApiClient.recover(sandbox.id, recoverSandboxDTO) } async resizeSandbox(sandboxId: string, cpu?: number, memory?: number, disk?: number): Promise { await this.sandboxApiClient.resize(sandboxId, { cpu, memory, disk }) } } ================================================ FILE: apps/api/src/sandbox/runner-adapter/runnerAdapter.v2.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { Repository, IsNull, Not } from 'typeorm' import { RunnerAdapter, RunnerInfo, RunnerSandboxInfo, RunnerSnapshotInfo, StartSandboxResponse, SnapshotDigestResponse, } from './runnerAdapter' import { Runner } from '../entities/runner.entity' import { Sandbox } from '../entities/sandbox.entity' import { Job } from '../entities/job.entity' import { BuildInfo } from '../entities/build-info.entity' import { DockerRegistry } from '../../docker-registry/entities/docker-registry.entity' import { SandboxState } from '../enums/sandbox-state.enum' import { JobType } from '../enums/job-type.enum' import { JobStatus } from '../enums/job-status.enum' import { ResourceType } from '../enums/resource-type.enum' import { JobService } from '../services/job.service' import { SandboxRepository } from '../repositories/sandbox.repository' import { CreateSandboxDTO, CreateBackupDTO, BuildSnapshotRequestDTO, PullSnapshotRequestDTO, UpdateNetworkSettingsDTO, InspectSnapshotInRegistryRequest, RecoverSandboxDTO, } from '@daytonaio/runner-api-client' import { SnapshotStateError } from '../errors/snapshot-state-error' /** * RunnerAdapterV2 implements RunnerAdapter for v2 runners. * Instead of making direct API calls to the runner, it creates jobs in the database * that the v2 runner polls and processes asynchronously. 
*/ @Injectable() export class RunnerAdapterV2 implements RunnerAdapter { private readonly logger = new Logger(RunnerAdapterV2.name) private runner: Runner constructor( private readonly sandboxRepository: SandboxRepository, @InjectRepository(Job) private readonly jobRepository: Repository, private readonly jobService: JobService, ) {} async init(runner: Runner): Promise { this.runner = runner } async healthCheck(_signal?: AbortSignal): Promise { throw new Error('healthCheck is not supported for V2 runners') } async runnerInfo(_signal?: AbortSignal): Promise { throw new Error('runnerInfo is not supported for V2 runners') } async sandboxInfo(sandboxId: string): Promise { // Query the sandbox entity const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId }, }) if (!sandbox) { throw new Error(`Sandbox ${sandboxId} not found`) } // Query for any incomplete jobs for this sandbox to determine transitional state const incompleteJob = await this.jobRepository.findOne({ where: { resourceType: ResourceType.SANDBOX, resourceId: sandboxId, completedAt: IsNull(), }, order: { createdAt: 'DESC' }, }) let state = sandbox.state let daemonVersion: string | undefined = undefined // If there's an incomplete job, infer the transitional state from job type if (incompleteJob) { state = this.inferStateFromJob(incompleteJob, sandbox) daemonVersion = incompleteJob.getResultMetadata()?.daemonVersion } else { // Look for latest job for this sandbox const latestJob = await this.jobRepository.findOne({ where: { resourceType: ResourceType.SANDBOX, resourceId: sandboxId, }, order: { createdAt: 'DESC' }, }) if (latestJob) { state = this.inferStateFromJob(latestJob, sandbox) daemonVersion = latestJob.getResultMetadata()?.daemonVersion } } return { state, backupState: sandbox.backupState, backupErrorReason: sandbox.backupErrorReason, daemonVersion, } } private inferStateFromJob(job: Job, sandbox: Sandbox): SandboxState { // Map job types to transitional states switch (job.type) { 
case JobType.CREATE_SANDBOX: return job.status === JobStatus.COMPLETED ? SandboxState.STARTED : SandboxState.CREATING case JobType.START_SANDBOX: return job.status === JobStatus.COMPLETED ? SandboxState.STARTED : SandboxState.STARTING case JobType.STOP_SANDBOX: return job.status === JobStatus.COMPLETED ? SandboxState.STOPPED : SandboxState.STOPPING case JobType.DESTROY_SANDBOX: return job.status === JobStatus.COMPLETED ? SandboxState.DESTROYED : SandboxState.DESTROYING default: // For other job types (backup, etc.), return current sandbox state return sandbox.state } } async createSandbox( sandbox: Sandbox, registry?: DockerRegistry, entrypoint?: string[], metadata?: { [key: string]: string }, otelEndpoint?: string, skipStart?: boolean, ): Promise { const payload: CreateSandboxDTO = { id: sandbox.id, userId: sandbox.organizationId, snapshot: sandbox.snapshot, osUser: sandbox.osUser, cpuQuota: sandbox.cpu, gpuQuota: sandbox.gpu, memoryQuota: sandbox.mem, storageQuota: sandbox.disk, env: sandbox.env, registry: registry ? 
{ project: registry.project, url: registry.url.replace(/^(https?:\/\/)/, ''), username: registry.username, password: registry.password, } : undefined, entrypoint: entrypoint, volumes: sandbox.volumes?.map((volume) => ({ volumeId: volume.volumeId, mountPath: volume.mountPath, subpath: volume.subpath, })), networkBlockAll: sandbox.networkBlockAll, networkAllowList: sandbox.networkAllowList, metadata: metadata, authToken: sandbox.authToken, otelEndpoint: otelEndpoint, skipStart: skipStart, organizationId: sandbox.organizationId, regionId: sandbox.region, } await this.jobService.createJob( null, JobType.CREATE_SANDBOX, this.runner.id, ResourceType.SANDBOX, sandbox.id, payload, ) this.logger.debug(`Created CREATE_SANDBOX job for sandbox ${sandbox.id} on runner ${this.runner.id}`) // Daemon version will be set in the job result metadata return undefined } async startSandbox( sandboxId: string, authToken: string, metadata?: { [key: string]: string }, ): Promise { await this.jobService.createJob(null, JobType.START_SANDBOX, this.runner.id, ResourceType.SANDBOX, sandboxId, { authToken, metadata, }) this.logger.debug(`Created START_SANDBOX job for sandbox ${sandboxId} on runner ${this.runner.id}`) // Daemon version will be set in the job result metadata return undefined } async stopSandbox(sandboxId: string): Promise { await this.jobService.createJob(null, JobType.STOP_SANDBOX, this.runner.id, ResourceType.SANDBOX, sandboxId) this.logger.debug(`Created STOP_SANDBOX job for sandbox ${sandboxId} on runner ${this.runner.id}`) } async destroySandbox(sandboxId: string): Promise { await this.jobService.createJob(null, JobType.DESTROY_SANDBOX, this.runner.id, ResourceType.SANDBOX, sandboxId) this.logger.debug(`Created DESTROY_SANDBOX job for sandbox ${sandboxId} on runner ${this.runner.id}`) } async recoverSandbox(sandbox: Sandbox): Promise { const recoverSandboxDTO: RecoverSandboxDTO = { userId: sandbox.organizationId, snapshot: sandbox.snapshot, osUser: sandbox.osUser, cpuQuota: 
sandbox.cpu, gpuQuota: sandbox.gpu, memoryQuota: sandbox.mem, storageQuota: sandbox.disk, env: sandbox.env, volumes: sandbox.volumes?.map((volume) => ({ volumeId: volume.volumeId, mountPath: volume.mountPath, subpath: volume.subpath, })), networkBlockAll: sandbox.networkBlockAll, networkAllowList: sandbox.networkAllowList, errorReason: sandbox.errorReason, backupErrorReason: sandbox.backupErrorReason, } await this.jobService.createJob( null, JobType.RECOVER_SANDBOX, this.runner.id, ResourceType.SANDBOX, sandbox.id, recoverSandboxDTO, ) this.logger.debug(`Created RECOVER_SANDBOX job for sandbox ${sandbox.id} on runner ${this.runner.id}`) } async createBackup(sandbox: Sandbox, backupSnapshotName: string, registry?: DockerRegistry): Promise { const payload: CreateBackupDTO = { snapshot: backupSnapshotName, registry: undefined, } if (registry) { payload.registry = { project: registry.project, url: registry.url.replace(/^(https?:\/\/)/, ''), username: registry.username, password: registry.password, } } await this.jobService.createJob( null, JobType.CREATE_BACKUP, this.runner.id, ResourceType.SANDBOX, sandbox.id, payload, ) this.logger.debug(`Created CREATE_BACKUP job for sandbox ${sandbox.id} on runner ${this.runner.id}`) } async buildSnapshot( buildInfo: BuildInfo, organizationId?: string, sourceRegistries?: DockerRegistry[], registry?: DockerRegistry, pushToInternalRegistry?: boolean, ): Promise { const payload: BuildSnapshotRequestDTO = { snapshot: buildInfo.snapshotRef, dockerfile: buildInfo.dockerfileContent, organizationId: organizationId, context: buildInfo.contextHashes, pushToInternalRegistry: pushToInternalRegistry, } if (sourceRegistries) { payload.sourceRegistries = sourceRegistries.map((sourceRegistry) => ({ project: sourceRegistry.project, url: sourceRegistry.url.replace(/^(https?:\/\/)/, ''), username: sourceRegistry.username, password: sourceRegistry.password, })) } if (registry) { payload.registry = { project: registry.project, url: 
registry.url.replace(/^(https?:\/\/)/, ''), username: registry.username, password: registry.password, } } await this.jobService.createJob( null, JobType.BUILD_SNAPSHOT, this.runner.id, ResourceType.SNAPSHOT, buildInfo.snapshotRef, payload, ) this.logger.debug(`Created BUILD_SNAPSHOT job for ${buildInfo.snapshotRef} on runner ${this.runner.id}`) } async pullSnapshot( snapshotName: string, registry?: DockerRegistry, destinationRegistry?: DockerRegistry, destinationRef?: string, newTag?: string, ): Promise { const payload: PullSnapshotRequestDTO = { snapshot: snapshotName, newTag, } if (registry) { payload.registry = { project: registry.project, url: registry.url.replace(/^(https?:\/\/)/, ''), username: registry.username, password: registry.password, } } if (destinationRegistry) { payload.destinationRegistry = { project: destinationRegistry.project, url: destinationRegistry.url.replace(/^(https?:\/\/)/, ''), username: destinationRegistry.username, password: destinationRegistry.password, } } if (destinationRef) { payload.destinationRef = destinationRef } await this.jobService.createJob( null, JobType.PULL_SNAPSHOT, this.runner.id, ResourceType.SNAPSHOT, destinationRef || snapshotName, payload, ) this.logger.debug(`Created PULL_SNAPSHOT job for ${snapshotName} on runner ${this.runner.id}`) } async removeSnapshot(snapshotName: string): Promise { await this.jobService.createJob(null, JobType.REMOVE_SNAPSHOT, this.runner.id, ResourceType.SNAPSHOT, snapshotName) this.logger.debug(`Created REMOVE_SNAPSHOT job for ${snapshotName} on runner ${this.runner.id}`) } async snapshotExists(snapshotRef: string): Promise { // Find the latest job for this snapshot on this runner // Do not include INSPECT_SNAPSHOT_IN_REGISTRY const latestJob = await this.jobRepository.findOne({ where: [ { runnerId: this.runner.id, resourceType: ResourceType.SNAPSHOT, resourceId: snapshotRef, type: Not(JobType.INSPECT_SNAPSHOT_IN_REGISTRY), }, ], order: { createdAt: 'DESC' }, }) // If no job exists, 
snapshot doesn't exist if (!latestJob) { return false } // If the latest job is a REMOVE_SNAPSHOT, the snapshot no longer exists if (latestJob.type === JobType.REMOVE_SNAPSHOT) { return false } // If the latest job is PULL_SNAPSHOT or BUILD_SNAPSHOT, check if it completed successfully if (latestJob.type === JobType.PULL_SNAPSHOT || latestJob.type === JobType.BUILD_SNAPSHOT) { return latestJob.status === JobStatus.COMPLETED } // For any other job type, snapshot doesn't exist return false } async getSnapshotInfo(snapshotRef: string): Promise { const latestJob = await this.jobRepository.findOne({ where: [ { runnerId: this.runner.id, resourceType: ResourceType.SNAPSHOT, resourceId: snapshotRef, type: Not(JobType.INSPECT_SNAPSHOT_IN_REGISTRY), }, ], order: { createdAt: 'DESC' }, }) if (!latestJob) { throw new Error(`Snapshot ${snapshotRef} not found on runner ${this.runner.id}`) } const metadata = latestJob.getResultMetadata() switch (latestJob.status) { case JobStatus.COMPLETED: if (latestJob.type === JobType.PULL_SNAPSHOT || latestJob.type === JobType.BUILD_SNAPSHOT) { return { name: latestJob.resourceId, sizeGB: metadata?.sizeGB, entrypoint: metadata?.entrypoint, cmd: metadata?.cmd, hash: metadata?.hash, } } throw new Error( `Snapshot ${snapshotRef} is in an unknown state (${latestJob.status}) on runner ${this.runner.id}`, ) case JobStatus.FAILED: throw new SnapshotStateError( latestJob.errorMessage || `Snapshot ${snapshotRef} failed on runner ${this.runner.id}`, ) default: throw new Error( `Snapshot ${snapshotRef} is in an unknown state (${latestJob.status}) on runner ${this.runner.id}`, ) } } async inspectSnapshotInRegistry(snapshotName: string, registry?: DockerRegistry): Promise { const payload: InspectSnapshotInRegistryRequest = { snapshot: snapshotName, registry: registry ? 
{ project: registry.project, url: registry.url.replace(/^(https?:\/\/)/, ''), username: registry.username, password: registry.password, } : undefined, } const job = await this.jobService.createJob( null, JobType.INSPECT_SNAPSHOT_IN_REGISTRY, this.runner.id, ResourceType.SNAPSHOT, snapshotName, payload, ) this.logger.debug(`Created INSPECT_SNAPSHOT_IN_REGISTRY job for ${snapshotName} on runner ${this.runner.id}`) const waitTimeout = 30 * 1000 // 30 seconds const completedJob = await this.jobService.waitJobCompletion(job.id, waitTimeout) if (!completedJob) { throw new Error(`Snapshot ${snapshotName} not found in registry on runner ${this.runner.id}`) } if (completedJob.status !== JobStatus.COMPLETED) { throw new Error( `Snapshot ${snapshotName} failed to inspect in registry on runner ${this.runner.id}. Error: ${completedJob.errorMessage}`, ) } const resultMetadata = completedJob.getResultMetadata() return { hash: resultMetadata?.hash, sizeGB: resultMetadata?.sizeGB, } } async updateNetworkSettings( sandboxId: string, networkBlockAll?: boolean, networkAllowList?: string, networkLimitEgress?: boolean, ): Promise { const payload: UpdateNetworkSettingsDTO = { networkBlockAll: networkBlockAll, networkAllowList: networkAllowList, networkLimitEgress: networkLimitEgress, } await this.jobService.createJob( null, JobType.UPDATE_SANDBOX_NETWORK_SETTINGS, this.runner.id, ResourceType.SANDBOX, sandboxId, payload, ) this.logger.debug( `Created UPDATE_SANDBOX_NETWORK_SETTINGS job for sandbox ${sandboxId} on runner ${this.runner.id}`, ) } async resizeSandbox(sandboxId: string, cpu?: number, memory?: number, disk?: number): Promise { await this.jobService.createJob(null, JobType.RESIZE_SANDBOX, this.runner.id, ResourceType.SANDBOX, sandboxId, { cpu, memory, disk, }) this.logger.debug(`Created RESIZE_SANDBOX job for sandbox ${sandboxId} on runner ${this.runner.id}`) } } ================================================ FILE: apps/api/src/sandbox/sandbox.module.ts 
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { Module } from '@nestjs/common'
import { DataSource } from 'typeorm'
import { SandboxController } from './controllers/sandbox.controller'
import { SandboxService } from './services/sandbox.service'
import { TypeOrmModule } from '@nestjs/typeorm'
import { Sandbox } from './entities/sandbox.entity'
import { UserModule } from '../user/user.module'
import { RunnerService } from './services/runner.service'
import { Runner } from './entities/runner.entity'
import { RunnerController } from './controllers/runner.controller'
import { ToolboxService } from './services/toolbox.deprecated.service'
import { DockerRegistryModule } from '../docker-registry/docker-registry.module'
import { SandboxManager } from './managers/sandbox.manager'
import { ToolboxController } from './controllers/toolbox.deprecated.controller'
import { Snapshot } from './entities/snapshot.entity'
import { SnapshotController } from './controllers/snapshot.controller'
import { SnapshotService } from './services/snapshot.service'
import { SnapshotManager } from './managers/snapshot.manager'
import { SnapshotRunner } from './entities/snapshot-runner.entity'
import { DockerRegistry } from '../docker-registry/entities/docker-registry.entity'
import { RedisLockProvider } from './common/redis-lock.provider'
import { OrganizationModule } from '../organization/organization.module'
import { SandboxWarmPoolService } from './services/sandbox-warm-pool.service'
import { WarmPool } from './entities/warm-pool.entity'
import { PreviewController } from './controllers/preview.controller'
import { SnapshotSubscriber } from './subscribers/snapshot.subscriber'
import { VolumeController } from './controllers/volume.controller'
import { VolumeService } from './services/volume.service'
import { VolumeManager } from './managers/volume.manager'
import { Volume } from './entities/volume.entity'
import { BuildInfo } from './entities/build-info.entity'
import { BackupManager } from './managers/backup.manager'
import { VolumeSubscriber } from './subscribers/volume.subscriber'
import { RunnerSubscriber } from './subscribers/runner.subscriber'
import { WorkspaceController } from './controllers/workspace.deprecated.controller'
import { RunnerAdapterFactory } from './runner-adapter/runnerAdapter'
import { SandboxStartAction } from './managers/sandbox-actions/sandbox-start.action'
import { SandboxStopAction } from './managers/sandbox-actions/sandbox-stop.action'
import { SandboxDestroyAction } from './managers/sandbox-actions/sandbox-destroy.action'
import { SandboxArchiveAction } from './managers/sandbox-actions/sandbox-archive.action'
import { SshAccess } from './entities/ssh-access.entity'
import { SandboxRepository } from './repositories/sandbox.repository'
import { ProxyCacheInvalidationService } from './services/proxy-cache-invalidation.service'
import { RegionModule } from '../region/region.module'
import { Region } from '../region/entities/region.entity'
import { SnapshotRegion } from './entities/snapshot-region.entity'
import { JobController } from './controllers/job.controller'
import { JobService } from './services/job.service'
import { JobStateHandlerService } from './services/job-state-handler.service'
import { Job } from './entities/job.entity'
import { SandboxLookupCacheInvalidationService } from './services/sandbox-lookup-cache-invalidation.service'
import { SandboxAccessGuard } from './guards/sandbox-access.guard'
import { RunnerAccessGuard } from './guards/runner-access.guard'
import { RegionRunnerAccessGuard } from './guards/region-runner-access.guard'
import { RegionSandboxAccessGuard } from './guards/region-sandbox-access.guard'
import { ProxyGuard } from './guards/proxy.guard'
import { SshGatewayGuard } from './guards/ssh-gateway.guard'
import { EventEmitter2 } from '@nestjs/event-emitter'

/**
 * NestJS module wiring for the sandbox domain: sandboxes, runners, snapshots,
 * volumes, warm pools, jobs, and their guards/managers/subscribers.
 */
@Module({
  imports: [
    UserModule,
    DockerRegistryModule,
    OrganizationModule,
    RegionModule,
    // Entities whose TypeORM repositories are injectable within this module
    TypeOrmModule.forFeature([
      Sandbox,
      Runner,
      Snapshot,
      BuildInfo,
      SnapshotRunner,
      SnapshotRegion,
      DockerRegistry,
      WarmPool,
      Volume,
      SshAccess,
      Region,
      Job,
    ]),
  ],
  controllers: [
    SandboxController,
    RunnerController,
    ToolboxController,
    SnapshotController,
    WorkspaceController,
    PreviewController,
    VolumeController,
    JobController,
  ],
  providers: [
    SandboxService,
    SandboxManager,
    BackupManager,
    SandboxWarmPoolService,
    RunnerService,
    ToolboxService,
    SnapshotService,
    ProxyCacheInvalidationService,
    SandboxLookupCacheInvalidationService,
    SnapshotManager,
    RedisLockProvider,
    SnapshotSubscriber,
    VolumeService,
    VolumeManager,
    VolumeSubscriber,
    RunnerSubscriber,
    RunnerAdapterFactory,
    SandboxStartAction,
    SandboxStopAction,
    SandboxDestroyAction,
    SandboxArchiveAction,
    JobService,
    JobStateHandlerService,
    SandboxAccessGuard,
    RunnerAccessGuard,
    RegionRunnerAccessGuard,
    RegionSandboxAccessGuard,
    ProxyGuard,
    SshGatewayGuard,
    // SandboxRepository is a custom repository, so it is constructed manually
    // rather than via TypeOrmModule.forFeature
    {
      provide: SandboxRepository,
      inject: [DataSource, EventEmitter2, SandboxLookupCacheInvalidationService],
      useFactory: (
        dataSource: DataSource,
        eventEmitter: EventEmitter2,
        sandboxLookupCacheInvalidationService: SandboxLookupCacheInvalidationService,
      ) => new SandboxRepository(dataSource, eventEmitter, sandboxLookupCacheInvalidationService),
    },
  ],
  exports: [
    SandboxService,
    RunnerService,
    RedisLockProvider,
    SnapshotService,
    VolumeService,
    VolumeManager,
    SandboxRepository,
    RunnerAdapterFactory,
  ],
})
export class SandboxModule {}

================================================
FILE: apps/api/src/sandbox/services/job-state-handler.service.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { Repository } from 'typeorm' import { Snapshot } from '../entities/snapshot.entity' import { SnapshotRunner } from '../entities/snapshot-runner.entity' import { SandboxState } from '../enums/sandbox-state.enum' import { SnapshotState } from '../enums/snapshot-state.enum' import { SnapshotRunnerState } from '../enums/snapshot-runner-state.enum' import { JobStatus } from '../enums/job-status.enum' import { JobType } from '../enums/job-type.enum' import { Job } from '../entities/job.entity' import { BackupState } from '../enums/backup-state.enum' import { SandboxDesiredState } from '../enums/sandbox-desired-state.enum' import { sanitizeSandboxError } from '../utils/sanitize-error.util' import { OrganizationUsageService } from '../../organization/services/organization-usage.service' import { SandboxRepository } from '../repositories/sandbox.repository' import { Sandbox } from '../entities/sandbox.entity' import { RedisLockProvider } from '../common/redis-lock.provider' import { ResourceType } from '../enums/resource-type.enum' import { getStateChangeLockKey } from '../utils/lock-key.util' /** * Service for handling entity state updates based on job completion (v2 runners only). * This service listens to job status changes and updates entity states accordingly. */ @Injectable() export class JobStateHandlerService { private readonly logger = new Logger(JobStateHandlerService.name) constructor( private readonly sandboxRepository: SandboxRepository, @InjectRepository(Snapshot) private readonly snapshotRepository: Repository, @InjectRepository(SnapshotRunner) private readonly snapshotRunnerRepository: Repository, private readonly organizationUsageService: OrganizationUsageService, private readonly redisLockProvider: RedisLockProvider, ) {} /** * Handle job completion and update entity state accordingly. 
* Called when a job status is updated to COMPLETED or FAILED. */ async handleJobCompletion(job: Job): Promise { if (job.status !== JobStatus.COMPLETED && job.status !== JobStatus.FAILED) { return } if (!job.resourceId) { return } switch (job.type) { case JobType.CREATE_SANDBOX: await this.handleCreateSandboxJobCompletion(job) break case JobType.START_SANDBOX: await this.handleStartSandboxJobCompletion(job) break case JobType.STOP_SANDBOX: await this.handleStopSandboxJobCompletion(job) break case JobType.DESTROY_SANDBOX: await this.handleDestroySandboxJobCompletion(job) break case JobType.RESIZE_SANDBOX: await this.handleResizeSandboxJobCompletion(job) break case JobType.PULL_SNAPSHOT: await this.handlePullSnapshotJobCompletion(job) break case JobType.BUILD_SNAPSHOT: await this.handleBuildSnapshotJobCompletion(job) break case JobType.REMOVE_SNAPSHOT: await this.handleRemoveSnapshotJobCompletion(job) break case JobType.CREATE_BACKUP: await this.handleCreateBackupJobCompletion(job) break case JobType.RECOVER_SANDBOX: await this.handleRecoverSandboxJobCompletion(job) break default: break } switch (job.resourceType) { case ResourceType.SANDBOX: { const lockKey = getStateChangeLockKey(job.resourceId) this.redisLockProvider .unlock(lockKey) .catch((error) => this.logger.error(`Error unlocking Redis lock for sandbox ${job.resourceId}:`, error)) // Clean up lock after job completion break } default: break } } private async handleCreateSandboxJobCompletion(job: Job): Promise { const sandboxId = job.resourceId if (!sandboxId) return try { const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId } }) if (!sandbox) { this.logger.warn(`Sandbox ${sandboxId} not found for CREATE_SANDBOX job ${job.id}`) return } if (sandbox.desiredState !== SandboxDesiredState.STARTED) { this.logger.error( `Sandbox ${sandboxId} is not in desired state STARTED for CREATE_SANDBOX job ${job.id}. 
Desired state: ${sandbox.desiredState}`, ) return } const updateData: Partial = {} if (job.status === JobStatus.COMPLETED) { this.logger.debug( `CREATE_SANDBOX job ${job.id} completed successfully, marking sandbox ${sandboxId} as STARTED`, ) updateData.state = SandboxState.STARTED updateData.errorReason = null const metadata = job.getResultMetadata() if (metadata?.daemonVersion && typeof metadata.daemonVersion === 'string') { updateData.daemonVersion = metadata.daemonVersion } } else if (job.status === JobStatus.FAILED) { this.logger.error(`CREATE_SANDBOX job ${job.id} failed for sandbox ${sandboxId}: ${job.errorMessage}`) updateData.state = SandboxState.ERROR const { recoverable, errorReason } = sanitizeSandboxError(job.errorMessage) updateData.errorReason = errorReason || 'Failed to create sandbox' updateData.recoverable = recoverable } await this.sandboxRepository.update(sandboxId, { updateData, entity: sandbox }) } catch (error) { this.logger.error(`Error handling CREATE_SANDBOX job completion for sandbox ${sandboxId}:`, error) } } private async handleStartSandboxJobCompletion(job: Job): Promise { const sandboxId = job.resourceId if (!sandboxId) return try { const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId } }) if (!sandbox) { this.logger.warn(`Sandbox ${sandboxId} not found for START_SANDBOX job ${job.id}`) return } if (sandbox.desiredState !== SandboxDesiredState.STARTED) { this.logger.error( `Sandbox ${sandboxId} is not in desired state STARTED for START_SANDBOX job ${job.id}. 
Desired state: ${sandbox.desiredState}`, ) return } const updateData: Partial = {} if (job.status === JobStatus.COMPLETED) { this.logger.debug(`START_SANDBOX job ${job.id} completed successfully, marking sandbox ${sandboxId} as STARTED`) updateData.state = SandboxState.STARTED updateData.errorReason = null const metadata = job.getResultMetadata() if (metadata?.daemonVersion && typeof metadata.daemonVersion === 'string') { updateData.daemonVersion = metadata.daemonVersion } } else if (job.status === JobStatus.FAILED) { this.logger.error(`START_SANDBOX job ${job.id} failed for sandbox ${sandboxId}: ${job.errorMessage}`) updateData.state = SandboxState.ERROR const { recoverable, errorReason } = sanitizeSandboxError(job.errorMessage) updateData.errorReason = errorReason || 'Failed to start sandbox' updateData.recoverable = recoverable } await this.sandboxRepository.update(sandboxId, { updateData, entity: sandbox }) } catch (error) { this.logger.error(`Error handling START_SANDBOX job completion for sandbox ${sandboxId}:`, error) } } private async handleStopSandboxJobCompletion(job: Job): Promise { const sandboxId = job.resourceId if (!sandboxId) return try { const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId } }) if (!sandbox) { this.logger.warn(`Sandbox ${sandboxId} not found for STOP_SANDBOX job ${job.id}`) return } if (sandbox.desiredState !== SandboxDesiredState.STOPPED) { this.logger.error( `Sandbox ${sandboxId} is not in desired state STOPPED for STOP_SANDBOX job ${job.id}. 
Desired state: ${sandbox.desiredState}`, ) return } const updateData: Partial = {} if (job.status === JobStatus.COMPLETED) { this.logger.debug(`STOP_SANDBOX job ${job.id} completed successfully, marking sandbox ${sandboxId} as STOPPED`) updateData.state = SandboxState.STOPPED updateData.errorReason = null } else if (job.status === JobStatus.FAILED) { this.logger.error(`STOP_SANDBOX job ${job.id} failed for sandbox ${sandboxId}: ${job.errorMessage}`) updateData.state = SandboxState.ERROR const { recoverable, errorReason } = sanitizeSandboxError(job.errorMessage) updateData.errorReason = errorReason || 'Failed to stop sandbox' updateData.recoverable = recoverable } await this.sandboxRepository.update(sandboxId, { updateData, entity: sandbox }) } catch (error) { this.logger.error(`Error handling STOP_SANDBOX job completion for sandbox ${sandboxId}:`, error) } } private async handleDestroySandboxJobCompletion(job: Job): Promise { const sandboxId = job.resourceId if (!sandboxId) return try { const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId } }) if (!sandbox) { this.logger.warn(`Sandbox ${sandboxId} not found for DESTROY_SANDBOX job ${job.id}`) return } if (sandbox.desiredState !== SandboxDesiredState.DESTROYED) { // Don't log anything because sandboxes can be destroyed on runners when archiving or moving to a new runner return } const updateData: Partial = {} if (job.status === JobStatus.COMPLETED) { this.logger.debug( `DESTROY_SANDBOX job ${job.id} completed successfully, marking sandbox ${sandboxId} as DESTROYED`, ) updateData.state = SandboxState.DESTROYED updateData.errorReason = null } else if (job.status === JobStatus.FAILED) { this.logger.error(`DESTROY_SANDBOX job ${job.id} failed for sandbox ${sandboxId}: ${job.errorMessage}`) updateData.state = SandboxState.ERROR const { recoverable, errorReason } = sanitizeSandboxError(job.errorMessage) updateData.errorReason = errorReason || 'Failed to destroy sandbox' updateData.recoverable = 
recoverable } await this.sandboxRepository.update(sandboxId, { updateData, entity: sandbox }) } catch (error) { this.logger.error(`Error handling DESTROY_SANDBOX job completion for sandbox ${sandboxId}:`, error) } } private async handlePullSnapshotJobCompletion(job: Job): Promise { const snapshotRef = job.resourceId const runnerId = job.runnerId if (!snapshotRef || !runnerId) return try { const snapshotRunner = await this.snapshotRunnerRepository.findOne({ where: { snapshotRef, runnerId }, }) if (!snapshotRunner) { this.logger.warn(`SnapshotRunner not found for snapshot ${snapshotRef} on runner ${runnerId}`) return } if (job.status === JobStatus.COMPLETED) { this.logger.debug( `PULL_SNAPSHOT job ${job.id} completed successfully, marking SnapshotRunner ${snapshotRunner.id} as READY`, ) snapshotRunner.state = SnapshotRunnerState.READY snapshotRunner.errorReason = null // Check if this is the initial runner for a snapshot and update the snapshot state const snapshot = await this.snapshotRepository.findOne({ where: { initialRunnerId: runnerId, ref: snapshotRef }, }) if (snapshot && (snapshot.state === SnapshotState.PULLING || snapshot.state === SnapshotState.BUILDING)) { this.logger.debug(`Marking snapshot ${snapshot.id} as ACTIVE after initial pull completed`) snapshot.state = SnapshotState.ACTIVE snapshot.errorReason = null snapshot.lastUsedAt = new Date() await this.snapshotRepository.save(snapshot) } } else if (job.status === JobStatus.FAILED) { this.logger.error(`PULL_SNAPSHOT job ${job.id} failed for snapshot ${snapshotRef}: ${job.errorMessage}`) snapshotRunner.state = SnapshotRunnerState.ERROR snapshotRunner.errorReason = job.errorMessage || 'Failed to pull snapshot' // Check if this is the initial runner for a snapshot and update the snapshot state const snapshot = await this.snapshotRepository.findOne({ where: { initialRunnerId: runnerId, ref: snapshotRef }, }) if (snapshot && snapshot.state === SnapshotState.PULLING) { this.logger.error(`Marking snapshot 
${snapshot.id} as ERROR after initial pull failed`) snapshot.state = SnapshotState.ERROR snapshot.errorReason = job.errorMessage || 'Failed to pull snapshot on initial runner' await this.snapshotRepository.save(snapshot) } } await this.snapshotRunnerRepository.save(snapshotRunner) } catch (error) { this.logger.error(`Error handling PULL_SNAPSHOT job completion for snapshot ${snapshotRef}:`, error) } } private async handleBuildSnapshotJobCompletion(job: Job): Promise { const snapshotRef = job.resourceId const runnerId = job.runnerId if (!snapshotRef || !runnerId) return try { // For BUILD_SNAPSHOT, find snapshot by buildInfo.snapshotRef const snapshot = await this.snapshotRepository .createQueryBuilder('snapshot') .leftJoinAndSelect('snapshot.buildInfo', 'buildInfo') .where('snapshot.initialRunnerId = :runnerId', { runnerId }) .andWhere('buildInfo.snapshotRef = :snapshotRef', { snapshotRef }) .getOne() // Update SnapshotRunner state const snapshotRunner = await this.snapshotRunnerRepository.findOne({ where: { snapshotRef, runnerId }, }) if (job.status === JobStatus.COMPLETED) { this.logger.debug(`BUILD_SNAPSHOT job ${job.id} completed successfully for snapshot ref ${snapshotRef}`) if (snapshot?.state === SnapshotState.BUILDING) { snapshot.state = SnapshotState.ACTIVE snapshot.errorReason = null snapshot.lastUsedAt = new Date() await this.snapshotRepository.save(snapshot) this.logger.debug(`Marked snapshot ${snapshot.id} as ACTIVE after build completed`) } if (snapshotRunner) { snapshotRunner.state = SnapshotRunnerState.READY snapshotRunner.errorReason = null await this.snapshotRunnerRepository.save(snapshotRunner) } } else if (job.status === JobStatus.FAILED) { this.logger.error(`BUILD_SNAPSHOT job ${job.id} failed for snapshot ref ${snapshotRef}: ${job.errorMessage}`) if (snapshot?.state === SnapshotState.BUILDING) { snapshot.state = SnapshotState.ERROR snapshot.errorReason = job.errorMessage || 'Failed to build snapshot' await 
this.snapshotRepository.save(snapshot) } if (snapshotRunner) { snapshotRunner.state = SnapshotRunnerState.ERROR snapshotRunner.errorReason = job.errorMessage || 'Failed to build snapshot' await this.snapshotRunnerRepository.save(snapshotRunner) } } } catch (error) { this.logger.error(`Error handling BUILD_SNAPSHOT job completion for snapshot ref ${snapshotRef}:`, error) } } private async handleRemoveSnapshotJobCompletion(job: Job): Promise { const snapshotRef = job.resourceId const runnerId = job.runnerId if (!snapshotRef || !runnerId) return try { if (job.status === JobStatus.COMPLETED) { this.logger.debug( `REMOVE_SNAPSHOT job ${job.id} completed successfully for snapshot ${snapshotRef} on runner ${runnerId}`, ) const affected = await this.snapshotRunnerRepository.delete({ snapshotRef, runnerId }) if (affected.affected && affected.affected > 0) { this.logger.debug( `Removed ${affected.affected} snapshot runners for snapshot ${snapshotRef} on runner ${runnerId}`, ) } } else if (job.status === JobStatus.FAILED) { this.logger.error( `REMOVE_SNAPSHOT job ${job.id} failed for snapshot ${snapshotRef} on runner ${runnerId}: ${job.errorMessage}`, ) } } catch (error) { this.logger.error(`Error handling REMOVE_SNAPSHOT job completion for snapshot ${snapshotRef}:`, error) } } private async handleCreateBackupJobCompletion(job: Job): Promise { const sandboxId = job.resourceId if (!sandboxId) return try { const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId } }) if (!sandbox) { this.logger.warn(`Sandbox ${sandboxId} not found for CREATE_BACKUP job ${job.id}`) return } const updateData: Partial = {} if (job.status === JobStatus.COMPLETED) { this.logger.debug( `CREATE_BACKUP job ${job.id} completed successfully, marking sandbox ${sandboxId} as BACKUP_COMPLETED`, ) Object.assign(updateData, Sandbox.getBackupStateUpdate(sandbox, BackupState.COMPLETED)) } else if (job.status === JobStatus.FAILED) { this.logger.error(`CREATE_BACKUP job ${job.id} failed for 
sandbox ${sandboxId}: ${job.errorMessage}`) Object.assign( updateData, Sandbox.getBackupStateUpdate(sandbox, BackupState.ERROR, undefined, undefined, job.errorMessage), ) } await this.sandboxRepository.update(sandboxId, { updateData, entity: sandbox }) } catch (error) { this.logger.error(`Error handling CREATE_BACKUP job completion for sandbox ${sandboxId}:`, error) } } private async handleRecoverSandboxJobCompletion(job: Job): Promise { const sandboxId = job.resourceId if (!sandboxId) return try { const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId } }) if (!sandbox) { this.logger.warn(`Sandbox ${sandboxId} not found for RECOVER_SANDBOX job ${job.id}`) return } if (sandbox.desiredState !== SandboxDesiredState.STARTED) { this.logger.error( `Sandbox ${sandboxId} is not in desired state STARTED for RECOVER_SANDBOX job ${job.id}. Desired state: ${sandbox.desiredState}`, ) return } const updateData: Partial = {} if (job.status === JobStatus.COMPLETED) { this.logger.debug( `RECOVER_SANDBOX job ${job.id} completed successfully, marking sandbox ${sandboxId} as STARTED`, ) updateData.state = SandboxState.STARTED updateData.errorReason = null } else if (job.status === JobStatus.FAILED) { this.logger.error(`RECOVER_SANDBOX job ${job.id} failed for sandbox ${sandboxId}: ${job.errorMessage}`) updateData.state = SandboxState.ERROR updateData.errorReason = job.errorMessage || 'Failed to recover sandbox' } await this.sandboxRepository.update(sandboxId, { updateData, entity: sandbox }) } catch (error) { this.logger.error(`Error handling RECOVER_SANDBOX job completion for sandbox ${sandboxId}:`, error) } } private async handleResizeSandboxJobCompletion(job: Job): Promise { const sandboxId = job.resourceId if (!sandboxId) return try { const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId } }) if (!sandbox) { this.logger.warn(`Sandbox ${sandboxId} not found for RESIZE_SANDBOX job ${job.id}`) return } if (sandbox.state !== 
SandboxState.RESIZING) { this.logger.warn( `Sandbox ${sandboxId} is not in RESIZING state for RESIZE_SANDBOX job ${job.id}. State: ${sandbox.state}`, ) return } // Determine the previous state (STARTED or STOPPED based on desiredState) const previousState = sandbox.desiredState === SandboxDesiredState.STARTED ? SandboxState.STARTED : sandbox.desiredState === SandboxDesiredState.STOPPED ? SandboxState.STOPPED : null if (!previousState) { this.logger.error( `Sandbox ${sandboxId} has unexpected desiredState ${sandbox.desiredState} for RESIZE_SANDBOX job ${job.id}`, ) return } // Calculate deltas before updating sandbox const payload = job.payload as { cpu?: number; memory?: number; disk?: number } // For cold resize (previousState === STOPPED), cpu/memory don't affect org quota. const isHotResize = previousState === SandboxState.STARTED const cpuDeltaForQuota = isHotResize ? (payload.cpu ?? sandbox.cpu) - sandbox.cpu : 0 const memDeltaForQuota = isHotResize ? (payload.memory ?? sandbox.mem) - sandbox.mem : 0 const diskDeltaForQuota = (payload.disk ?? sandbox.disk) - sandbox.disk // Disk only increases const updateData: Partial = {} if (job.status === JobStatus.COMPLETED) { this.logger.debug(`RESIZE_SANDBOX job ${job.id} completed successfully for sandbox ${sandboxId}`) // Update sandbox resources updateData.cpu = payload.cpu ?? sandbox.cpu updateData.mem = payload.memory ?? sandbox.mem updateData.disk = payload.disk ?? 
sandbox.disk updateData.state = previousState // Apply usage change (handles both positive and negative deltas) await this.organizationUsageService.applyResizeUsageChange( sandbox.organizationId, sandbox.region, cpuDeltaForQuota, memDeltaForQuota, diskDeltaForQuota, ) return } else if (job.status === JobStatus.FAILED) { this.logger.error(`RESIZE_SANDBOX job ${job.id} failed for sandbox ${sandboxId}: ${job.errorMessage}`) // Rollback pending usage (all deltas were tracked, including negative) await this.organizationUsageService.decrementPendingSandboxUsage( sandbox.organizationId, sandbox.region, cpuDeltaForQuota !== 0 ? cpuDeltaForQuota : undefined, memDeltaForQuota !== 0 ? memDeltaForQuota : undefined, diskDeltaForQuota !== 0 ? diskDeltaForQuota : undefined, ) updateData.state = previousState } await this.sandboxRepository.update(sandboxId, { updateData, entity: sandbox }) } catch (error) { this.logger.error(`Error handling RESIZE_SANDBOX job completion for sandbox ${sandboxId}:`, error) } } } ================================================ FILE: apps/api/src/sandbox/services/job.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ConflictException, Injectable, Logger, NotFoundException } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { Repository, LessThan, EntityManager } from 'typeorm' import { Job } from '../entities/job.entity' import { JobDto, JobStatus, JobType, ResourceType } from '../dto/job.dto' import { ResourceTypeForJobType } from '../dto/job-type-map.dto' import { InjectRedis } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { Cron, CronExpression } from '@nestjs/schedule' import { JobStateHandlerService } from './job-state-handler.service' import { propagation, context as otelContext } from '@opentelemetry/api' import { PaginatedList } from '../../common/interfaces/paginated-list.interface' const REDIS_BLOCKING_COMMAND_TIMEOUT_BUFFER_MS = 3_000 @Injectable() export class JobService { private readonly logger = new Logger(JobService.name) private readonly REDIS_JOB_QUEUE_PREFIX = 'runner:jobs:' constructor( @InjectRepository(Job) private readonly jobRepository: Repository, @InjectRedis() private readonly redis: Redis, private readonly jobStateHandlerService: JobStateHandlerService, ) {} /** * Create a job within the provided transaction manager * If manager is null, uses the default repository (for non-transactional operations) * @template T The JobType enum value - ensures compile-time type safety for resourceType and payload */ async createJob( manager: EntityManager | null, type: T, runnerId: string, resourceType: ResourceTypeForJobType, resourceId: string, payload?: string | Record, ): Promise { // Use provided manager if available, otherwise use default repository const repo = manager ? manager.getRepository(Job) : this.jobRepository // Capture current OpenTelemetry trace context for distributed tracing const traceContext = this.captureTraceContext() const encodedPayload = typeof payload === 'string' ? payload : payload ? 
JSON.stringify(payload) : undefined try { const job = new Job({ type, runnerId, resourceType, resourceId, status: JobStatus.PENDING, payload: encodedPayload, traceContext, }) await repo.insert(job) // Log with context-specific info const contextInfo = resourceId ? `${resourceType} ${resourceId}` : 'N/A' this.logger.debug(`Created job ${job.id} of type ${type} for ${contextInfo} on runner ${runnerId}`) // Notify runner via Redis - happens outside transaction // If transaction rolls back, notification is harmless (runner will poll and find nothing) await this.notifyRunner(runnerId, job.id) return job } catch (error) { if (error.code === '23505') { if (error.constraint === 'IDX_UNIQUE_INCOMPLETE_JOB') { this.logger.error(`An incomplete job already exists for ${resourceType} ${resourceId} on runner ${runnerId}`) } throw new ConflictException('An operation is already in progress for this resource') } this.logger.error(`Error creating job: ${error}`) throw error } } private async notifyRunner(runnerId: string, jobId: string): Promise { try { await this.redis.lpush(this.getRunnerQueueKey(runnerId), jobId) this.logger.debug(`Notified runner ${runnerId} about job ${jobId} via Redis`) } catch (error) { this.logger.warn(`Failed to notify runner ${runnerId} via Redis: ${error.message}`) // Job is still in DB, runner will pick it up via fallback polling } } async findOne(jobId: string): Promise { return this.jobRepository.findOneBy({ id: jobId }) } async pollJobs(runnerId: string, limit = 10, timeoutSeconds = 30, abortSignal?: AbortSignal): Promise { const queueKey = this.getRunnerQueueKey(runnerId) const maxTimeout = Math.min(timeoutSeconds, 60) // Max 60 seconds // Check if already aborted if (abortSignal?.aborted) { this.logger.debug(`Poll request for runner ${runnerId} was already aborted`) return [] } // STEP 1: Atomically claim pending jobs from database // This prevents duplicates by updating status to IN_PROGRESS let claimedJobs = await this.claimPendingJobs(runnerId, 
limit) if (claimedJobs.length > 0) { // Clear any stale job IDs from Redis queue try { await this.redis.del(queueKey) } catch (error) { this.logger.warn(`Failed to clear Redis queue: ${error.message}`) } return claimedJobs } // STEP 2: No existing jobs - wait for notification via Redis BRPOP // Create a new dedicated Redis client for this BRPOP to support concurrent polling from multiple runners // Each runner gets its own connection, preventing blocking issues let blockingClient: Redis | null = null try { this.logger.debug(`No existing jobs, runner ${runnerId} starting BRPOP with timeout ${maxTimeout}s`) blockingClient = this.redis.duplicate({ commandTimeout: maxTimeout * 1000 + REDIS_BLOCKING_COMMAND_TIMEOUT_BUFFER_MS, retryStrategy: () => null, }) // Wrap BRPOP in a promise that can be aborted const brpopPromise = blockingClient.brpop(queueKey, maxTimeout) let result: [string, string] | null = null if (abortSignal) { // Race between BRPOP and abort signal result = await Promise.race([ brpopPromise, new Promise((resolve) => { if (abortSignal.aborted) { resolve(null) } else { abortSignal.addEventListener('abort', () => resolve(null), { once: true }) } }), ]) // If aborted, disconnect immediately to cancel BRPOP if (abortSignal.aborted) { this.logger.debug(`BRPOP aborted for runner ${runnerId}, closing Redis connection`) blockingClient.disconnect() return [] } } else { result = await brpopPromise } if (result) { // Got notification - job(s) available // Clear the entire queue (job IDs are just hints, not used directly) this.logger.debug(`Got notification from Redis for runner ${runnerId}`) try { await this.redis.del(queueKey) } catch (error) { this.logger.warn(`Failed to clear Redis queue: ${error.message}`) } // Atomically claim jobs from database claimedJobs = await this.claimPendingJobs(runnerId, limit) if (claimedJobs.length > 0) { this.logger.debug(`Claimed ${claimedJobs.length} jobs after Redis notification for runner ${runnerId}`) return claimedJobs } // 
Notification received but no jobs found - possible race condition this.logger.warn(`Received Redis notification but no pending jobs found for runner ${runnerId}`) } else { // BRPOP timeout - no jobs received this.logger.debug(`BRPOP timeout for runner ${runnerId}, no new jobs`) } } catch (error) { this.logger.error(`Redis BRPOP error for runner ${runnerId}: ${error.message}`) // Fall through to database polling fallback } finally { // Always close the blocking client to prevent connection leaks if (blockingClient) { try { blockingClient.disconnect() } catch (error) { this.logger.warn(`Failed to disconnect blocking Redis client for runner ${runnerId}: ${error.message}`) } } } // STEP 3: Final fallback - check database again // This handles race conditions and Redis failures claimedJobs = await this.claimPendingJobs(runnerId, limit) if (claimedJobs.length > 0) { this.logger.debug(`Claimed ${claimedJobs.length} pending jobs in fallback for runner ${runnerId}`) } return claimedJobs } async updateJobStatus( jobId: string, status: JobStatus, errorMessage?: string, resultMetadata?: string, ): Promise { const job = await this.findOne(jobId) if (!job) { throw new NotFoundException(`Job with ID ${jobId} not found`) } if (!this.isValidStatusTransition(job.status, status)) { throw new ConflictException(`Invalid job status transition from ${job.status} to ${status} for job ${jobId}`) } job.status = status if (errorMessage) { job.errorMessage = errorMessage } if (status === JobStatus.IN_PROGRESS && !job.startedAt) { job.startedAt = new Date() } if (status === JobStatus.COMPLETED || status === JobStatus.FAILED) { job.completedAt = new Date() } if (resultMetadata) { job.resultMetadata = resultMetadata } const updatedJob = await this.jobRepository.save(job) this.logger.debug(`Updated job ${jobId} status to ${status}`) // Handle job completion for v2 runners - update sandbox/snapshot/backup state if (status === JobStatus.COMPLETED || status === JobStatus.FAILED) { // Fire and forget 
- don't block the response
// Fire-and-forget: completion side effects must never delay the API response;
// failures are logged, not propagated to the caller.
this.jobStateHandlerService.handleJobCompletion(updatedJob).catch((error) => {
  this.logger.error(`Error handling job completion for job ${jobId}:`, error)
})
}
return updatedJob
}

/** Oldest-first batch of PENDING jobs assigned to a runner (default batch size 10). */
async findPendingJobsForRunner(runnerId: string, limit = 10): Promise {
  return this.jobRepository.find({
    where: {
      runnerId,
      status: JobStatus.PENDING,
    },
    order: {
      createdAt: 'ASC',
    },
    take: limit,
  })
}

/**
 * Paginated job listing for a runner, newest first.
 * @param status optional filter; omitted means all statuses
 * @returns items as JobDto plus total / page / totalPages metadata
 */
async findJobsForRunner(runnerId: string, status?: JobStatus, page = 1, limit = 100): Promise> {
  const whereCondition: { runnerId: string; status?: JobStatus } = { runnerId }
  if (status) {
    whereCondition.status = status
  }
  const [jobs, total] = await this.jobRepository.findAndCount({
    where: whereCondition,
    order: {
      createdAt: 'DESC',
    },
    skip: (page - 1) * limit,
    take: limit,
  })
  return {
    items: jobs.map((job) => new JobDto(job)),
    total,
    page,
    totalPages: Math.ceil(total / limit),
  }
}

/** Convenience wrapper: jobs whose resource is the given sandbox. */
async findJobsBySandboxId(sandboxId: string): Promise {
  return this.findJobsByResourceId(ResourceType.SANDBOX, sandboxId)
}

/** All jobs for a given resource, newest first. */
async findJobsByResourceId(resourceType: ResourceType, resourceId: string): Promise {
  return this.jobRepository.find({
    where: {
      resourceType,
      resourceId,
    },
    order: {
      createdAt: 'DESC',
    },
  })
}

/**
 * Polls every 100ms until the job reaches a terminal state (COMPLETED or FAILED).
 * @returns the terminal job, or null if the job does not exist
 * @throws {Error} if waitTimeout (ms) elapses before a terminal state is observed
 */
async waitJobCompletion(jobId: string, waitTimeout: number): Promise {
  const startTime = Date.now()
  const endTime = startTime + waitTimeout
  while (Date.now() < endTime) {
    const job = await this.findOne(jobId)
    if (!job) {
      return null
    }
    if (job.status === JobStatus.COMPLETED || job.status === JobStatus.FAILED) {
      return job
    }
    // Brief sleep between polls to avoid hammering the database.
    await new Promise((resolve) => setTimeout(resolve, 100))
  }
  throw new Error(`Job ${jobId} timed out after ${waitTimeout}ms`)
}

/**
 * Captures the current OpenTelemetry trace context in W3C Trace Context format
 * This allows distributed tracing across the API and runner services
 * @returns A map of trace context headers (traceparent, tracestate), or null when
 * no active trace exists or capture fails (capture is strictly best-effort)
 */
private captureTraceContext(): Record | null {
  try {
    const carrier: Record = {}
    // Extract current trace context
    // into carrier object using W3C Trace Context format
    propagation.inject(otelContext.active(), carrier)
    // Return the carrier if it contains trace information
    if (Object.keys(carrier).length > 0) {
      this.logger.debug(`Captured trace context: ${JSON.stringify(carrier)}`)
      return carrier
    }
  } catch (error) {
    this.logger.warn(`Failed to capture trace context: ${error.message}`)
  }
  return null
}

/**
 * Job state machine: PENDING → IN_PROGRESS → COMPLETED/FAILED (PENDING may also
 * fail directly). Terminal states accept no transitions; same-state is a no-op allowed.
 */
private isValidStatusTransition(currentStatus: JobStatus, newStatus: JobStatus): boolean {
  if (currentStatus === newStatus) {
    return true
  }
  const allowedTransitions: Record = {
    [JobStatus.PENDING]: [JobStatus.IN_PROGRESS, JobStatus.FAILED],
    [JobStatus.IN_PROGRESS]: [JobStatus.COMPLETED, JobStatus.FAILED],
    [JobStatus.COMPLETED]: [],
    [JobStatus.FAILED]: [],
  }
  return allowedTransitions[currentStatus]?.includes(newStatus) ?? false
}

/** Redis key for a runner's job queue. */
private getRunnerQueueKey(runnerId: string): string {
  return `${this.REDIS_JOB_QUEUE_PREFIX}${runnerId}`
}

/**
 * Cron job to check for stale jobs and mark them as failed
 * Runs every minute to find jobs that have been IN_PROGRESS for too long
 */
@Cron(CronExpression.EVERY_MINUTE)
async handleStaleJobs(): Promise {
  const staleThresholdMinutes = 10
  const staleThreshold = new Date(Date.now() - staleThresholdMinutes * 60 * 1000)
  try {
    // Find jobs that are IN_PROGRESS but haven't been updated in the threshold time
    const staleJobs = await this.jobRepository.find({
      where: {
        status: JobStatus.IN_PROGRESS,
        updatedAt: LessThan(staleThreshold),
      },
    })
    if (staleJobs.length === 0) {
      return
    }
    this.logger.warn(`Found ${staleJobs.length} stale jobs, marking as failed`)
    // Mark each stale job as failed with timeout error
    // (per-job try/catch so one failure doesn't abort the sweep)
    for (const job of staleJobs) {
      try {
        await this.updateJobStatus(
          job.id,
          JobStatus.FAILED,
          `Job timed out - no update received for ${staleThresholdMinutes} minutes`,
        )
        this.logger.warn(
          `Marked job ${job.id} (type: ${job.type}, resource: ${job.resourceType} ${job.resourceId}) as failed due to timeout`,
        )
      } catch (error) {
        this.logger.error(`Error marking job ${job.id} as failed: ${error.message}`, error.stack)
      }
    }
  } catch (error) {
    this.logger.error(`Error handling stale jobs: ${error.message}`, error.stack)
  }
}

/**
 * Atomically claim pending jobs by updating their status to IN_PROGRESS
 * This prevents duplicate processing of the same job
 */
private async claimPendingJobs(runnerId: string, limit: number): Promise {
  // Find pending jobs
  const jobs = await this.jobRepository.find({
    where: {
      runnerId,
      status: JobStatus.PENDING,
    },
    order: {
      createdAt: 'ASC',
    },
    take: limit,
  })
  if (jobs.length === 0) {
    return []
  }
  // Update jobs to IN_PROGRESS
  const now = new Date()
  const claimedJobs: JobDto[] = []
  for (const job of jobs) {
    try {
      job.status = JobStatus.IN_PROGRESS
      job.startedAt = now
      job.updatedAt = now
      // save() with @VersionColumn will automatically check version and throw OptimisticLockVersionMismatchError if changed
      const savedJob = await this.jobRepository.save(job)
      claimedJobs.push(new JobDto(savedJob))
    } catch (error) {
      // If optimistic lock fails, job was already claimed by another runner - skip it
      this.logger.debug(`Job ${job.id} already claimed by another runner (version mismatch)`)
    }
  }
  if (claimedJobs.length > 0) {
    this.logger.debug(`Claimed ${claimedJobs.length} existing pending jobs for runner ${runnerId}`)
  }
  return claimedJobs
}
}

================================================
FILE: apps/api/src/sandbox/services/proxy-cache-invalidation.service.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { InjectRedis } from '@nestjs-modules/ioredis' import { Injectable, Logger } from '@nestjs/common' import { OnEvent } from '@nestjs/event-emitter' import Redis from 'ioredis' import { SandboxEvents } from '../constants/sandbox-events.constants' import { SandboxArchivedEvent } from '../events/sandbox-archived.event' @Injectable() export class ProxyCacheInvalidationService { private readonly logger = new Logger(ProxyCacheInvalidationService.name) private static readonly RUNNER_INFO_CACHE_PREFIX = 'proxy:sandbox-runner-info:' constructor(@InjectRedis() private readonly redis: Redis) {} @OnEvent(SandboxEvents.ARCHIVED) async handleSandboxArchived(event: SandboxArchivedEvent): Promise { await this.invalidateRunnerCache(event.sandbox.id) } private async invalidateRunnerCache(sandboxId: string): Promise { try { await this.redis.del(`${ProxyCacheInvalidationService.RUNNER_INFO_CACHE_PREFIX}${sandboxId}`) this.logger.debug(`Invalidated sandbox runner cache for ${sandboxId}`) } catch (error) { this.logger.warn(`Failed to invalidate runner cache for sandbox ${sandboxId}: ${error.message}`) } } } ================================================ FILE: apps/api/src/sandbox/services/runner.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import {
  BadRequestException,
  ConflictException,
  HttpException,
  HttpStatus,
  Inject,
  Injectable,
  Logger,
  NotFoundException,
} from '@nestjs/common'
import { InjectRepository } from '@nestjs/typeorm'
import { Cron, CronExpression } from '@nestjs/schedule'
import { DataSource, FindOptionsWhere, In, MoreThanOrEqual, Not, Repository, UpdateResult } from 'typeorm'
import { Runner } from '../entities/runner.entity'
import { CreateRunnerInternalDto } from '../dto/create-runner-internal.dto'
import { SandboxClass } from '../enums/sandbox-class.enum'
import { RunnerState } from '../enums/runner-state.enum'
import { BadRequestError } from '../../exceptions/bad-request.exception'
import { EventEmitter2 } from '@nestjs/event-emitter'
import { SandboxState } from '../enums/sandbox-state.enum'
import { SnapshotRunner } from '../entities/snapshot-runner.entity'
import { SnapshotRunnerState } from '../enums/snapshot-runner-state.enum'
import { RunnerSnapshotDto } from '../dto/runner-snapshot.dto'
import { RunnerAdapterFactory, RunnerInfo } from '../runner-adapter/runnerAdapter'
import { RedisLockProvider } from '../common/redis-lock.provider'
import { TypedConfigService } from '../../config/typed-config.service'
import { LogExecution } from '../../common/decorators/log-execution.decorator'
import { WithInstrumentation } from '../../common/decorators/otel.decorator'
import { RegionService } from '../../region/services/region.service'
import { RUNNER_NAME_REGEX } from '../constants/runner-name-regex.constant'
import { RegionType } from '../../region/enums/region-type.enum'
import { RunnerDto } from '../dto/runner.dto'
import { RunnerEvents } from '../constants/runner-events'
import { RunnerStateUpdatedEvent } from '../events/runner-state-updated.event'
import { RunnerDeletedEvent } from '../events/runner-deleted.event'
import { generateApiKeyValue } from '../../common/utils/api-key'
import { RunnerFullDto } from '../dto/runner-full.dto'
import {
  Snapshot
} from '../entities/snapshot.entity'
import { InjectRedis } from '@nestjs-modules/ioredis'
import Redis from 'ioredis'
import { SandboxDesiredState } from '../enums/sandbox-desired-state.enum'
import { runnerLookupCacheKeyById, RUNNER_LOOKUP_CACHE_TTL_MS } from '../utils/runner-lookup-cache.util'
import { SandboxRepository } from '../repositories/sandbox.repository'
import { RunnerServiceInfo } from '../common/runner-service-info'

// Manages runner lifecycle: registration, lookup, health reporting,
// scheduling-candidate selection, draining and decommissioning.
@Injectable()
export class RunnerService {
  private readonly logger = new Logger(RunnerService.name)
  // Captured at service start; used by checkRunnerV2Health to grant runners a
  // grace period to re-establish health reports after an API restart.
  private readonly serviceStartTime = new Date()
  private readonly scoreConfig: AvailabilityScoreConfig

  constructor(
    @InjectRepository(Runner) private readonly runnerRepository: Repository,
    private readonly runnerAdapterFactory: RunnerAdapterFactory,
    private readonly sandboxRepository: SandboxRepository,
    @InjectRepository(SnapshotRunner) private readonly snapshotRunnerRepository: Repository,
    private readonly redisLockProvider: RedisLockProvider,
    private readonly configService: TypedConfigService,
    private readonly regionService: RegionService,
    @InjectRepository(Snapshot) private readonly snapshotRepository: Repository,
    @Inject(EventEmitter2) private eventEmitter: EventEmitter2,
    private readonly dataSource: DataSource,
    @InjectRedis() private readonly redis: Redis,
  ) {
    this.scoreConfig = this.getAvailabilityScoreConfig()
  }

  /**
   * @throws {BadRequestException} If the runner name or class is invalid.
   * @throws {NotFoundException} If the region is not found.
   * @throws {ConflictException} If a runner with the same values already exists.
*/ async create(createRunnerDto: CreateRunnerInternalDto): Promise<{ runner: Runner apiKey: string }> { if (!RUNNER_NAME_REGEX.test(createRunnerDto.name)) { throw new BadRequestException('Runner name must contain only letters, numbers, underscores, periods, and hyphens') } if (createRunnerDto.name.length < 2 || createRunnerDto.name.length > 255) { throw new BadRequestException('Runner name must be between 3 and 255 characters') } const apiKey = createRunnerDto.apiKey ?? generateApiKeyValue() let runner: Runner switch (createRunnerDto.apiVersion) { case '0': runner = new Runner({ region: createRunnerDto.regionId, name: createRunnerDto.name, apiVersion: createRunnerDto.apiVersion, apiKey: apiKey, cpu: createRunnerDto.cpu, memoryGiB: createRunnerDto.memoryGiB, diskGiB: createRunnerDto.diskGiB, domain: createRunnerDto.domain, apiUrl: createRunnerDto.apiUrl, proxyUrl: createRunnerDto.proxyUrl, appVersion: createRunnerDto.appVersion, }) break case '2': runner = new Runner({ region: createRunnerDto.regionId, name: createRunnerDto.name, apiVersion: createRunnerDto.apiVersion, apiKey: apiKey, appVersion: createRunnerDto.appVersion, }) break default: throw new BadRequestException('Invalid runner version') } try { const savedRunner = await this.runnerRepository.save(runner) this.invalidateRunnerCache(savedRunner.id) return { runner: savedRunner, apiKey } } catch (error) { if (error.code === '23505') { if (error.detail.includes('domain')) { throw new ConflictException('This domain is already in use') } if (error.detail.includes('name')) { throw new ConflictException(`Runner with name ${createRunnerDto.name} already exists in this region`) } throw new ConflictException('A runner with these values already exists') } throw error } } async findAllFull(): Promise { const runners = await this.runnerRepository.find() const regionIds = new Set(runners.map((runner) => runner.region)) const regions = await this.regionService.findByIds(Array.from(regionIds)) const regionTypeMap = new 
Map()
regions.forEach((region) => {
  regionTypeMap.set(region.id, region.regionType)
})
return runners.map((runner) => RunnerFullDto.fromRunner(runner, regionTypeMap.get(runner.region)))
}

/** Runners in a region, as public DTOs. */
async findAllByRegion(regionId: string): Promise {
  const runners = await this.runnerRepository.find({
    where: {
      region: regionId,
    },
  })
  return runners.map(RunnerDto.fromRunner)
}

/** Runners in a region as full DTOs, annotated with the region's type. */
async findAllByRegionFull(regionId: string): Promise {
  const runners = await this.runnerRepository.find({
    where: {
      region: regionId,
    },
  })
  const region = await this.regionService.findOne(regionId)
  return runners.map((runner) => RunnerFullDto.fromRunner(runner, region?.regionType))
}

/** Runners across all regions visible to an organization, optionally filtered by region type. */
async findAllByOrganization(organizationId: string, regionType?: RegionType): Promise {
  const regions = await this.regionService.findAllByOrganization(organizationId, regionType)
  const regionIds = regions.map((region) => region.id)
  const runners = await this.runnerRepository.find({
    where: {
      region: In(regionIds),
    },
  })
  return runners.map(RunnerDto.fromRunner)
}

/** Id-ordered (stable) page of draining, not-yet-decommissioned runners. */
async findDrainingPaginated(skip: number, take: number): Promise {
  return this.runnerRepository.find({
    where: {
      draining: true,
      state: Not(RunnerState.DECOMMISSIONED),
    },
    order: {
      id: 'ASC',
    },
    skip,
    take,
  })
}

async findAllReady(): Promise {
  return this.runnerRepository.find({
    where: {
      state: RunnerState.READY,
    },
  })
}

/** Single-runner lookup served from the TypeORM query-result cache. */
async findOne(id: string): Promise {
  return this.runnerRepository.findOne({
    where: { id },
    cache: {
      id: runnerLookupCacheKeyById(id),
      milliseconds: RUNNER_LOOKUP_CACHE_TTL_MS,
    },
  })
}

/** @throws {NotFoundException} If no runner exists with the given id. */
async findOneOrFail(id: string): Promise {
  const runner = await this.findOne(id)
  if (!runner) {
    throw new NotFoundException(`Runner with ID ${id} not found`)
  }
  return runner
}

/** Full DTO variant of findOneOrFail, annotated with the region's type. */
async findOneFullOrFail(id: string): Promise {
  const runner = await this.findOneOrFail(id)
  const region = await this.regionService.findOne(runner.region)
  return RunnerFullDto.fromRunner(runner, region?.regionType)
}

async findOneByDomain(domain: string): Promise {
  return
this.runnerRepository.findOneBy({ domain })
}

async findByIds(runnerIds: string[]): Promise {
  if (runnerIds.length === 0) {
    return []
  }
  return this.runnerRepository.find({
    where: { id: In(runnerIds) },
  })
}

async findByApiKey(apiKey: string): Promise {
  return this.runnerRepository.findOneBy({ apiKey })
}

/**
 * Resolves the runner hosting a sandbox.
 * @throws {NotFoundException} If the sandbox is missing/destroyed, or has no runner assigned.
 */
async findBySandboxId(sandboxId: string): Promise {
  const sandbox = await this.sandboxRepository.findOne({
    where: { id: sandboxId, state: Not(SandboxState.DESTROYED) },
    select: ['runnerId'],
  })
  if (!sandbox) {
    throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`)
  }
  if (!sandbox.runnerId) {
    throw new NotFoundException(`Sandbox with ID ${sandboxId} does not have a runner`)
  }
  return this.findOne(sandbox.runnerId)
}

/** Region id only — narrow select, eager relations skipped for speed. */
async getRegionId(runnerId: string): Promise {
  const runner = await this.runnerRepository.findOne({
    where: {
      id: runnerId,
    },
    select: ['region'],
    loadEagerRelations: false,
  })
  if (!runner || !runner.region) {
    throw new NotFoundException('Runner not found')
  }
  return runner.region
}

/**
 * Scheduling candidates: READY, schedulable, not draining, and at or above the
 * availability-score threshold (param override or configured default).
 */
async findAvailableRunners(params: GetRunnerParams): Promise {
  const runnerFilter: FindOptionsWhere = {
    state: RunnerState.READY,
    unschedulable: Not(true),
    draining: Not(true),
    availabilityScore: params.availabilityScoreThreshold
      ? MoreThanOrEqual(params.availabilityScoreThreshold)
      : MoreThanOrEqual(this.configService.getOrThrow('runnerScore.thresholds.availability')),
  }
  // Drop falsy entries from the exclusion list before using it.
  const excludedRunnerIds = params.excludedRunnerIds?.length
    ?
params.excludedRunnerIds.filter((id) => !!id)
    : undefined
  if (params.snapshotRef !== undefined) {
    // Restrict to runners that already have this snapshot READY locally.
    const snapshotRunners = await this.snapshotRunnerRepository.find({
      where: {
        state: SnapshotRunnerState.READY,
        snapshotRef: params.snapshotRef,
      },
    })
    let runnerIds = snapshotRunners.map((snapshotRunner) => snapshotRunner.runnerId)
    if (excludedRunnerIds?.length) {
      runnerIds = runnerIds.filter((id) => !excludedRunnerIds.includes(id))
    }
    if (!runnerIds.length) {
      return []
    }
    runnerFilter.id = In(runnerIds)
  } else if (excludedRunnerIds?.length) {
    runnerFilter.id = Not(In(excludedRunnerIds))
  }
  if (params.regions?.length) {
    runnerFilter.region = In(params.regions)
  }
  if (params.sandboxClass !== undefined) {
    runnerFilter.class = params.sandboxClass
  }
  const runners = await this.runnerRepository.find({
    where: runnerFilter,
  })
  // Top 10 candidates, highest availability score first.
  return runners.sort((a, b) => b.availabilityScore - a.availabilityScore).slice(0, 10)
}

/**
 * @throws {NotFoundException} If the runner is not found.
 * @throws {HttpException} If the runner is not unschedulable.
 * @throws {HttpException} If the runner has sandboxes associated with it.
*/
async remove(id: string): Promise {
  const runner = await this.findOne(id)
  if (!runner) {
    throw new NotFoundException('Runner not found')
  }
  // A runner must be taken out of scheduling before it can be deleted.
  if (!runner.unschedulable) {
    throw new HttpException(
      'Cannot delete runner which is available for scheduling sandboxes',
      HttpStatus.PRECONDITION_REQUIRED,
    )
  }
  // Only archived/destroyed sandboxes may remain on a runner being deleted.
  const sandboxCount = await this.sandboxRepository.count({
    where: { runnerId: id, state: Not(In([SandboxState.ARCHIVED, SandboxState.DESTROYED])) },
  })
  if (sandboxCount > 0) {
    throw new HttpException(
      'Cannot delete runner which has sandboxes associated with it',
      HttpStatus.PRECONDITION_REQUIRED,
    )
  }
  // Delete and notify listeners inside a single transaction so both succeed or neither does.
  await this.dataSource.transaction(async (em) => {
    await em.delete(Runner, id)
    await this.eventEmitter.emitAsync(RunnerEvents.DELETED, new RunnerDeletedEvent(em, id))
  })
  this.invalidateRunnerCache(id)
}

/**
 * Records a health report from a runner: marks it READY (unless services are
 * unhealthy), refreshes endpoints/version, and recomputes its availability score
 * from the reported metrics. No-op for unknown or decommissioned runners.
 */
async updateRunnerHealth(
  runnerId: string,
  domain?: string,
  apiUrl?: string,
  proxyUrl?: string,
  serviceHealth?: RunnerServiceInfo[],
  metrics?: {
    currentCpuLoadAverage?: number
    currentCpuUsagePercentage?: number
    currentMemoryUsagePercentage?: number
    currentDiskUsagePercentage?: number
    currentAllocatedCpu?: number
    currentAllocatedMemoryGiB?: number
    currentAllocatedDiskGiB?: number
    currentSnapshotCount?: number
    currentStartedSandboxes?: number
    cpu?: number
    memoryGiB?: number
    diskGiB?: number
  },
  appVersion?: string,
): Promise {
  const runner = await this.findOne(runnerId)
  if (!runner) {
    this.logger.error(`Runner ${runnerId} not found when trying to update health`)
    return
  }
  if (runner.state === RunnerState.DECOMMISSIONED) {
    this.logger.debug(`Runner ${runnerId} is decommissioned, not updating health`)
    return
  }
  // A health report implies the runner is reachable: default to READY.
  const updateData: Partial = {
    state: RunnerState.READY,
    lastChecked: new Date(),
  }
  if (domain) {
    updateData.domain = domain
  }
  if (apiUrl) {
    updateData.apiUrl = apiUrl
  }
  if (proxyUrl) {
    updateData.proxyUrl = proxyUrl
  }
  if (appVersion) {
    updateData.appVersion = appVersion
  }
  if (serviceHealth !== undefined) {
    updateData.serviceHealth = serviceHealth
  } else {
    // Clear any
previously stored service health when no new health data is provided
    updateData.serviceHealth = null
  }
  // Any unhealthy service flips the runner to UNRESPONSIVE for this report.
  const unhealthyServices = serviceHealth?.filter((s) => !s.healthy) ?? []
  if (unhealthyServices.length > 0) {
    const unhealthySummary = unhealthyServices
      .map((s) => `"${s.serviceName}"${s.errorReason ? ` (${s.errorReason})` : ''}`)
      .join(', ')
    this.logger.warn(`Runner ${runnerId} services reported unhealthy: ${unhealthySummary}`)
    updateData.state = RunnerState.UNRESPONSIVE
  }
  if (metrics) {
    // Missing metric values are normalized to 0 before persisting.
    updateData.currentCpuLoadAverage = metrics.currentCpuLoadAverage || 0
    updateData.currentCpuUsagePercentage = metrics.currentCpuUsagePercentage || 0
    updateData.currentMemoryUsagePercentage = metrics.currentMemoryUsagePercentage || 0
    updateData.currentDiskUsagePercentage = metrics.currentDiskUsagePercentage || 0
    updateData.currentAllocatedCpu = metrics.currentAllocatedCpu || 0
    updateData.currentAllocatedMemoryGiB = metrics.currentAllocatedMemoryGiB || 0
    updateData.currentAllocatedDiskGiB = metrics.currentAllocatedDiskGiB || 0
    updateData.currentSnapshotCount = metrics.currentSnapshotCount || 0
    updateData.currentStartedSandboxes = metrics.currentStartedSandboxes || 0
    updateData.cpu = metrics.cpu
    updateData.memoryGiB = metrics.memoryGiB
    updateData.diskGiB = metrics.diskGiB
    // Recompute the scheduling score from freshly reported metrics; capacity
    // fields fall back to previously known values when not reported.
    updateData.availabilityScore = this.calculateAvailabilityScore(runnerId, {
      cpuLoadAverage: updateData.currentCpuLoadAverage,
      cpuUsage: updateData.currentCpuUsagePercentage,
      memoryUsage: updateData.currentMemoryUsagePercentage,
      diskUsage: updateData.currentDiskUsagePercentage,
      allocatedCpu: updateData.currentAllocatedCpu,
      allocatedMemoryGiB: updateData.currentAllocatedMemoryGiB,
      allocatedDiskGiB: updateData.currentAllocatedDiskGiB,
      runnerCpu: updateData.cpu || runner.cpu,
      runnerMemoryGiB: updateData.memoryGiB || runner.memoryGiB,
      runnerDiskGiB: updateData.diskGiB || runner.diskGiB,
      startedSandboxes: updateData.currentStartedSandboxes || 0,
    })
  }
  await this.updateRunner(runnerId, updateData)
  this.logger.debug(`Updated
health for runner ${runnerId}`)
  this.eventEmitter.emit(
    RunnerEvents.STATE_UPDATED,
    new RunnerStateUpdatedEvent(runner, runner.state, updateData.state),
  )
}

/**
 * Transitions a runner's state and emits STATE_UPDATED.
 * No-op for unknown or decommissioned runners.
 */
private async updateRunnerState(runnerId: string, newState: RunnerState): Promise {
  const runner = await this.findOne(runnerId)
  if (!runner) {
    this.logger.error(`Runner ${runnerId} not found when trying to update state`)
    return
  }
  // Don't change state if runner is decommissioned
  if (runner.state === RunnerState.DECOMMISSIONED) {
    this.logger.debug(`Runner ${runnerId} is decommissioned, not updating state`)
    return
  }
  await this.updateRunner(runnerId, {
    state: newState,
    lastChecked: new Date(),
  })
  this.eventEmitter.emit(RunnerEvents.STATE_UPDATED, new RunnerStateUpdatedEvent(runner, runner.state, newState))
}

/**
 * Periodic health sweep over runners (stalest first, batches of 100).
 * A Redis lock ensures only one API instance runs the sweep at a time.
 */
@Cron(CronExpression.EVERY_10_SECONDS, { name: 'check-runners', waitForCompletion: true })
@LogExecution('check-runners')
@WithInstrumentation()
private async handleCheckRunners() {
  const lockKey = 'check-runners'
  const hasLock = await this.redisLockProvider.lock(lockKey, 60)
  if (!hasLock) {
    return
  }
  try {
    const runners = await this.runnerRepository.find({
      where: [
        {
          apiVersion: '0',
          state: Not(RunnerState.DECOMMISSIONED),
        },
        {
          // v2 runners report health via healthcheck endpoint, so we only check if the health is stale (lastChecked timestamp)
          apiVersion: '2',
          state: RunnerState.READY,
        },
      ],
      order: {
        lastChecked: {
          direction: 'ASC',
          nulls: 'FIRST',
        },
      },
      take: 100,
    })
    await Promise.allSettled(
      runners.map(async (runner) => {
        // v2 runners report health via healthcheck endpoint, check based on lastChecked timestamp
        if (runner.apiVersion === '2') {
          await this.checkRunnerV2Health(runner)
          return
        }
        // v0 runners: imperative health check via adapter
        // Only retry runners that were READY (already-unhealthy ones fail fast).
        const shouldRetry = runner.state === RunnerState.READY
        const retryDelays = shouldRetry ?
[500, 1000] : []
        for (let attempt = 0; attempt <= retryDelays.length; attempt++) {
          if (attempt > 0) {
            // Back off before each retry.
            await new Promise((resolve) => setTimeout(resolve, retryDelays[attempt - 1]))
          }
          const abortController = new AbortController()
          let timeoutId: NodeJS.Timeout | null = null
          const runnerHealthTimeoutSeconds = this.configService.get('runnerHealthTimeout')
          try {
            // Race the health check against a timeout that also aborts the in-flight requests.
            await Promise.race([
              (async () => {
                this.logger.debug(`Checking runner ${runner.id}`)
                const runnerAdapter = await this.runnerAdapterFactory.create(runner)
                await runnerAdapter.healthCheck(abortController.signal)
                let runnerInfo: RunnerInfo | undefined
                try {
                  // Runner info is best-effort: health can be recorded without it.
                  runnerInfo = await runnerAdapter.runnerInfo(abortController.signal)
                } catch (e) {
                  this.logger.warn(`Failed to get runner info for runner ${runner.id}: ${e.message}`)
                }
                await this.updateRunnerHealth(
                  runner.id,
                  undefined,
                  undefined,
                  undefined,
                  runnerInfo?.serviceHealth,
                  runnerInfo?.metrics,
                  runnerInfo?.appVersion,
                )
              })(),
              new Promise((_, reject) => {
                timeoutId = setTimeout(() => {
                  abortController.abort()
                  reject(new Error('Health check timeout'))
                }, runnerHealthTimeoutSeconds * 1000)
              }),
            ])
            if (timeoutId) {
              clearTimeout(timeoutId)
            }
            return // Success, exit retry loop
          } catch (e) {
            if (timeoutId) {
              clearTimeout(timeoutId)
            }
            // Classify the failure for a targeted log message.
            if (e.message === 'Health check timeout') {
              this.logger.error(
                `Runner ${runner.id} health check timed out after ${runnerHealthTimeoutSeconds} seconds`,
              )
            } else if (e.code === 'ECONNREFUSED') {
              this.logger.error(`Runner ${runner.id} not reachable`)
            } else if (e.name === 'AbortError') {
              this.logger.error(`Runner ${runner.id} health check was aborted due to timeout`)
            } else {
              this.logger.error(`Error checking runner ${runner.id}`, e)
            }
            // If last attempt, mark as unresponsive
            if (attempt === retryDelays.length) {
              await this.updateRunnerState(runner.id, RunnerState.UNRESPONSIVE)
            }
          }
        }
      }),
    )
  } finally {
    await this.redisLockProvider.unlock(lockKey)
  }
}

/**
 * Check v2 runner health based on lastChecked timestamp.
 * v2 runners report health via the healthcheck endpoint, so we check if lastChecked is within threshold.
 */
private async checkRunnerV2Health(runner: Runner): Promise {
  const markAsUnresponsive = async () => {
    this.logger.warn(
      `v2 Runner ${runner.id} health check stale (last: ${Math.round((Date.now() - runner.lastChecked.getTime()) / 1000)}s ago), marking as UNRESPONSIVE`,
    )
    await this.updateRunnerState(runner.id, RunnerState.UNRESPONSIVE)
  }
  // Never reported health yet - nothing to compare against.
  if (!runner.lastChecked) {
    return
  }
  // v2 runners report health every ~10 seconds via the healthcheck endpoint
  // Allow 60 seconds (6 missed healthchecks) before marking as UNRESPONSIVE
  const healthCheckThresholdMs = 60 * 1000
  if (runner.lastChecked < this.serviceStartTime) {
    // Allow the runner a grace period to re-establish health checks
    const timeSinceServiceStart = Date.now() - this.serviceStartTime.getTime()
    if (timeSinceServiceStart > healthCheckThresholdMs) {
      // Grace period expired and runner still hasn't checked in
      await markAsUnresponsive()
    }
  } else {
    // Runner has checked in since API started - use normal threshold
    const timeSinceLastCheck = Date.now() - runner.lastChecked.getTime()
    if (timeSinceLastCheck > healthCheckThresholdMs) {
      // Runner hasn't reported health recently
      await markAsUnresponsive()
    }
  }
}

/**
 * Decommissions draining runners after 3 consecutive sweep passes in which they
 * have no sandboxes left to destroy. A Redis lock serializes the sweep across
 * API instances; the per-runner pass counter lives in Redis with a 10-minute TTL.
 */
@Cron(CronExpression.EVERY_10_SECONDS, { name: 'check-decommission-runners', waitForCompletion: true })
@LogExecution('check-decommission-runners')
@WithInstrumentation()
private async handleCheckDecommissionRunners() {
  const lockKey = 'check-decommission-runners'
  const hasLock = await this.redisLockProvider.lock(lockKey, 60)
  if (!hasLock) {
    return
  }
  try {
    const drainingRunners = await this.runnerRepository.find({
      where: {
        draining: true,
        state: Not(RunnerState.DECOMMISSIONED),
      },
    })
    this.logger.debug(`Checking ${drainingRunners.length} draining runners`)
    await Promise.allSettled(
      drainingRunners.map(async (runner) => {
        try {
          // Check if runner has any sandboxes with desiredState != DESTROYED
          const
nonDestroyedSandboxCount = await this.sandboxRepository.count({
            where: {
              runnerId: runner.id,
              desiredState: Not(SandboxDesiredState.DESTROYED),
            },
          })
          const redisKey = `runner:draining-check:${runner.id}`
          if (nonDestroyedSandboxCount > 0) {
            // Reset counter if there are non-destroyed sandboxes
            await this.redis.set(redisKey, '0', 'EX', 600) // 10 minute TTL
            this.logger.debug(
              `Runner ${runner.id} has ${nonDestroyedSandboxCount} sandboxes with desiredState != DESTROYED, reset counter`,
            )
          } else {
            // Increment counter
            const currentCount = await this.redis.get(redisKey)
            const count = currentCount ? parseInt(currentCount, 10) + 1 : 1
            if (count >= 3) {
              // Decommission the runner
              await this.updateRunner(runner.id, {
                state: RunnerState.DECOMMISSIONED,
              })
              await this.redis.del(redisKey)
              this.logger.log(`Runner ${runner.id} has been decommissioned after 3 successful draining checks`)
            } else {
              await this.redis.set(redisKey, count.toString(), 'EX', 600) // 10 minute TTL
              this.logger.debug(
                `Runner ${runner.id} draining check passed (${count}/3), all sandboxes have desiredState = DESTROYED`,
              )
            }
          }
        } catch (e) {
          this.logger.error(`Error checking draining runner ${runner.id}`, e)
        }
      }),
    )
  } finally {
    await this.redisLockProvider.unlock(lockKey)
  }
}

/** Marks a runner (un)schedulable and returns the saved entity. */
async updateSchedulingStatus(id: string, unschedulable: boolean): Promise {
  const runner = await this.findOneOrFail(id)
  runner.unschedulable = unschedulable
  await this.runnerRepository.save(runner)
  return runner
}

/** Toggles the draining flag and returns the saved entity. */
async updateDrainingStatus(id: string, draining: boolean): Promise {
  const runner = await this.findOneOrFail(id)
  runner.draining = draining
  await this.runnerRepository.save(runner)
  return runner
}

/**
 * Picks a random runner among the best available candidates.
 * @throws {BadRequestError} If no runner is available.
 */
async getRandomAvailableRunner(params: GetRunnerParams): Promise {
  const availableRunners = await this.findAvailableRunners(params)
  if (availableRunners.length === 0) {
    throw new BadRequestError('No available runners')
  }
  // Get random runner from the best available runners
  const randomIntFromInterval = (min: number, max: number) =>
Math.floor(Math.random() * (max - min + 1) + min)
  return availableRunners[randomIntFromInterval(0, availableRunners.length - 1)]
}

async getSnapshotRunner(runnerId: string, snapshotRef: string): Promise {
  return this.snapshotRunnerRepository.findOne({
    where: {
      runnerId: runnerId,
      snapshotRef: snapshotRef,
    },
  })
}

async getSnapshotRunners(snapshotRef: string): Promise {
  return this.snapshotRunnerRepository.find({
    where: {
      snapshotRef,
    },
    order: {
      state: 'ASC', // Sorts state BUILDING_SNAPSHOT before ERROR
      createdAt: 'ASC', // Sorts first runner to start building snapshot on top
    },
  })
}

/** Idempotently records that a runner holds (or failed to obtain) a snapshot. */
async createSnapshotRunnerEntry(
  runnerId: string,
  snapshotRef: string,
  state?: SnapshotRunnerState,
  errorReason?: string,
): Promise {
  try {
    const snapshotRunner = new SnapshotRunner()
    snapshotRunner.runnerId = runnerId
    snapshotRunner.snapshotRef = snapshotRef
    if (state) {
      snapshotRunner.state = state
    }
    if (errorReason) {
      snapshotRunner.errorReason = errorReason
    }
    await this.snapshotRunnerRepository.save(snapshotRunner)
  } catch (error) {
    if (error.code === '23505') {
      // PostgreSQL unique violation error code - entry already exists, allow it
      this.logger.debug(
        `SnapshotRunner entry already exists for runnerId: ${runnerId}, snapshotRef: ${snapshotRef}. Continuing...`,
      )
      return
    }
    throw error // Re-throw any other errors
  }
}

// TODO: combine getRunnersWithMultipleSnapshotsBuilding and getRunnersWithMultipleSnapshotsPulling?
async getRunnersWithMultipleSnapshotsBuilding(maxSnapshotCount = 6): Promise { const runners = await this.sandboxRepository .createQueryBuilder('sandbox') .select('sandbox.runnerId', 'runnerId') .where('sandbox.state = :state', { state: SandboxState.BUILDING_SNAPSHOT }) .andWhere('sandbox.buildInfoSnapshotRef IS NOT NULL') .groupBy('sandbox.runnerId') .having('COUNT(DISTINCT sandbox.buildInfoSnapshotRef) > :maxSnapshotCount', { maxSnapshotCount }) .getRawMany() return runners.map((item) => item.runnerId) } async getRunnersWithMultipleSnapshotsPulling(maxSnapshotCount = 6): Promise { const runners = await this.snapshotRunnerRepository .createQueryBuilder('snapshot_runner') .select('snapshot_runner.runnerId') .where('snapshot_runner.state = :state', { state: SnapshotRunnerState.PULLING_SNAPSHOT }) .groupBy('snapshot_runner.runnerId') .having('COUNT(*) > :maxSnapshotCount', { maxSnapshotCount }) .getRawMany() return runners.map((item) => item.runnerId) } async getRunnersBySnapshotRef(ref: string): Promise { const snapshotRunners = await this.snapshotRunnerRepository.find({ where: { snapshotRef: ref, state: Not(SnapshotRunnerState.ERROR), }, select: ['runnerId', 'id'], }) // Extract distinct runnerIds from snapshot runners const runnerIds = [...new Set(snapshotRunners.map((sr) => sr.runnerId))] // Find all runners with these IDs const runners = await this.runnerRepository.find({ where: { id: In(runnerIds) }, select: ['id', 'domain'], }) this.logger.debug(`Found ${runners.length} runners with IDs: ${runners.map((r) => r.id).join(', ')}`) // Map to DTO format, including the snapshot runner ID return runners.map((runner) => { const snapshotRunner = snapshotRunners.find((sr) => sr.runnerId === runner.id) return new RunnerSnapshotDto(snapshotRunner.id, runner.id, runner.domain) }) } async getInitialRunnerBySnapshotId(snapshotId: string): Promise { const snapshot = await this.snapshotRepository.findOne({ where: { id: snapshotId } }) if (!snapshot) { throw new 
NotFoundException('Snapshot runner not found') } if (!snapshot.initialRunnerId) { throw new BadRequestException('Initial runner not found') } return await this.findOneOrFail(snapshot.initialRunnerId) } async getRunnerApiVersion(runnerId: string): Promise { const result = await this.runnerRepository.findOneOrFail({ select: ['apiVersion'], where: { id: runnerId }, cache: { id: `runner:apiVersion:${runnerId}`, milliseconds: 60 * 60 * 1000, // Cache for 1 hour }, }) return result.apiVersion } private async updateRunner( id: string, data: Partial>, ): Promise { const result = await this.runnerRepository.update(id, data) this.invalidateRunnerCache(id) return result } private invalidateRunnerCache(runnerId: string): void { const cache = this.dataSource.queryResultCache if (!cache) { return } cache .remove([runnerLookupCacheKeyById(runnerId)]) .then(() => this.logger.debug(`Invalidated runner lookup cache for ${runnerId}`)) .catch((error) => this.logger.warn( `Failed to invalidate runner lookup cache for ${runnerId}: ${error instanceof Error ? 
error.message : String(error)}`, ), ) } private calculateAvailabilityScore(runnerId: string, params: AvailabilityScoreParams): number { if ( params.cpuLoadAverage < 0 || params.cpuUsage < 0 || params.memoryUsage < 0 || params.diskUsage < 0 || params.allocatedCpu < 0 || params.allocatedMemoryGiB < 0 || params.allocatedDiskGiB < 0 || params.startedSandboxes < 0 ) { this.logger.warn( `Runner ${runnerId} has negative values for load, CPU, memory, disk, allocated CPU, allocated memory, allocated disk, or started sandboxes`, ) return 0 } return this.calculateTOPSISScore(params) } private calculateTOPSISScore(params: AvailabilityScoreParams): number { const current = [ params.cpuUsage, params.memoryUsage, params.diskUsage, // Allocation ratios percentage (params.allocatedCpu / params.runnerCpu) * 100, (params.allocatedMemoryGiB / params.runnerMemoryGiB) * 100, (params.allocatedDiskGiB / params.runnerDiskGiB) * 100, params.startedSandboxes, // Raw count, will be normalized against its critical target value ] // Calculate weighted Euclidean distances let distanceToOptimal = 0 let distanceToCritical = 0 for (let i = 0; i < current.length; i++) { // Normalize to 0-1 scale const normalizedCurrent = current[i] / 100 const normalizedOptimal = this.scoreConfig.targetValues.optimal[i] / 100 const normalizedCritical = this.scoreConfig.targetValues.critical[i] / 100 distanceToOptimal += this.scoreConfig.weights[i] * Math.pow(normalizedCurrent - normalizedOptimal, 2) distanceToCritical += this.scoreConfig.weights[i] * Math.pow(normalizedCurrent - normalizedCritical, 2) } distanceToOptimal = Math.sqrt(distanceToOptimal) distanceToCritical = Math.sqrt(distanceToCritical) // TOPSIS relative closeness score (0 to 1) let topsisScore = distanceToCritical / (distanceToOptimal + distanceToCritical) // Apply exponential penalties for critical thresholds let penaltyMultiplier = 1 if (params.cpuUsage >= this.scoreConfig.penalty.thresholds.cpu) { penaltyMultiplier *= Math.exp( 
-this.scoreConfig.penalty.exponents.cpu * (params.cpuUsage - this.scoreConfig.penalty.thresholds.cpu), ) } if (params.cpuLoadAverage >= this.scoreConfig.penalty.thresholds.cpuLoadAvg) { penaltyMultiplier *= Math.exp( -this.scoreConfig.penalty.exponents.cpuLoadAvg * (params.cpuLoadAverage - this.scoreConfig.penalty.thresholds.cpuLoadAvg), ) } if (params.memoryUsage >= this.scoreConfig.penalty.thresholds.memory) { penaltyMultiplier *= Math.exp( -this.scoreConfig.penalty.exponents.memory * (params.memoryUsage - this.scoreConfig.penalty.thresholds.memory), ) } if (params.diskUsage >= this.scoreConfig.penalty.thresholds.disk) { penaltyMultiplier *= Math.exp( -this.scoreConfig.penalty.exponents.disk * (params.diskUsage - this.scoreConfig.penalty.thresholds.disk), ) } // Apply penalty topsisScore *= penaltyMultiplier return Math.round(topsisScore * 100) } private getAvailabilityScoreConfig(): AvailabilityScoreConfig { return { availabilityThreshold: this.configService.getOrThrow('runnerScore.thresholds.availability'), weights: [ this.configService.getOrThrow('runnerScore.weights.cpuUsage'), this.configService.getOrThrow('runnerScore.weights.memoryUsage'), this.configService.getOrThrow('runnerScore.weights.diskUsage'), this.configService.getOrThrow('runnerScore.weights.allocatedCpu'), this.configService.getOrThrow('runnerScore.weights.allocatedMemory'), this.configService.getOrThrow('runnerScore.weights.allocatedDisk'), this.configService.getOrThrow('runnerScore.weights.startedSandboxes'), ], penalty: { exponents: { cpu: this.configService.getOrThrow('runnerScore.penalty.exponents.cpu'), cpuLoadAvg: this.configService.getOrThrow('runnerScore.penalty.exponents.cpuLoadAvg'), memory: this.configService.getOrThrow('runnerScore.penalty.exponents.memory'), disk: this.configService.getOrThrow('runnerScore.penalty.exponents.disk'), }, thresholds: { cpu: this.configService.getOrThrow('runnerScore.penalty.thresholds.cpu'), cpuLoadAvg: 
this.configService.getOrThrow('runnerScore.penalty.thresholds.cpuLoadAvg'), memory: this.configService.getOrThrow('runnerScore.penalty.thresholds.memory'), disk: this.configService.getOrThrow('runnerScore.penalty.thresholds.disk'), }, }, targetValues: { optimal: [ this.configService.getOrThrow('runnerScore.targetValues.optimal.cpu'), this.configService.getOrThrow('runnerScore.targetValues.optimal.memory'), this.configService.getOrThrow('runnerScore.targetValues.optimal.disk'), this.configService.getOrThrow('runnerScore.targetValues.optimal.allocCpu'), this.configService.getOrThrow('runnerScore.targetValues.optimal.allocMem'), this.configService.getOrThrow('runnerScore.targetValues.optimal.allocDisk'), this.configService.getOrThrow('runnerScore.targetValues.optimal.startedSandboxes'), ], critical: [ this.configService.getOrThrow('runnerScore.targetValues.critical.cpu'), this.configService.getOrThrow('runnerScore.targetValues.critical.memory'), this.configService.getOrThrow('runnerScore.targetValues.critical.disk'), this.configService.getOrThrow('runnerScore.targetValues.critical.allocCpu'), this.configService.getOrThrow('runnerScore.targetValues.critical.allocMem'), this.configService.getOrThrow('runnerScore.targetValues.critical.allocDisk'), this.configService.getOrThrow('runnerScore.targetValues.critical.startedSandboxes'), ], }, } } } export class GetRunnerParams { regions?: string[] sandboxClass?: SandboxClass snapshotRef?: string excludedRunnerIds?: string[] availabilityScoreThreshold?: number } interface AvailabilityScoreParams { cpuLoadAverage: number cpuUsage: number memoryUsage: number diskUsage: number allocatedCpu: number allocatedMemoryGiB: number allocatedDiskGiB: number startedSandboxes: number runnerCpu: number runnerMemoryGiB: number runnerDiskGiB: number } interface AvailabilityScoreConfig { availabilityThreshold: number weights: number[] penalty: { exponents: { cpu: number cpuLoadAvg: number memory: number disk: number } thresholds: { cpu: number 
cpuLoadAvg: number memory: number disk: number } } targetValues: { optimal: number[] critical: number[] } } ================================================ FILE: apps/api/src/sandbox/services/sandbox-lookup-cache-invalidation.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger } from '@nestjs/common' import { DataSource } from 'typeorm' import { sandboxLookupCacheKeyByAuthToken, sandboxLookupCacheKeyById, sandboxLookupCacheKeyByName, sandboxOrgIdCacheKeyById, sandboxOrgIdCacheKeyByName, } from '../utils/sandbox-lookup-cache.util' type InvalidateSandboxLookupCacheArgs = | { sandboxId: string organizationId: string name: string previousOrganizationId?: string | null previousName?: string | null } | { authToken: string } @Injectable() export class SandboxLookupCacheInvalidationService { private readonly logger = new Logger(SandboxLookupCacheInvalidationService.name) constructor(private readonly dataSource: DataSource) {} invalidate(args: InvalidateSandboxLookupCacheArgs): void { const cache = this.dataSource.queryResultCache if (!cache) { return } if ('authToken' in args) { cache .remove([sandboxLookupCacheKeyByAuthToken({ authToken: args.authToken })]) .then(() => this.logger.debug(`Invalidated sandbox lookup cache for authToken ${args.authToken}`)) .catch((error) => this.logger.warn( `Failed to invalidate sandbox lookup cache for authToken ${args.authToken}: ${error instanceof Error ? 
error.message : String(error)}`, ), ) return } const organizationIds = Array.from( new Set( [args.organizationId, args.previousOrganizationId].filter((id): id is string => Boolean(id && id.trim().length > 0), ), ), ) const names = Array.from( new Set([args.name, args.previousName].filter((n): n is string => Boolean(n && n.trim().length > 0))), ) const cacheIds: string[] = [] for (const organizationId of organizationIds) { for (const returnDestroyed of [false, true]) { cacheIds.push( sandboxLookupCacheKeyById({ organizationId, returnDestroyed, sandboxId: args.sandboxId, }), ) for (const sandboxName of names) { cacheIds.push( sandboxLookupCacheKeyByName({ organizationId, returnDestroyed, sandboxName, }), ) } } } if (cacheIds.length === 0) { return } cache .remove(cacheIds) .then(() => this.logger.debug(`Invalidated sandbox lookup cache for ${args.sandboxId}`)) .catch((error) => this.logger.warn( `Failed to invalidate sandbox lookup cache for ${args.sandboxId}: ${error instanceof Error ? error.message : String(error)}`, ), ) } invalidateOrgId(args: { sandboxId: string organizationId: string name: string previousOrganizationId?: string | null previousName?: string | null }): void { const cache = this.dataSource.queryResultCache if (!cache) { return } const organizationIds = Array.from( new Set( [args.organizationId, args.previousOrganizationId].filter((id): id is string => Boolean(id && id.trim().length > 0), ), ), ) const names = Array.from( new Set([args.name, args.previousName].filter((n): n is string => Boolean(n && n.trim().length > 0))), ) const cacheIds: string[] = [] for (const organizationId of organizationIds) { cacheIds.push( sandboxOrgIdCacheKeyById({ organizationId, sandboxId: args.sandboxId, }), ) for (const sandboxName of names) { cacheIds.push( sandboxOrgIdCacheKeyByName({ organizationId, sandboxName, }), ) } } // Also invalidate the "no org" variants (when organizationId was not provided to getOrganizationId) cacheIds.push(sandboxOrgIdCacheKeyById({ 
sandboxId: args.sandboxId })) for (const sandboxName of names) { cacheIds.push(sandboxOrgIdCacheKeyByName({ sandboxName })) } cache .remove(cacheIds) .then(() => this.logger.debug(`Invalidated sandbox orgId cache for ${args.sandboxId}`)) .catch((error) => this.logger.warn( `Failed to invalidate sandbox orgId cache for ${args.sandboxId}: ${error instanceof Error ? error.message : String(error)}`, ), ) } } ================================================ FILE: apps/api/src/sandbox/services/sandbox-warm-pool.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Inject, Injectable, Logger } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { Cron, CronExpression } from '@nestjs/schedule' import { FindOptionsWhere, In, MoreThan, Not, Repository } from 'typeorm' import { RedisLockProvider } from '../common/redis-lock.provider' import { SandboxRepository } from '../repositories/sandbox.repository' import { Sandbox } from '../entities/sandbox.entity' import { SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION } from '../constants/sandbox.constants' import { WarmPool } from '../entities/warm-pool.entity' import { EventEmitter2, OnEvent } from '@nestjs/event-emitter' import { SandboxEvents } from '../constants/sandbox-events.constants' import { SandboxOrganizationUpdatedEvent } from '../events/sandbox-organization-updated.event' import { ConfigService } from '@nestjs/config' import { Snapshot } from '../entities/snapshot.entity' import { SnapshotState } from '../enums/snapshot-state.enum' import { SandboxClass } from '../enums/sandbox-class.enum' import { BadRequestError } from '../../exceptions/bad-request.exception' import { SandboxState } from '../enums/sandbox-state.enum' import { Runner } from '../entities/runner.entity' import { WarmPoolTopUpRequested } from '../events/warmpool-topup-requested.event' import { WarmPoolEvents } from 
'../constants/warmpool-events.constants' import { InjectRedis } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { SandboxDesiredState } from '../enums/sandbox-desired-state.enum' import { isValidUuid } from '../../common/utils/uuid' import { LogExecution } from '../../common/decorators/log-execution.decorator' import { WithInstrumentation } from '../../common/decorators/otel.decorator' export type FetchWarmPoolSandboxParams = { snapshot: string | Snapshot target: string class: SandboxClass cpu: number mem: number disk: number gpu: number osUser: string env: { [key: string]: string } organizationId: string state: string } @Injectable() export class SandboxWarmPoolService { private readonly logger = new Logger(SandboxWarmPoolService.name) constructor( @InjectRepository(WarmPool) private readonly warmPoolRepository: Repository, private readonly sandboxRepository: SandboxRepository, @InjectRepository(Snapshot) private readonly snapshotRepository: Repository, @InjectRepository(Runner) private readonly runnerRepository: Repository, private readonly redisLockProvider: RedisLockProvider, private readonly configService: ConfigService, @Inject(EventEmitter2) private eventEmitter: EventEmitter2, @InjectRedis() private readonly redis: Redis, ) {} // on init async onApplicationBootstrap() { // await this.adHocBackupCheck() } async fetchWarmPoolSandbox(params: FetchWarmPoolSandboxParams): Promise { // validate snapshot let snapshot: Snapshot | null = null if (typeof params.snapshot === 'string') { const sandboxSnapshot = params.snapshot || this.configService.get('DEFAULT_SNAPSHOT') const snapshotFilter: FindOptionsWhere[] = [ { organizationId: params.organizationId, name: sandboxSnapshot, state: SnapshotState.ACTIVE }, { general: true, name: sandboxSnapshot, state: SnapshotState.ACTIVE }, ] if (isValidUuid(sandboxSnapshot)) { snapshotFilter.push( { organizationId: params.organizationId, id: sandboxSnapshot, state: SnapshotState.ACTIVE }, { general: true, 
id: sandboxSnapshot, state: SnapshotState.ACTIVE }, ) } snapshot = await this.snapshotRepository.findOne({ where: snapshotFilter, }) if (!snapshot) { throw new BadRequestError( `Snapshot ${sandboxSnapshot} not found. Did you add it through the Daytona Dashboard?`, ) } } else { snapshot = params.snapshot } // check if sandbox is warm pool const warmPoolItem = await this.warmPoolRepository.findOne({ where: { snapshot: snapshot.name, target: params.target, class: params.class, cpu: params.cpu, mem: params.mem, disk: params.disk, gpu: params.gpu, osUser: params.osUser, env: params.env, pool: MoreThan(0), }, }) if (warmPoolItem) { const availabilityScoreThreshold = this.configService.getOrThrow('runnerScore.thresholds.availability') // Build subquery to find excluded runners (unschedulable OR low score) const excludedRunnersSubquery = this.runnerRepository .createQueryBuilder('runner') .select('runner.id') .where('runner.region = :region') .andWhere('(runner.unschedulable = true OR runner.availabilityScore < :scoreThreshold)') const queryBuilder = this.sandboxRepository .createQueryBuilder('sandbox') .where('sandbox.class = :class', { class: warmPoolItem.class }) .andWhere('sandbox.cpu = :cpu', { cpu: warmPoolItem.cpu }) .andWhere('sandbox.mem = :mem', { mem: warmPoolItem.mem }) .andWhere('sandbox.disk = :disk', { disk: warmPoolItem.disk }) .andWhere('sandbox.snapshot = :snapshot', { snapshot: snapshot.name }) .andWhere('sandbox.osUser = :osUser', { osUser: warmPoolItem.osUser }) .andWhere('sandbox.env = :env', { env: warmPoolItem.env }) .andWhere('sandbox.organizationId = :organizationId', { organizationId: SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION, }) .andWhere('sandbox.region = :region', { region: warmPoolItem.target }) .andWhere('sandbox.state = :state', { state: SandboxState.STARTED }) .andWhere(`sandbox.runnerId NOT IN (${excludedRunnersSubquery.getQuery()})`) .setParameters({ region: warmPoolItem.target, scoreThreshold: availabilityScoreThreshold, }) const 
candidateLimit = this.configService.getOrThrow('warmPool.candidateLimit') const warmPoolSandboxes = await queryBuilder.orderBy('RANDOM()').take(candidateLimit).getMany() // make sure we only release warm pool sandbox once let warmPoolSandbox: Sandbox | null = null for (const sandbox of warmPoolSandboxes) { const lockKey = `sandbox-warm-pool-${sandbox.id}` if (!(await this.redisLockProvider.lock(lockKey, 10))) { continue } warmPoolSandbox = sandbox break } return warmPoolSandbox } // no warm pool config exists for this snapshot — cache it so callers can skip await this.redis.set(`warm-pool:skip:${snapshot.id}`, '1', 'EX', 60) return null } // todo: make frequency configurable or more efficient @Cron(CronExpression.EVERY_10_SECONDS, { name: 'warm-pool-check' }) @LogExecution('warm-pool-check') @WithInstrumentation() async warmPoolCheck(): Promise { const warmPoolItems = await this.warmPoolRepository.find() await Promise.all( warmPoolItems.map(async (warmPoolItem) => { const lockKey = `warm-pool-lock-${warmPoolItem.id}` if (!(await this.redisLockProvider.lock(lockKey, 720))) { return } const sandboxCount = await this.sandboxRepository.count({ where: { snapshot: warmPoolItem.snapshot, organizationId: SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION, class: warmPoolItem.class, osUser: warmPoolItem.osUser, env: warmPoolItem.env, region: warmPoolItem.target, cpu: warmPoolItem.cpu, gpu: warmPoolItem.gpu, mem: warmPoolItem.mem, disk: warmPoolItem.disk, desiredState: SandboxDesiredState.STARTED, state: Not(In([SandboxState.ERROR, SandboxState.BUILD_FAILED])), }, }) const missingCount = warmPoolItem.pool - sandboxCount if (missingCount > 0) { const promises = [] this.logger.debug(`Creating ${missingCount} sandboxes for warm pool id ${warmPoolItem.id}`) for (let i = 0; i < missingCount; i++) { promises.push( this.eventEmitter.emitAsync(WarmPoolEvents.TOPUP_REQUESTED, new WarmPoolTopUpRequested(warmPoolItem)), ) } // Wait for all promises to settle before releasing the lock. 
Otherwise, another worker could start creating sandboxes await Promise.allSettled(promises) } await this.redisLockProvider.unlock(lockKey) }), ) } @OnEvent(SandboxEvents.ORGANIZATION_UPDATED) async handleSandboxOrganizationUpdated(event: SandboxOrganizationUpdatedEvent) { if (event.newOrganizationId === SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION) { return } const warmPoolItem = await this.warmPoolRepository.findOne({ where: { snapshot: event.sandbox.snapshot, class: event.sandbox.class, cpu: event.sandbox.cpu, mem: event.sandbox.mem, disk: event.sandbox.disk, target: event.sandbox.region, env: event.sandbox.env, gpu: event.sandbox.gpu, osUser: event.sandbox.osUser, }, }) if (!warmPoolItem) { return } const sandboxCount = await this.sandboxRepository.count({ where: { snapshot: warmPoolItem.snapshot, organizationId: SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION, class: warmPoolItem.class, osUser: warmPoolItem.osUser, env: warmPoolItem.env, region: warmPoolItem.target, cpu: warmPoolItem.cpu, gpu: warmPoolItem.gpu, mem: warmPoolItem.mem, disk: warmPoolItem.disk, desiredState: SandboxDesiredState.STARTED, state: Not(In([SandboxState.ERROR, SandboxState.BUILD_FAILED])), }, }) if (warmPoolItem.pool <= sandboxCount) { return } if (warmPoolItem) { this.eventEmitter.emit(WarmPoolEvents.TOPUP_REQUESTED, new WarmPoolTopUpRequested(warmPoolItem)) } } } ================================================ FILE: apps/api/src/sandbox/services/sandbox.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ForbiddenException, Injectable, Logger, NotFoundException, ConflictException } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { Not, Repository, LessThan, In, JsonContains, FindOptionsWhere, ILike } from 'typeorm' import { Sandbox } from '../entities/sandbox.entity' import { CreateSandboxDto } from '../dto/create-sandbox.dto' import { ResizeSandboxDto } from '../dto/resize-sandbox.dto' import { SandboxState } from '../enums/sandbox-state.enum' import { SandboxClass } from '../enums/sandbox-class.enum' import { SandboxDesiredState } from '../enums/sandbox-desired-state.enum' import { RunnerService } from './runner.service' import { SandboxError } from '../../exceptions/sandbox-error.exception' import { BadRequestError } from '../../exceptions/bad-request.exception' import { Cron, CronExpression } from '@nestjs/schedule' import { BackupState } from '../enums/backup-state.enum' import { Snapshot } from '../entities/snapshot.entity' import { SnapshotState } from '../enums/snapshot-state.enum' import { SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION } from '../constants/sandbox.constants' import { SandboxWarmPoolService } from './sandbox-warm-pool.service' import { EventEmitter2, OnEvent } from '@nestjs/event-emitter' import { WarmPoolEvents } from '../constants/warmpool-events.constants' import { WarmPoolTopUpRequested } from '../events/warmpool-topup-requested.event' import { Runner } from '../entities/runner.entity' import { Organization } from '../../organization/entities/organization.entity' import { SandboxEvents } from '../constants/sandbox-events.constants' import { SandboxStateUpdatedEvent } from '../events/sandbox-state-updated.event' import { BuildInfo } from '../entities/build-info.entity' import { generateBuildInfoHash as generateBuildSnapshotRef } from '../entities/build-info.entity' import { SandboxBackupCreatedEvent } from '../events/sandbox-backup-created.event' import { 
SandboxDestroyedEvent } from '../events/sandbox-destroyed.event' import { SandboxStartedEvent } from '../events/sandbox-started.event' import { SandboxStoppedEvent } from '../events/sandbox-stopped.event' import { SandboxArchivedEvent } from '../events/sandbox-archived.event' import { OrganizationService } from '../../organization/services/organization.service' import { OrganizationEvents } from '../../organization/constants/organization-events.constant' import { OrganizationSuspendedSandboxStoppedEvent } from '../../organization/events/organization-suspended-sandbox-stopped.event' import { TypedConfigService } from '../../config/typed-config.service' import { WarmPool } from '../entities/warm-pool.entity' import { SandboxDto, SandboxVolume } from '../dto/sandbox.dto' import { isValidUuid } from '../../common/utils/uuid' import { RunnerAdapterFactory } from '../runner-adapter/runnerAdapter' import { validateNetworkAllowList } from '../utils/network-validation.util' import { OrganizationUsageService } from '../../organization/services/organization-usage.service' import { SshAccess } from '../entities/ssh-access.entity' import { SshAccessDto, SshAccessValidationDto } from '../dto/ssh-access.dto' import { VolumeService } from './volume.service' import { PaginatedList } from '../../common/interfaces/paginated-list.interface' import { SandboxSortField, SandboxSortDirection, DEFAULT_SANDBOX_SORT_FIELD, DEFAULT_SANDBOX_SORT_DIRECTION, } from '../dto/list-sandboxes-query.dto' import { createRangeFilter } from '../../common/utils/range-filter' import { LogExecution } from '../../common/decorators/log-execution.decorator' import { UPGRADE_TIER_MESSAGE, ARCHIVE_SANDBOXES_MESSAGE, PER_SANDBOX_LIMIT_MESSAGE, } from '../../common/constants/error-messages' import { RedisLockProvider } from '../common/redis-lock.provider' import { customAlphabet as customNanoid, nanoid, urlAlphabet } from 'nanoid' import { WithInstrumentation } from '../../common/decorators/otel.decorator' import 
{ validateMountPaths, validateSubpaths } from '../utils/volume-mount-path-validation.util' import { SandboxRepository } from '../repositories/sandbox.repository' import { PortPreviewUrlDto, SignedPortPreviewUrlDto } from '../dto/port-preview-url.dto' import { RegionService } from '../../region/services/region.service' import { DefaultRegionRequiredException } from '../../organization/exceptions/DefaultRegionRequiredException' import { SnapshotService } from './snapshot.service' import { RegionType } from '../../region/enums/region-type.enum' import { SandboxCreatedEvent } from '../events/sandbox-create.event' import { InjectRedis } from '@nestjs-modules/ioredis' import { Redis } from 'ioredis' import { SANDBOX_LOOKUP_CACHE_TTL_MS, SANDBOX_ORG_ID_CACHE_TTL_MS, TOOLBOX_PROXY_URL_CACHE_TTL_S, sandboxLookupCacheKeyById, sandboxLookupCacheKeyByName, sandboxOrgIdCacheKeyById, sandboxOrgIdCacheKeyByName, toolboxProxyUrlCacheKey, } from '../utils/sandbox-lookup-cache.util' import { SandboxLookupCacheInvalidationService } from './sandbox-lookup-cache-invalidation.service' import { Region } from '../../region/entities/region.entity' const DEFAULT_CPU = 1 const DEFAULT_MEMORY = 1 const DEFAULT_DISK = 3 const DEFAULT_GPU = 0 @Injectable() export class SandboxService { private readonly logger = new Logger(SandboxService.name) constructor( private readonly sandboxRepository: SandboxRepository, @InjectRepository(Snapshot) private readonly snapshotRepository: Repository, @InjectRepository(Runner) private readonly runnerRepository: Repository, @InjectRepository(BuildInfo) private readonly buildInfoRepository: Repository, @InjectRepository(SshAccess) private readonly sshAccessRepository: Repository, private readonly runnerService: RunnerService, private readonly volumeService: VolumeService, private readonly configService: TypedConfigService, private readonly warmPoolService: SandboxWarmPoolService, private readonly eventEmitter: EventEmitter2, private readonly organizationService: 
OrganizationService, private readonly runnerAdapterFactory: RunnerAdapterFactory, private readonly organizationUsageService: OrganizationUsageService, private readonly redisLockProvider: RedisLockProvider, @InjectRedis() private readonly redis: Redis, private readonly regionService: RegionService, private readonly snapshotService: SnapshotService, private readonly sandboxLookupCacheInvalidationService: SandboxLookupCacheInvalidationService, ) {} protected getLockKey(id: string): string { return `sandbox:${id}:state-change` } private assertSandboxNotErrored(sandbox: Sandbox): void { if ([SandboxState.ERROR, SandboxState.BUILD_FAILED].includes(sandbox.state)) { throw new SandboxError('Sandbox is in an errored state') } } private async validateOrganizationQuotas( organization: Organization, region: Region, cpu: number, memory: number, disk: number, excludeSandboxId?: string, ): Promise<{ pendingCpuIncremented: boolean pendingMemoryIncremented: boolean pendingDiskIncremented: boolean }> { // validate per-sandbox quotas if (cpu > organization.maxCpuPerSandbox) { throw new ForbiddenException( `CPU request ${cpu} exceeds maximum allowed per sandbox (${organization.maxCpuPerSandbox}).\n${PER_SANDBOX_LIMIT_MESSAGE}`, ) } if (memory > organization.maxMemoryPerSandbox) { throw new ForbiddenException( `Memory request ${memory}GB exceeds maximum allowed per sandbox (${organization.maxMemoryPerSandbox}GB).\n${PER_SANDBOX_LIMIT_MESSAGE}`, ) } if (disk > organization.maxDiskPerSandbox) { throw new ForbiddenException( `Disk request ${disk}GB exceeds maximum allowed per sandbox (${organization.maxDiskPerSandbox}GB).\n${PER_SANDBOX_LIMIT_MESSAGE}`, ) } // e.g. 
region belonging to an organization if (!region.enforceQuotas) { return { pendingCpuIncremented: false, pendingMemoryIncremented: false, pendingDiskIncremented: false, } } const regionQuota = await this.organizationService.getRegionQuota(organization.id, region.id) if (!regionQuota) { if (region.regionType === RegionType.SHARED) { // region is public, but the organization does not have a quota for it throw new ForbiddenException(`Region ${region.id} is not available to the organization`) } else { // region is not public, respond as if the region was not found throw new NotFoundException('Region not found') } } // validate usage quotas const { cpuIncremented: pendingCpuIncremented, memoryIncremented: pendingMemoryIncremented, diskIncremented: pendingDiskIncremented, } = await this.organizationUsageService.incrementPendingSandboxUsage( organization.id, region.id, cpu, memory, disk, excludeSandboxId, ) const usageOverview = await this.organizationUsageService.getSandboxUsageOverview( organization.id, region.id, excludeSandboxId, ) try { const upgradeTierMessage = UPGRADE_TIER_MESSAGE(this.configService.getOrThrow('dashboardUrl')) if (usageOverview.currentCpuUsage + usageOverview.pendingCpuUsage > regionQuota.totalCpuQuota) { throw new ForbiddenException( `Total CPU limit exceeded. Maximum allowed: ${regionQuota.totalCpuQuota}.\n${upgradeTierMessage}`, ) } if (usageOverview.currentMemoryUsage + usageOverview.pendingMemoryUsage > regionQuota.totalMemoryQuota) { throw new ForbiddenException( `Total memory limit exceeded. Maximum allowed: ${regionQuota.totalMemoryQuota}GiB.\n${upgradeTierMessage}`, ) } if (usageOverview.currentDiskUsage + usageOverview.pendingDiskUsage > regionQuota.totalDiskQuota) { throw new ForbiddenException( `Total disk limit exceeded. Maximum allowed: ${regionQuota.totalDiskQuota}GiB.\n${ARCHIVE_SANDBOXES_MESSAGE}\n${upgradeTierMessage}`, ) } } catch (error) { await this.rollbackPendingUsage( organization.id, region.id, pendingCpuIncremented ? 
cpu : undefined, pendingMemoryIncremented ? memory : undefined, pendingDiskIncremented ? disk : undefined, ) throw error } return { pendingCpuIncremented, pendingMemoryIncremented, pendingDiskIncremented, } } async rollbackPendingUsage( organizationId: string, regionId: string, pendingCpuIncrement?: number, pendingMemoryIncrement?: number, pendingDiskIncrement?: number, ): Promise { if (!pendingCpuIncrement && !pendingMemoryIncrement && !pendingDiskIncrement) { return } try { await this.organizationUsageService.decrementPendingSandboxUsage( organizationId, regionId, pendingCpuIncrement, pendingMemoryIncrement, pendingDiskIncrement, ) } catch (error) { this.logger.error(`Error rolling back pending sandbox usage: ${error}`) } } async archive(sandboxIdOrName: string, organizationId?: string): Promise { const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId) this.assertSandboxNotErrored(sandbox) if (String(sandbox.state) !== String(sandbox.desiredState)) { throw new SandboxError('State change in progress') } if (sandbox.state !== SandboxState.STOPPED) { throw new SandboxError('Sandbox is not stopped') } if (sandbox.pending) { throw new SandboxError('Sandbox state change in progress') } if (sandbox.autoDeleteInterval === 0) { throw new SandboxError('Ephemeral sandboxes cannot be archived') } const updateData: Partial = { state: SandboxState.ARCHIVING, desiredState: SandboxDesiredState.ARCHIVED, } const updatedSandbox = await this.sandboxRepository.updateWhere(sandbox.id, { updateData, whereCondition: { pending: false, state: SandboxState.STOPPED }, }) this.eventEmitter.emit(SandboxEvents.ARCHIVED, new SandboxArchivedEvent(updatedSandbox)) return updatedSandbox } async createForWarmPool(warmPoolItem: WarmPool): Promise { const sandbox = new Sandbox(warmPoolItem.target) sandbox.organizationId = SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION sandbox.class = warmPoolItem.class sandbox.snapshot = warmPoolItem.snapshot // TODO: default user should be 
configurable sandbox.osUser = 'daytona' sandbox.env = warmPoolItem.env || {} sandbox.cpu = warmPoolItem.cpu sandbox.gpu = warmPoolItem.gpu sandbox.mem = warmPoolItem.mem sandbox.disk = warmPoolItem.disk const snapshot = await this.snapshotRepository.findOne({ where: [ { organizationId: sandbox.organizationId, name: sandbox.snapshot, state: SnapshotState.ACTIVE }, { general: true, name: sandbox.snapshot, state: SnapshotState.ACTIVE }, ], }) if (!snapshot) { throw new BadRequestError(`Snapshot ${sandbox.snapshot} not found while creating warm pool sandbox`) } const runner = await this.runnerService.getRandomAvailableRunner({ regions: [sandbox.region], sandboxClass: sandbox.class, snapshotRef: snapshot.ref, }) sandbox.runnerId = runner.id sandbox.pending = true await this.sandboxRepository.insert(sandbox) return sandbox } async createFromSnapshot( createSandboxDto: CreateSandboxDto, organization: Organization, useSandboxResourceParams_deprecated?: boolean, ): Promise { let pendingCpuIncrement: number | undefined let pendingMemoryIncrement: number | undefined let pendingDiskIncrement: number | undefined const region = await this.getValidatedOrDefaultRegion(organization, createSandboxDto.target) try { const sandboxClass = this.getValidatedOrDefaultClass(createSandboxDto.class) let snapshotIdOrName = createSandboxDto.snapshot if (!createSandboxDto.snapshot?.trim()) { snapshotIdOrName = this.configService.getOrThrow('defaultSnapshot') } const snapshotFilter: FindOptionsWhere[] = [ { organizationId: organization.id, name: snapshotIdOrName }, { general: true, name: snapshotIdOrName }, ] if (isValidUuid(snapshotIdOrName)) { snapshotFilter.push( { organizationId: organization.id, id: snapshotIdOrName }, { general: true, id: snapshotIdOrName }, ) } const snapshots = await this.snapshotRepository.find({ where: snapshotFilter, }) if (snapshots.length === 0) { throw new BadRequestError( `Snapshot ${snapshotIdOrName} not found. 
Did you add it through the Daytona Dashboard?`, ) } let snapshot = snapshots.find((s) => s.state === SnapshotState.ACTIVE) if (!snapshot) { snapshot = snapshots[0] } if (!(await this.snapshotService.isAvailableInRegion(snapshot.id, region.id))) { throw new BadRequestError(`Snapshot ${snapshotIdOrName} is not available in region ${region.id}`) } if (snapshot.state !== SnapshotState.ACTIVE) { throw new BadRequestError(`Snapshot ${snapshotIdOrName} is ${snapshot.state}`) } if (!snapshot.ref) { throw new BadRequestError('Snapshot ref is not defined') } let cpu = snapshot.cpu let mem = snapshot.mem let disk = snapshot.disk let gpu = snapshot.gpu // Remove the deprecated behavior in a future release if (useSandboxResourceParams_deprecated) { if (createSandboxDto.cpu) { cpu = createSandboxDto.cpu } if (createSandboxDto.memory) { mem = createSandboxDto.memory } if (createSandboxDto.disk) { disk = createSandboxDto.disk } if (createSandboxDto.gpu) { gpu = createSandboxDto.gpu } } this.organizationService.assertOrganizationIsNotSuspended(organization) const { pendingCpuIncremented, pendingMemoryIncremented, pendingDiskIncremented } = await this.validateOrganizationQuotas(organization, region, cpu, mem, disk) if (pendingCpuIncremented) { pendingCpuIncrement = cpu } if (pendingMemoryIncremented) { pendingMemoryIncrement = mem } if (pendingDiskIncremented) { pendingDiskIncrement = disk } if (!createSandboxDto.volumes || createSandboxDto.volumes.length === 0) { const skipWarmPool = (await this.redis.exists(`warm-pool:skip:${snapshot.id}`)) === 1 if (!skipWarmPool) { const warmPoolSandbox = await this.warmPoolService.fetchWarmPoolSandbox({ organizationId: organization.id, snapshot, target: region.id, class: createSandboxDto.class, cpu: cpu, mem: mem, disk: disk, gpu: gpu, osUser: createSandboxDto.user, env: createSandboxDto.env, state: SandboxState.STARTED, }) if (warmPoolSandbox) { return await this.assignWarmPoolSandbox(warmPoolSandbox, createSandboxDto, organization) } } } else 
{ const volumeIdOrNames = createSandboxDto.volumes.map((v) => v.volumeId) await this.volumeService.validateVolumes(organization.id, volumeIdOrNames) } const runner = await this.runnerService.getRandomAvailableRunner({ regions: [region.id], sandboxClass, snapshotRef: snapshot.ref, }) const sandbox = new Sandbox(region.id, createSandboxDto.name) sandbox.organizationId = organization.id // TODO: make configurable sandbox.class = sandboxClass sandbox.snapshot = snapshot.name // TODO: default user should be configurable sandbox.osUser = createSandboxDto.user || 'daytona' sandbox.env = createSandboxDto.env || {} sandbox.labels = createSandboxDto.labels || {} sandbox.cpu = cpu sandbox.gpu = gpu sandbox.mem = mem sandbox.disk = disk sandbox.public = createSandboxDto.public || false if (createSandboxDto.networkBlockAll !== undefined) { sandbox.networkBlockAll = createSandboxDto.networkBlockAll } if (createSandboxDto.networkAllowList !== undefined) { sandbox.networkAllowList = this.resolveNetworkAllowList(createSandboxDto.networkAllowList) } if (createSandboxDto.autoStopInterval !== undefined) { sandbox.autoStopInterval = this.resolveAutoStopInterval(createSandboxDto.autoStopInterval) } if (createSandboxDto.autoArchiveInterval !== undefined) { sandbox.autoArchiveInterval = this.resolveAutoArchiveInterval(createSandboxDto.autoArchiveInterval) } if (createSandboxDto.autoDeleteInterval !== undefined) { sandbox.autoDeleteInterval = createSandboxDto.autoDeleteInterval } if (createSandboxDto.volumes !== undefined) { sandbox.volumes = this.resolveVolumes(createSandboxDto.volumes) } sandbox.runnerId = runner.id sandbox.pending = true const insertedSandbox = await this.sandboxRepository.insert(sandbox) this.eventEmitter.emit(SandboxEvents.CREATED, new SandboxCreatedEvent(insertedSandbox)) return this.toSandboxDto(insertedSandbox) } catch (error) { await this.rollbackPendingUsage( organization.id, region.id, pendingCpuIncrement, pendingMemoryIncrement, pendingDiskIncrement, ) if 
(error.code === '23505') { throw new ConflictException(`Sandbox with name ${createSandboxDto.name} already exists`) } throw error } } private async assignWarmPoolSandbox( warmPoolSandbox: Sandbox, createSandboxDto: CreateSandboxDto, organization: Organization, ): Promise { const now = new Date() const updateData: Partial = { public: createSandboxDto.public || false, labels: createSandboxDto.labels || {}, organizationId: organization.id, createdAt: now, lastActivityAt: now, } if (createSandboxDto.name) { updateData.name = createSandboxDto.name } if (createSandboxDto.autoStopInterval !== undefined) { updateData.autoStopInterval = this.resolveAutoStopInterval(createSandboxDto.autoStopInterval) } if (createSandboxDto.autoArchiveInterval !== undefined) { updateData.autoArchiveInterval = this.resolveAutoArchiveInterval(createSandboxDto.autoArchiveInterval) } if (createSandboxDto.autoDeleteInterval !== undefined) { updateData.autoDeleteInterval = createSandboxDto.autoDeleteInterval } if (createSandboxDto.networkBlockAll !== undefined) { updateData.networkBlockAll = createSandboxDto.networkBlockAll } if (createSandboxDto.networkAllowList !== undefined) { updateData.networkAllowList = this.resolveNetworkAllowList(createSandboxDto.networkAllowList) } if (!warmPoolSandbox.runnerId) { throw new SandboxError('Runner not found for warm pool sandbox') } if ( createSandboxDto.networkBlockAll !== undefined || createSandboxDto.networkAllowList !== undefined || organization.sandboxLimitedNetworkEgress ) { const runner = await this.runnerService.findOneOrFail(warmPoolSandbox.runnerId) const runnerAdapter = await this.runnerAdapterFactory.create(runner) await runnerAdapter.updateNetworkSettings( warmPoolSandbox.id, createSandboxDto.networkBlockAll, createSandboxDto.networkAllowList, organization.sandboxLimitedNetworkEgress, ) } const updatedSandbox = await this.sandboxRepository.update(warmPoolSandbox.id, { updateData, entity: warmPoolSandbox, }) // Defensive invalidation of orgId 
cache since the sandbox moved from unassigned to a real organization this.sandboxLookupCacheInvalidationService.invalidateOrgId({ sandboxId: warmPoolSandbox.id, organizationId: organization.id, name: warmPoolSandbox.name, previousOrganizationId: SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION, }) // Treat this as a newly started sandbox this.eventEmitter.emit( SandboxEvents.STATE_UPDATED, new SandboxStateUpdatedEvent(updatedSandbox, SandboxState.STARTED, SandboxState.STARTED), ) return this.toSandboxDto(updatedSandbox) } async createFromBuildInfo(createSandboxDto: CreateSandboxDto, organization: Organization): Promise { let pendingCpuIncrement: number | undefined let pendingMemoryIncrement: number | undefined let pendingDiskIncrement: number | undefined const region = await this.getValidatedOrDefaultRegion(organization, createSandboxDto.target) try { const sandboxClass = this.getValidatedOrDefaultClass(createSandboxDto.class) const cpu = createSandboxDto.cpu || DEFAULT_CPU const mem = createSandboxDto.memory || DEFAULT_MEMORY const disk = createSandboxDto.disk || DEFAULT_DISK const gpu = createSandboxDto.gpu || DEFAULT_GPU this.organizationService.assertOrganizationIsNotSuspended(organization) const { pendingCpuIncremented, pendingMemoryIncremented, pendingDiskIncremented } = await this.validateOrganizationQuotas(organization, region, cpu, mem, disk) if (pendingCpuIncremented) { pendingCpuIncrement = cpu } if (pendingMemoryIncremented) { pendingMemoryIncrement = mem } if (pendingDiskIncremented) { pendingDiskIncrement = disk } if (createSandboxDto.volumes && createSandboxDto.volumes.length > 0) { const volumeIdOrNames = createSandboxDto.volumes.map((v) => v.volumeId) await this.volumeService.validateVolumes(organization.id, volumeIdOrNames) } const sandbox = new Sandbox(region.id, createSandboxDto.name) sandbox.organizationId = organization.id sandbox.class = sandboxClass sandbox.osUser = createSandboxDto.user || 'daytona' sandbox.env = createSandboxDto.env || {} 
sandbox.labels = createSandboxDto.labels || {} sandbox.cpu = cpu sandbox.gpu = gpu sandbox.mem = mem sandbox.disk = disk sandbox.public = createSandboxDto.public || false if (createSandboxDto.networkBlockAll !== undefined) { sandbox.networkBlockAll = createSandboxDto.networkBlockAll } if (createSandboxDto.networkAllowList !== undefined) { sandbox.networkAllowList = this.resolveNetworkAllowList(createSandboxDto.networkAllowList) } if (createSandboxDto.autoStopInterval !== undefined) { sandbox.autoStopInterval = this.resolveAutoStopInterval(createSandboxDto.autoStopInterval) } if (createSandboxDto.autoArchiveInterval !== undefined) { sandbox.autoArchiveInterval = this.resolveAutoArchiveInterval(createSandboxDto.autoArchiveInterval) } if (createSandboxDto.autoDeleteInterval !== undefined) { sandbox.autoDeleteInterval = createSandboxDto.autoDeleteInterval } if (createSandboxDto.volumes !== undefined) { sandbox.volumes = this.resolveVolumes(createSandboxDto.volumes) } const buildInfoSnapshotRef = generateBuildSnapshotRef( createSandboxDto.buildInfo.dockerfileContent, createSandboxDto.buildInfo.contextHashes, ) // Check if buildInfo with the same snapshotRef already exists const existingBuildInfo = await this.buildInfoRepository.findOne({ where: { snapshotRef: buildInfoSnapshotRef }, }) if (existingBuildInfo) { sandbox.buildInfo = existingBuildInfo if (await this.redisLockProvider.lock(`build-info:${existingBuildInfo.snapshotRef}:update`, 60)) { await this.buildInfoRepository.update(sandbox.buildInfo.snapshotRef, { lastUsedAt: new Date() }) } } else { const buildInfoEntity = this.buildInfoRepository.create({ ...createSandboxDto.buildInfo, }) await this.buildInfoRepository.save(buildInfoEntity) sandbox.buildInfo = buildInfoEntity } let runner: Runner try { const declarativeBuildScoreThreshold = this.configService.get('runnerScore.thresholds.declarativeBuild') runner = await this.runnerService.getRandomAvailableRunner({ regions: [sandbox.region], sandboxClass: 
sandbox.class, snapshotRef: sandbox.buildInfo.snapshotRef, ...(declarativeBuildScoreThreshold !== undefined && { availabilityScoreThreshold: declarativeBuildScoreThreshold, }), }) sandbox.runnerId = runner.id } catch (error) { if ( error instanceof BadRequestError == false || error.message !== 'No available runners' || !sandbox.buildInfo ) { throw error } sandbox.state = SandboxState.PENDING_BUILD } sandbox.pending = true const insertedSandbox = await this.sandboxRepository.insert(sandbox) this.eventEmitter.emit(SandboxEvents.CREATED, new SandboxCreatedEvent(insertedSandbox)) return this.toSandboxDto(insertedSandbox) } catch (error) { await this.rollbackPendingUsage( organization.id, region.id, pendingCpuIncrement, pendingMemoryIncrement, pendingDiskIncrement, ) if (error.code === '23505') { throw new ConflictException(`Sandbox with name ${createSandboxDto.name} already exists`) } throw error } } async createBackup(sandboxIdOrName: string, organizationId?: string): Promise { const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId) if (sandbox.autoDeleteInterval === 0) { throw new SandboxError('Ephemeral sandboxes cannot be backed up') } if (![BackupState.COMPLETED, BackupState.NONE].includes(sandbox.backupState)) { throw new SandboxError('Sandbox backup is already in progress') } this.eventEmitter.emit(SandboxEvents.BACKUP_CREATED, new SandboxBackupCreatedEvent(sandbox)) return sandbox } async findAllDeprecated( organizationId: string, labels?: { [key: string]: string }, includeErroredDestroyed?: boolean, ): Promise { const baseFindOptions: FindOptionsWhere = { organizationId, ...(labels ? { labels: JsonContains(labels) } : {}), } const where: FindOptionsWhere[] = [ { ...baseFindOptions, state: Not(In([SandboxState.DESTROYED, SandboxState.ERROR, SandboxState.BUILD_FAILED])), }, { ...baseFindOptions, state: In([SandboxState.ERROR, SandboxState.BUILD_FAILED]), ...(includeErroredDestroyed ? 
{} : { desiredState: Not(SandboxDesiredState.DESTROYED) }), }, ] return this.sandboxRepository.find({ where }) } async findAll( organizationId: string, page = 1, limit = 10, filters?: { id?: string name?: string labels?: { [key: string]: string } includeErroredDestroyed?: boolean states?: SandboxState[] snapshots?: string[] regionIds?: string[] minCpu?: number maxCpu?: number minMemoryGiB?: number maxMemoryGiB?: number minDiskGiB?: number maxDiskGiB?: number lastEventAfter?: Date lastEventBefore?: Date }, sort?: { field?: SandboxSortField direction?: SandboxSortDirection }, ): Promise> { const pageNum = Number(page) const limitNum = Number(limit) const { id, name, labels, includeErroredDestroyed, states, snapshots, regionIds, minCpu, maxCpu, minMemoryGiB, maxMemoryGiB, minDiskGiB, maxDiskGiB, lastEventAfter, lastEventBefore, } = filters || {} const { field: sortField = DEFAULT_SANDBOX_SORT_FIELD, direction: sortDirection = DEFAULT_SANDBOX_SORT_DIRECTION } = sort || {} const baseFindOptions: FindOptionsWhere = { organizationId, ...(id ? { id: ILike(`${id}%`) } : {}), ...(name ? { name: ILike(`${name}%`) } : {}), ...(labels ? { labels: JsonContains(labels) } : {}), ...(snapshots ? { snapshot: In(snapshots) } : {}), ...(regionIds ? 
{ region: In(regionIds) } : {}), } baseFindOptions.cpu = createRangeFilter(minCpu, maxCpu) baseFindOptions.mem = createRangeFilter(minMemoryGiB, maxMemoryGiB) baseFindOptions.disk = createRangeFilter(minDiskGiB, maxDiskGiB) baseFindOptions.lastActivityAt = createRangeFilter(lastEventAfter, lastEventBefore) const statesToInclude = (states || Object.values(SandboxState)).filter((state) => state !== SandboxState.DESTROYED) const errorStates = [SandboxState.ERROR, SandboxState.BUILD_FAILED] const nonErrorStatesToInclude = statesToInclude.filter((state) => !errorStates.includes(state)) const errorStatesToInclude = statesToInclude.filter((state) => errorStates.includes(state)) const where: FindOptionsWhere[] = [] if (nonErrorStatesToInclude.length > 0) { where.push({ ...baseFindOptions, state: In(nonErrorStatesToInclude), }) } if (errorStatesToInclude.length > 0) { where.push({ ...baseFindOptions, state: In(errorStatesToInclude), ...(includeErroredDestroyed ? {} : { desiredState: Not(SandboxDesiredState.DESTROYED) }), }) } const [items, total] = await this.sandboxRepository.findAndCount({ where, order: { [sortField]: { direction: sortDirection, nulls: 'LAST', }, ...(sortField !== SandboxSortField.CREATED_AT && { createdAt: 'DESC' }), }, skip: (pageNum - 1) * limitNum, take: limitNum, }) return { items, total, page: pageNum, totalPages: Math.ceil(total / limitNum), } } private getExpectedDesiredStateForState(state: SandboxState): SandboxDesiredState | undefined { switch (state) { case SandboxState.STARTED: return SandboxDesiredState.STARTED case SandboxState.STOPPED: return SandboxDesiredState.STOPPED case SandboxState.ARCHIVED: return SandboxDesiredState.ARCHIVED case SandboxState.DESTROYED: return SandboxDesiredState.DESTROYED default: return undefined } } private hasValidDesiredState(state: SandboxState): boolean { return this.getExpectedDesiredStateForState(state) !== undefined } async findByRunnerId( runnerId: string, states?: SandboxState[], 
skipReconcilingSandboxes?: boolean, ): Promise { const where: FindOptionsWhere = { runnerId } if (states && states.length > 0) { // Validate that all states have corresponding desired states states.forEach((state) => { if (!this.hasValidDesiredState(state)) { throw new BadRequestError(`State ${state} does not have a corresponding desired state`) } }) where.state = In(states) } let sandboxes = await this.sandboxRepository.find({ where }) if (skipReconcilingSandboxes) { sandboxes = sandboxes.filter((sandbox) => { const expectedDesiredState = this.getExpectedDesiredStateForState(sandbox.state) return expectedDesiredState !== undefined && expectedDesiredState === sandbox.desiredState }) } return sandboxes } async findOneByIdOrName( sandboxIdOrName: string, organizationId: string, returnDestroyed?: boolean, ): Promise { const stateFilter = returnDestroyed ? {} : { state: Not(SandboxState.DESTROYED) } const relations: ['buildInfo'] = ['buildInfo'] // Try lookup by ID first let sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxIdOrName, organizationId, ...stateFilter, }, relations, cache: { id: sandboxLookupCacheKeyById({ organizationId, returnDestroyed, sandboxId: sandboxIdOrName }), milliseconds: SANDBOX_LOOKUP_CACHE_TTL_MS, }, }) // Fallback to lookup by name if (!sandbox) { sandbox = await this.sandboxRepository.findOne({ where: { name: sandboxIdOrName, organizationId, ...stateFilter, }, relations, cache: { id: sandboxLookupCacheKeyByName({ organizationId, returnDestroyed, sandboxName: sandboxIdOrName }), milliseconds: SANDBOX_LOOKUP_CACHE_TTL_MS, }, }) } if ( !sandbox || (!returnDestroyed && [SandboxState.ERROR, SandboxState.BUILD_FAILED].includes(sandbox.state) && sandbox.desiredState === SandboxDesiredState.DESTROYED) ) { throw new NotFoundException(`Sandbox with ID or name ${sandboxIdOrName} not found`) } return sandbox } async findOne(sandboxId: string, returnDestroyed?: boolean): Promise { const sandbox = await this.sandboxRepository.findOne({ 
where: { id: sandboxId, ...(returnDestroyed ? {} : { state: Not(SandboxState.DESTROYED) }), }, }) if ( !sandbox || (!returnDestroyed && [SandboxState.ERROR, SandboxState.BUILD_FAILED].includes(sandbox.state) && sandbox.desiredState === SandboxDesiredState.DESTROYED) ) { throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`) } return sandbox } async getOrganizationId(sandboxIdOrName: string, organizationId?: string): Promise { let sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxIdOrName, ...(organizationId ? { organizationId: organizationId } : {}), }, select: ['organizationId'], cache: { id: sandboxOrgIdCacheKeyById({ organizationId, sandboxId: sandboxIdOrName }), milliseconds: SANDBOX_ORG_ID_CACHE_TTL_MS, }, }) if (!sandbox && organizationId) { sandbox = await this.sandboxRepository.findOne({ where: { name: sandboxIdOrName, organizationId: organizationId, }, select: ['organizationId'], cache: { id: sandboxOrgIdCacheKeyByName({ organizationId, sandboxName: sandboxIdOrName }), milliseconds: SANDBOX_ORG_ID_CACHE_TTL_MS, }, }) } if (!sandbox || !sandbox.organizationId) { throw new NotFoundException(`Sandbox with ID or name ${sandboxIdOrName} not found`) } return sandbox.organizationId } async getRunnerId(sandboxId: string): Promise { const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId, }, select: ['runnerId'], loadEagerRelations: false, }) if (!sandbox) { throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`) } return sandbox.runnerId || null } async getRegionId(sandboxId: string): Promise { const sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId, }, select: ['region'], loadEagerRelations: false, }) if (!sandbox) { throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`) } return sandbox.region } async getPortPreviewUrl(sandboxIdOrName: string, organizationId: string, port: number): Promise { if (port < 1 || port > 65535) { throw new BadRequestError('Invalid 
port') } const proxyDomain = this.configService.getOrThrow('proxy.domain') const proxyProtocol = this.configService.getOrThrow('proxy.protocol') const where: FindOptionsWhere = { organizationId: organizationId, state: Not(SandboxState.DESTROYED), } const sandbox = await this.sandboxRepository.findOne({ where: [ { id: sandboxIdOrName, ...where, }, { name: sandboxIdOrName, ...where, }, ], cache: { id: `sandbox:${sandboxIdOrName}:organization:${organizationId}`, milliseconds: 1000, }, }) if (!sandbox) { throw new NotFoundException(`Sandbox with ID or name ${sandboxIdOrName} not found`) } let url = `${proxyProtocol}://${port}-${sandbox.id}.${proxyDomain}` const region = await this.regionService.findOne(sandbox.region, true) if (region && region.proxyUrl) { // Insert port and sandbox.id into the custom proxy URL url = region.proxyUrl.replace(/(https?:\/)(\/)/, `$1/${port}-${sandbox.id}.`) } return { sandboxId: sandbox.id, url, token: sandbox.authToken, } } async getSignedPortPreviewUrl( sandboxIdOrName: string, organizationId: string, port: number, expiresInSeconds = 60, ): Promise { if (port < 1 || port > 65535) { throw new BadRequestError('Invalid port') } if (expiresInSeconds < 1 || expiresInSeconds > 60 * 60 * 24) { throw new BadRequestError('expiresInSeconds must be between 1 second and 24 hours') } const proxyDomain = this.configService.getOrThrow('proxy.domain') const proxyProtocol = this.configService.getOrThrow('proxy.protocol') const where: FindOptionsWhere = { organizationId: organizationId, state: Not(SandboxState.DESTROYED), } const sandbox = await this.sandboxRepository.findOne({ where: [ { id: sandboxIdOrName, ...where, }, { name: sandboxIdOrName, ...where, }, ], cache: { id: `sandbox:${sandboxIdOrName}:organization:${organizationId}`, milliseconds: 1000, }, }) if (!sandbox) { throw new NotFoundException(`Sandbox with ID or name ${sandboxIdOrName} not found`) } const token = customNanoid(urlAlphabet.replace('_', '').replace('-', 
''))(16).toLocaleLowerCase() const lockKey = `sandbox:signed-preview-url-token:${port}:${token}` await this.redis.setex(lockKey, expiresInSeconds, sandbox.id) let url = `${proxyProtocol}://${port}-${token}.${proxyDomain}` const region = await this.regionService.findOne(sandbox.region, true) if (region && region.proxyUrl) { // Insert port and sandbox.id into the custom proxy URL url = region.proxyUrl.replace(/(https?:\/)(\/)/, `$1/${port}-${token}.`) } return { sandboxId: sandbox.id, port, token, url, } } async getSandboxIdFromSignedPreviewUrlToken(token: string, port: number): Promise { const lockKey = `sandbox:signed-preview-url-token:${port}:${token}` const sandboxId = await this.redis.get(lockKey) if (!sandboxId) { throw new ForbiddenException('Invalid or expired token') } return sandboxId } async expireSignedPreviewUrlToken( sandboxIdOrName: string, organizationId: string, token: string, port: number, ): Promise { const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId) if (!sandbox) { throw new NotFoundException(`Sandbox with ID or name ${sandboxIdOrName} not found`) } const lockKey = `sandbox:signed-preview-url-token:${port}:${token}` await this.redis.del(lockKey) } async destroy(sandboxIdOrName: string, organizationId?: string): Promise { const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId) if (sandbox.pending && sandbox.state !== SandboxState.PENDING_BUILD) { throw new SandboxError('Sandbox state change in progress') } const updateData = Sandbox.getSoftDeleteUpdate(sandbox) const updatedSandbox = await this.sandboxRepository.updateWhere(sandbox.id, { updateData, whereCondition: { pending: sandbox.pending, state: sandbox.state }, }) this.eventEmitter.emit(SandboxEvents.DESTROYED, new SandboxDestroyedEvent(updatedSandbox)) return updatedSandbox } async start(sandboxIdOrName: string, organization: Organization): Promise { let pendingCpuIncrement: number | undefined let pendingMemoryIncrement: number | undefined let 
pendingDiskIncrement: number | undefined const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organization.id) const region = await this.regionService.findOne(sandbox.region) if (!region) { throw new NotFoundException(`Region with ID ${sandbox.region} not found`) } try { if (sandbox.state === SandboxState.STARTED && sandbox.desiredState === SandboxDesiredState.STARTED) { return sandbox } this.assertSandboxNotErrored(sandbox) if (String(sandbox.state) !== String(sandbox.desiredState)) { // Allow start of stopped | archived and archiving | archived sandboxes if ( sandbox.desiredState !== SandboxDesiredState.ARCHIVED || (sandbox.state !== SandboxState.STOPPED && sandbox.state !== SandboxState.ARCHIVING) ) { throw new SandboxError('State change in progress') } } if (![SandboxState.STOPPED, SandboxState.ARCHIVED, SandboxState.ARCHIVING].includes(sandbox.state)) { throw new SandboxError('Sandbox is not in valid state') } if (sandbox.pending) { throw new SandboxError('Sandbox state change in progress') } this.organizationService.assertOrganizationIsNotSuspended(organization) const { pendingCpuIncremented, pendingMemoryIncremented, pendingDiskIncremented } = await this.validateOrganizationQuotas(organization, region, sandbox.cpu, sandbox.mem, sandbox.disk, sandbox.id) if (pendingCpuIncremented) { pendingCpuIncrement = sandbox.cpu } if (pendingMemoryIncremented) { pendingMemoryIncrement = sandbox.mem } if (pendingDiskIncremented) { pendingDiskIncrement = sandbox.disk } const updateData: Partial = { pending: true, desiredState: SandboxDesiredState.STARTED, authToken: nanoid(32).toLocaleLowerCase(), } const updatedSandbox = await this.sandboxRepository.updateWhere(sandbox.id, { updateData, whereCondition: { pending: false, state: sandbox.state }, }) this.eventEmitter.emit(SandboxEvents.STARTED, new SandboxStartedEvent(updatedSandbox)) return updatedSandbox } catch (error) { await this.rollbackPendingUsage( organization.id, sandbox.region, pendingCpuIncrement, 
pendingMemoryIncrement, pendingDiskIncrement, ) throw error } } async stop(sandboxIdOrName: string, organizationId?: string): Promise { const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId) this.assertSandboxNotErrored(sandbox) if (String(sandbox.state) !== String(sandbox.desiredState)) { throw new SandboxError('State change in progress') } if (sandbox.state !== SandboxState.STARTED) { throw new SandboxError('Sandbox is not started') } if (sandbox.pending) { throw new SandboxError('Sandbox state change in progress') } const updateData: Partial = { pending: true, desiredState: sandbox.autoDeleteInterval === 0 ? SandboxDesiredState.DESTROYED : SandboxDesiredState.STOPPED, } const updatedSandbox = await this.sandboxRepository.updateWhere(sandbox.id, { updateData, whereCondition: { pending: false, state: sandbox.state }, }) if (sandbox.autoDeleteInterval === 0) { this.eventEmitter.emit(SandboxEvents.DESTROYED, new SandboxDestroyedEvent(updatedSandbox)) } else { this.eventEmitter.emit(SandboxEvents.STOPPED, new SandboxStoppedEvent(updatedSandbox)) } return updatedSandbox } async recover(sandboxIdOrName: string, organization: Organization): Promise { const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organization.id) if (sandbox.state !== SandboxState.ERROR) { throw new BadRequestError('Sandbox must be in error state to recover') } if (sandbox.pending) { throw new SandboxError('Sandbox state change in progress') } // Validate runner exists if (!sandbox.runnerId) { throw new NotFoundException(`Sandbox with ID ${sandbox.id} does not have a runner`) } const runner = await this.runnerService.findOneOrFail(sandbox.runnerId) if (runner.apiVersion === '2') { // TODO: we need "recovering" state that can be set after calling recover // Once in recovering, we abort further processing and let the manager/job handler take care of it // (Also, since desiredState would be STARTED, we need to check the quota) throw new ForbiddenException('Recovering 
sandboxes with runner API version 2 is not supported') }
    const runnerAdapter = await this.runnerAdapterFactory.create(runner)
    try {
      await runnerAdapter.recoverSandbox(sandbox)
    } catch (error) {
      // Surface the runner's disk-expansion limit as a user-facing 403 with guidance.
      if (error instanceof Error && error.message.includes('storage cannot be further expanded')) {
        const errorMsg = `Sandbox storage cannot be further expanded. Maximum expansion of ${(sandbox.disk * 0.1).toFixed(2)}GB (10% of original ${sandbox.disk.toFixed(2)}GB) has been reached. Please contact support for further assistance.`
        throw new ForbiddenException(errorMsg)
      }
      throw error
    }
    // Recovery succeeded on the runner: persist STOPPED and clear the error flags.
    const updateData: Partial = {
      state: SandboxState.STOPPED,
      desiredState: SandboxDesiredState.STOPPED,
      errorReason: null,
      recoverable: false,
    }
    // Guarded write: only transition if the sandbox is still in ERROR.
    await this.sandboxRepository.updateWhere(sandbox.id, {
      updateData,
      whereCondition: { state: SandboxState.ERROR },
    })
    // Now that sandbox is in STOPPED state, use the normal start flow
    // This handles quota validation, pending usage, event emission, etc.
    return await this.start(sandbox.id, organization)
  }

  /**
   * Resize a sandbox's CPU / memory / disk.
   *
   * Rules enforced below:
   * - sandbox must be STARTED (hot resize: cpu/mem increases only) or STOPPED (cold resize),
   * - disk can only grow and only while STOPPED,
   * - per-sandbox and organization quotas are validated; any pending-usage
   *   reservation made here is rolled back if a later step fails.
   */
  async resize(sandboxIdOrName: string, resizeDto: ResizeSandboxDto, organization: Organization): Promise {
    // Track which pending-usage counters were incremented so the catch block can roll them back.
    let pendingCpuIncrement: number | undefined
    let pendingMemoryIncrement: number | undefined
    let pendingDiskIncrement: number | undefined
    const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organization.id)
    const region = await this.regionService.findOne(sandbox.region)
    if (!region) {
      throw new NotFoundException(`Region with ID ${sandbox.region} not found`)
    }
    try {
      // Validate sandbox is in a valid state for resize
      if (sandbox.state !== SandboxState.STARTED && sandbox.state !== SandboxState.STOPPED) {
        throw new BadRequestError('Sandbox must be in started or stopped state to resize')
      }
      if (sandbox.pending) {
        throw new SandboxError('Sandbox state change in progress')
      }
      // If no resize parameters provided, throw error
      if (resizeDto.cpu === undefined && resizeDto.memory === undefined && resizeDto.disk === undefined) {
        throw new BadRequestError('No resource changes specified - sandbox is already at the desired configuration')
      }
      // Disk resize requires stopped sandbox (cold resize only)
      if (resizeDto.disk !== undefined && sandbox.state !== SandboxState.STOPPED) {
        throw new BadRequestError('Disk resize can only be performed on a stopped sandbox')
      }
      // Hot resize (sandbox is running): only CPU and memory can be increased
      const isHotResize = sandbox.state === SandboxState.STARTED
      // Validate hot resize constraints
      if (isHotResize) {
        if (resizeDto.cpu !== undefined && resizeDto.cpu < sandbox.cpu) {
          throw new BadRequestError('Sandbox must be in stopped state to decrease the number of CPU cores')
        }
        if (resizeDto.memory !== undefined && resizeDto.memory < sandbox.mem) {
          throw new BadRequestError('Sandbox must be in stopped state to decrease memory')
        }
      }
      // Disk can only be increased (never decreased)
      if (resizeDto.disk !== undefined && resizeDto.disk < sandbox.disk) {
        throw new BadRequestError('Sandbox disk size cannot be decreased')
      }
      // Calculate new resource values (omitted fields keep their current value)
      const newCpu = resizeDto.cpu ?? sandbox.cpu
      const newMem = resizeDto.memory ?? sandbox.mem
      const newDisk = resizeDto.disk ?? sandbox.disk
      // Throw if nothing actually changes
      if (newCpu === sandbox.cpu && newMem === sandbox.mem && newDisk === sandbox.disk) {
        throw new BadRequestError('No resource changes specified - sandbox is already at the desired configuration')
      }
      // Validate organization quotas for the new resource values
      this.organizationService.assertOrganizationIsNotSuspended(organization)
      // Validate per-sandbox quotas with total new values
      if (newCpu > organization.maxCpuPerSandbox) {
        throw new ForbiddenException(
          `CPU request ${newCpu} exceeds maximum allowed per sandbox (${organization.maxCpuPerSandbox}).\n${PER_SANDBOX_LIMIT_MESSAGE}`,
        )
      }
      if (newMem > organization.maxMemoryPerSandbox) {
        throw new ForbiddenException(
          `Memory request ${newMem}GB exceeds maximum allowed per sandbox (${organization.maxMemoryPerSandbox}GB).\n${PER_SANDBOX_LIMIT_MESSAGE}`,
        )
      }
      if (newDisk > organization.maxDiskPerSandbox) {
        throw new ForbiddenException(
          `Disk request ${newDisk}GB exceeds maximum allowed per sandbox (${organization.maxDiskPerSandbox}GB).\n${PER_SANDBOX_LIMIT_MESSAGE}`,
        )
      }
      // For cold resize, cpu/memory don't affect quota until sandbox is STARTED.
      // For hot resize, track all deltas (positive reserves quota, negative frees quota for others).
      const cpuDeltaForQuota = isHotResize ? newCpu - sandbox.cpu : 0
      const memDeltaForQuota = isHotResize ? newMem - sandbox.mem : 0
      const diskDeltaForQuota = newDisk - sandbox.disk // Disk only increases (validated at start of method)
      // Validate and track pending for any non-zero quota changes
      if (cpuDeltaForQuota !== 0 || memDeltaForQuota !== 0 || diskDeltaForQuota !== 0) {
        const { pendingCpuIncremented, pendingMemoryIncremented, pendingDiskIncremented } =
          await this.validateOrganizationQuotas(
            organization,
            region,
            cpuDeltaForQuota,
            memDeltaForQuota,
            diskDeltaForQuota,
          )
        if (pendingCpuIncremented) {
          pendingCpuIncrement = cpuDeltaForQuota
        }
        if (pendingMemoryIncremented) {
          pendingMemoryIncrement = memDeltaForQuota
        }
        if (pendingDiskIncremented) {
          pendingDiskIncrement = diskDeltaForQuota
        }
      }
      // Get runner and validate before changing state
      if (!sandbox.runnerId) {
        throw new BadRequestError('Sandbox has no runner assigned')
      }
      const runner = await this.runnerService.findOneOrFail(sandbox.runnerId)
      // Capture the previous state before transitioning to RESIZING (STARTED or STOPPED)
      const previousState =
        sandbox.state === SandboxState.STARTED
          ? SandboxState.STARTED
          : sandbox.state === SandboxState.STOPPED
            ? SandboxState.STOPPED
            : null
      if (!previousState) {
        throw new BadRequestError('Sandbox must be in started or stopped state to resize')
      }
      // Now transition to RESIZING state
      const updateData: Partial = {
        state: SandboxState.RESIZING,
      }
      await this.sandboxRepository.updateWhere(sandbox.id, {
        updateData,
        whereCondition: { pending: false, state: previousState },
      })
      try {
        const runnerAdapter = await this.runnerAdapterFactory.create(runner)
        await runnerAdapter.resizeSandbox(sandbox.id, resizeDto.cpu, resizeDto.memory, resizeDto.disk)
        // For V0 runners, update resources immediately (subscriber emits STATE_UPDATED)
        // For V2 runners, job handler will update resources on completion
        if (runner.apiVersion === '0') {
          const updateData: Partial = {
            cpu: newCpu,
            mem: newMem,
            disk: newDisk,
            state: previousState,
          }
          await this.sandboxRepository.updateWhere(sandbox.id, {
            updateData,
            whereCondition: { state: SandboxState.RESIZING },
          })
          // Apply the usage change (increments current, decrements pending)
          // Only apply deltas for quotas that were validated/pending-incremented
          await this.organizationUsageService.applyResizeUsageChange(
            organization.id,
            sandbox.region,
            cpuDeltaForQuota,
            memDeltaForQuota,
            diskDeltaForQuota,
          )
        }
        return await this.findOneByIdOrName(sandbox.id, organization.id)
      } catch (error) {
        // Return to previous state on error
        const updateData: Partial = {
          state: previousState,
        }
        await this.sandboxRepository.updateWhere(sandbox.id, {
          updateData,
          whereCondition: { state: SandboxState.RESIZING },
        })
        throw error
      }
    } catch (error) {
      // Any failure after pending usage was reserved must release it again.
      await this.rollbackPendingUsage(
        organization.id,
        sandbox.region,
        pendingCpuIncrement,
        pendingMemoryIncrement,
        pendingDiskIncrement,
      )
      throw error
    }
  }

  /** Toggle a sandbox's public visibility flag. */
  async updatePublicStatus(sandboxIdOrName: string, isPublic: boolean, organizationId?: string): Promise {
    const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId)
    const updateData: Partial = {
      public: isPublic,
    }
    return await this.sandboxRepository.update(sandbox.id, {
      updateData,
entity: sandbox,
    })
  }

  /**
   * Record the sandbox's last-activity timestamp.
   * Throttled via a short-lived Redis lock so frequent callers don't spam the DB.
   */
  async updateLastActivityAt(sandboxId: string, lastActivityAt: Date): Promise {
    // Prevent spamming updates
    const lockKey = `sandbox:update-last-activity:${sandboxId}`
    const acquired = await this.redisLockProvider.lock(lockKey, 45)
    if (!acquired) {
      return
    }
    await this.sandboxRepository.update(sandboxId, { updateData: { lastActivityAt } }, true)
  }

  /** Resolve the toolbox proxy URL for the region a sandbox lives in. */
  async getToolboxProxyUrl(sandboxId: string): Promise {
    const sandbox = await this.findOne(sandboxId)
    return this.resolveToolboxProxyUrl(sandbox.region)
  }

  /** Convert a single sandbox entity to its DTO, resolving the toolbox proxy URL. */
  async toSandboxDto(sandbox: Sandbox): Promise {
    const toolboxProxyUrl = await this.resolveToolboxProxyUrl(sandbox.region)
    return SandboxDto.fromSandbox(sandbox, toolboxProxyUrl)
  }

  /** Batch variant of toSandboxDto: resolves proxy URLs per distinct region in one pass. */
  async toSandboxDtos(sandboxes: Sandbox[]): Promise {
    const urlMap = await this.resolveToolboxProxyUrls(sandboxes.map((s) => s.region))
    return sandboxes.map((s) => {
      const url = urlMap.get(s.region)
      if (!url) {
        throw new NotFoundException(`Toolbox proxy URL not resolved for region ${s.region}`)
      }
      return SandboxDto.fromSandbox(s, url)
    })
  }

  /**
   * Resolve the toolbox proxy URL for a region, with a Redis cache in front.
   * Falls back to the globally configured proxy URL when the region has none.
   */
  async resolveToolboxProxyUrl(regionId: string): Promise {
    const cacheKey = toolboxProxyUrlCacheKey(regionId)
    const cached = await this.redis.get(cacheKey)
    if (cached) {
      return cached
    }
    const region = await this.regionService.findOne(regionId)
    const url = region?.toolboxProxyUrl
      ? region.toolboxProxyUrl.replace(/\/+$/, '') + '/toolbox'
      : this.configService.getOrThrow('proxy.toolboxUrl')
    // Best-effort cache write; a failure only costs a future cache miss.
    this.redis.setex(cacheKey, TOOLBOX_PROXY_URL_CACHE_TTL_S, url).catch((err) => {
      this.logger.warn(`Failed to cache toolbox proxy URL for region ${regionId}: ${err.message}`)
    })
    return url
  }

  /**
   * Resolve toolbox proxy URLs for many regions at once.
   * Reads all cache keys through one Redis pipeline, then looks up and caches the misses.
   */
  async resolveToolboxProxyUrls(regionIds: string[]): Promise> {
    const unique = [...new Set(regionIds)]
    const result = new Map()
    const pipeline = this.redis.pipeline()
    for (const id of unique) {
      pipeline.get(toolboxProxyUrlCacheKey(id))
    }
    const cached = await pipeline.exec()
    const uncached: string[] = []
    for (let i = 0; i < unique.length; i++) {
      // Each ioredis pipeline result is an [error, value] pair, in request order.
      const err = cached?.[i]?.[0]
      if (err) {
        this.logger.warn(`Failed to get cached toolbox proxy URL for region ${unique[i]}: ${err.message}`)
      }
      const val = cached?.[i]?.[1] as string | null
      if (val) {
        result.set(unique[i], val)
      } else {
        uncached.push(unique[i])
      }
    }
    if (uncached.length > 0) {
      const regions = await this.regionService.findByIds(uncached)
      const regionMap = new Map(regions.map((r) => [r.id, r]))
      const fallback = this.configService.getOrThrow('proxy.toolboxUrl')
      const setPipeline = this.redis.pipeline()
      for (const id of uncached) {
        const region = regionMap.get(id)
        const url = region?.toolboxProxyUrl
          ? region.toolboxProxyUrl.replace(/\/+$/, '') + '/toolbox'
          : fallback
        result.set(id, url)
        setPipeline.setex(toolboxProxyUrlCacheKey(id), TOOLBOX_PROXY_URL_CACHE_TTL_S, url)
      }
      // Best-effort cache fill; log individual failures but don't fail the request.
      const setResults = await setPipeline.exec()
      setResults?.forEach(([err], i) => {
        if (err) {
          this.logger.warn(`Failed to cache toolbox proxy URL for region ${uncached[i]}: ${err.message}`)
        }
      })
    }
    return result
  }

  /**
   * Build the public URL for a sandbox's build logs.
   * Requires the sandbox to have build info; prefers the region proxy URL,
   * otherwise falls back to the globally configured proxy host.
   */
  async getBuildLogsUrl(sandboxIdOrName: string, organizationId: string): Promise {
    const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId)
    if (!sandbox.buildInfo?.snapshotRef) {
      throw new NotFoundException(`Sandbox ${sandboxIdOrName} has no build info`)
    }
    const region = await this.regionService.findOne(sandbox.region, true)
    if (!region) {
      throw new NotFoundException(`Region for runner for sandbox ${sandboxIdOrName} not found`)
    }
    if (!region.proxyUrl) {
      return `${this.configService.getOrThrow('proxy.protocol')}://${this.configService.getOrThrow('proxy.domain')}/sandboxes/${sandbox.id}/build-logs`
    }
    return region.proxyUrl + '/sandboxes/' + sandbox.id + '/build-logs'
  }

  /**
   * Resolve a region by name or id, defaulting to the organization's default region.
   * Lookup order for an explicit value: org-scoped name, global name, then id.
   */
  private async getValidatedOrDefaultRegion(organization: Organization, regionIdOrName?: string): Promise {
    if (!organization.defaultRegionId) {
      throw new DefaultRegionRequiredException()
    }
    regionIdOrName = regionIdOrName?.trim()
    if (!regionIdOrName) {
      const region = await this.regionService.findOne(organization.defaultRegionId)
      if (!region) {
        throw new NotFoundException('Default region not found')
      }
      return region
    }
    const region =
      (await this.regionService.findOneByName(regionIdOrName, organization.id)) ??
      (await this.regionService.findOneByName(regionIdOrName, null)) ??
(await this.regionService.findOne(regionIdOrName))
    if (!region) {
      throw new NotFoundException('Region not found')
    }
    return region
  }

  /** Validate a sandbox class, defaulting to SMALL when none is given. */
  private getValidatedOrDefaultClass(sandboxClass: SandboxClass): SandboxClass {
    if (!sandboxClass) {
      return SandboxClass.SMALL
    }
    if (Object.values(SandboxClass).includes(sandboxClass)) {
      return sandboxClass
    } else {
      throw new BadRequestError('Invalid class')
    }
  }

  /** Replace the full label map of a sandbox (previous labels are discarded). */
  async replaceLabels(
    sandboxIdOrName: string,
    labels: { [key: string]: string },
    organizationId?: string,
  ): Promise {
    const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId)
    // Replace all labels
    const updateData: Partial = {
      labels,
    }
    return await this.sandboxRepository.update(sandbox.id, { updateData, entity: sandbox })
  }

  // Cron: hard-delete sandboxes that have sat in DESTROYED for over 24 hours.
  @Cron(CronExpression.EVERY_SECOND, { name: 'cleanup-destroyed-sandboxes' })
  @LogExecution('cleanup-destroyed-sandboxes')
  @WithInstrumentation()
  async cleanupDestroyedSandboxes() {
    const twentyFourHoursAgo = new Date()
    twentyFourHoursAgo.setHours(twentyFourHoursAgo.getHours() - 24)
    const destroyedSandboxs = await this.sandboxRepository.delete({
      state: SandboxState.DESTROYED,
      updatedAt: LessThan(twentyFourHoursAgo),
    })
    if (destroyedSandboxs.affected > 0) {
      this.logger.debug(`Cleaned up ${destroyedSandboxs.affected} destroyed sandboxes`)
    }
  }

  // Cron: hard-delete BUILD_FAILED sandboxes already marked for destruction, after 24 hours.
  @Cron(CronExpression.EVERY_10_MINUTES, { name: 'cleanup-build-failed-sandboxes' })
  @LogExecution('cleanup-build-failed-sandboxes')
  @WithInstrumentation()
  async cleanupBuildFailedSandboxes() {
    const twentyFourHoursAgo = new Date()
    twentyFourHoursAgo.setHours(twentyFourHoursAgo.getHours() - 24)
    const destroyedSandboxs = await this.sandboxRepository.delete({
      state: SandboxState.BUILD_FAILED,
      desiredState: SandboxDesiredState.DESTROYED,
      updatedAt: LessThan(twentyFourHoursAgo),
    })
    if (destroyedSandboxs.affected > 0) {
      this.logger.debug(`Cleaned up ${destroyedSandboxs.affected} build failed sandboxes`)
    }
  }

  // Cron: hard-delete BUILD_FAILED sandboxes whose desired state is still STARTED, after 7 days.
  @Cron(CronExpression.EVERY_SECOND, { name: 'cleanup-stale-build-failed-sandboxes' })
  @LogExecution('cleanup-stale-build-failed-sandboxes')
  @WithInstrumentation()
  async cleanupStaleBuildFailedSandboxes() {
    const sevenDaysAgo = new Date()
    sevenDaysAgo.setDate(sevenDaysAgo.getDate() - 7)
    const result = await this.sandboxRepository.delete({
      state: SandboxState.BUILD_FAILED,
      desiredState: SandboxDesiredState.STARTED,
      updatedAt: LessThan(sevenDaysAgo),
    })
    if (result.affected > 0) {
      this.logger.debug(`Cleaned up ${result.affected} stale build failed sandboxes`)
    }
  }

  // Cron: hard-delete ERROR sandboxes already marked for destruction, after 7 days.
  @Cron(CronExpression.EVERY_SECOND, { name: 'cleanup-stale-error-sandboxes' })
  @LogExecution('cleanup-stale-error-sandboxes')
  @WithInstrumentation()
  async cleanupStaleErrorSandboxes() {
    const sevenDaysAgo = new Date()
    sevenDaysAgo.setDate(sevenDaysAgo.getDate() - 7)
    const result = await this.sandboxRepository.delete({
      state: SandboxState.ERROR,
      desiredState: SandboxDesiredState.DESTROYED,
      updatedAt: LessThan(sevenDaysAgo),
    })
    if (result.affected > 0) {
      this.logger.debug(`Cleaned up ${result.affected} stale error sandboxes`)
    }
  }

  /** Set the auto-stop interval (validated to be non-negative). */
  async setAutostopInterval(sandboxIdOrName: string, interval: number, organizationId?: string): Promise {
    const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId)
    const updateData: Partial = {
      autoStopInterval: this.resolveAutoStopInterval(interval),
    }
    return await this.sandboxRepository.update(sandbox.id, { updateData, entity: sandbox })
  }

  /** Set the auto-archive interval (clamped to the configured maximum; 0 maps to the max). */
  async setAutoArchiveInterval(sandboxIdOrName: string, interval: number, organizationId?: string): Promise {
    const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId)
    const updateData: Partial = {
      autoArchiveInterval: this.resolveAutoArchiveInterval(interval),
    }
    return await this.sandboxRepository.update(sandbox.id, { updateData, entity: sandbox })
  }

  /** Set the auto-delete interval (stored as-is, no validation applied here). */
  async setAutoDeleteInterval(sandboxIdOrName: string, interval: number, organizationId?: string): Promise {
    const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId)
    const updateData: Partial = {
      autoDeleteInterval: interval,
    }
return await this.sandboxRepository.update(sandbox.id, { updateData, entity: sandbox })
  }

  /**
   * Update network restrictions (block-all flag and/or allow list) and push the
   * new settings to the sandbox's runner when one is assigned.
   */
  async updateNetworkSettings(
    sandboxIdOrName: string,
    networkBlockAll?: boolean,
    networkAllowList?: string,
    organizationId?: string,
  ): Promise {
    const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId)
    const updateData: Partial = {}
    if (networkBlockAll !== undefined) {
      updateData.networkBlockAll = networkBlockAll
    }
    if (networkAllowList !== undefined) {
      updateData.networkAllowList = this.resolveNetworkAllowList(networkAllowList)
    }
    const updatedSandbox = await this.sandboxRepository.update(sandbox.id, { updateData, entity: sandbox })
    // Update network settings on the runner
    if (sandbox.runnerId) {
      const runner = await this.runnerService.findOne(sandbox.runnerId)
      if (runner) {
        const runnerAdapter = await this.runnerAdapterFactory.create(runner)
        await runnerAdapter.updateNetworkSettings(sandbox.id, networkBlockAll, networkAllowList)
      }
    }
    return updatedSandbox
  }

  // used by internal services to update the state of a sandbox to resolve domain and runner state mismatch
  // notably, when a sandbox instance stops or errors on the runner, the domain state needs to be updated to reflect the actual state
  async updateState(
    sandboxId: string,
    newState: SandboxState,
    recoverable = false,
    errorReason?: string,
  ): Promise {
    const sandbox = await this.sandboxRepository.findOne({
      where: { id: sandboxId },
    })
    if (!sandbox) {
      throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`)
    }
    if (sandbox.state === newState) {
      this.logger.debug(`Sandbox ${sandboxId} is already in state ${newState}`)
      return
    }
    // only allow updating the state of started | stopped sandboxes
    if (![SandboxState.STARTED, SandboxState.STOPPED].includes(sandbox.state)) {
      throw new BadRequestError('Sandbox is not in a valid state to be updated')
    }
    if (sandbox.desiredState == SandboxDesiredState.DESTROYED) {
      this.logger.debug(`Sandbox ${sandboxId} is already DESTROYED, skipping state update`)
      return
    }
    const oldState = sandbox.state
    const oldDesiredState = sandbox.desiredState
    const updateData: Partial = {
      state: newState,
      recoverable: false,
    }
    if (errorReason !== undefined) {
      updateData.errorReason = errorReason
      // Only an ERROR transition may be flagged as recoverable.
      if (newState === SandboxState.ERROR) {
        updateData.recoverable = recoverable
      }
    }
    // we need to update the desired state to match the new state
    const desiredState = this.getExpectedDesiredStateForState(newState)
    if (desiredState) {
      updateData.desiredState = desiredState
    }
    // Guarded write: only applies if state/desiredState are still what was read above.
    await this.sandboxRepository.updateWhere(sandbox.id, {
      updateData,
      whereCondition: { pending: false, state: oldState, desiredState: oldDesiredState },
    })
  }

  // Event handler: top up a warm pool by creating a fresh sandbox for it.
  @OnEvent(WarmPoolEvents.TOPUP_REQUESTED)
  private async createWarmPoolSandbox(event: WarmPoolTopUpRequested) {
    await this.createForWarmPool(event.warmPool)
  }

  // Cron: destroy warm-pool sandboxes (the zero-UUID organization) that are
  // still running on runners marked unschedulable.
  @Cron(CronExpression.EVERY_MINUTE, { name: 'handle-unschedulable-runners' })
  @LogExecution('handle-unschedulable-runners')
  @WithInstrumentation()
  private async handleUnschedulableRunners() {
    const runners = await this.runnerRepository.find({ where: { unschedulable: true } })
    if (runners.length === 0) {
      return
    }
    // find all sandboxes that are using the unschedulable runners and have organizationId = '00000000-0000-0000-0000-000000000000'
    const sandboxes = await this.sandboxRepository.find({
      where: {
        runnerId: In(runners.map((runner) => runner.id)),
        organizationId: '00000000-0000-0000-0000-000000000000',
        state: SandboxState.STARTED,
        desiredState: Not(SandboxDesiredState.DESTROYED),
      },
    })
    if (sandboxes.length === 0) {
      return
    }
    const destroyPromises = sandboxes.map((sandbox) => this.destroy(sandbox.id))
    const results = await Promise.allSettled(destroyPromises)
    // Log any failed sandbox destructions
    results.forEach((result, index) => {
      if (result.status === 'rejected') {
        this.logger.error(`Failed to destroy sandbox ${sandboxes[index].id}: ${result.reason}`)
      }
    })
  }

  /** Whether the sandbox is publicly accessible. */
  async isSandboxPublic(sandboxId: string): Promise {
    const sandbox = await this.sandboxRepository.findOne({
      where: { id:
sandboxId },
    })
    if (!sandbox) {
      throw new NotFoundException(`Sandbox with ID ${sandboxId} not found`)
    }
    return sandbox.public
  }

  // Event handler: stop a sandbox whose organization was suspended.
  @OnEvent(OrganizationEvents.SUSPENDED_SANDBOX_STOPPED)
  async handleSuspendedSandboxStopped(event: OrganizationSuspendedSandboxStoppedEvent) {
    await this.stop(event.sandboxId).catch((error) => {
      // log the error for now, but don't throw it as it will be retried
      this.logger.error(`Error stopping sandbox from suspended organization. SandboxId: ${event.sandboxId}: `, error)
    })
  }

  /** Validate an auto-stop interval: must be non-negative. */
  private resolveAutoStopInterval(autoStopInterval: number): number {
    if (autoStopInterval < 0) {
      throw new BadRequestError('Auto-stop interval must be non-negative')
    }
    return autoStopInterval
  }

  /** Validate and clamp an auto-archive interval; 0 maps to the configured maximum. */
  private resolveAutoArchiveInterval(autoArchiveInterval: number): number {
    if (autoArchiveInterval < 0) {
      throw new BadRequestError('Auto-archive interval must be non-negative')
    }
    const maxAutoArchiveInterval = this.configService.getOrThrow('maxAutoArchiveInterval')
    if (autoArchiveInterval === 0) {
      return maxAutoArchiveInterval
    }
    return Math.min(autoArchiveInterval, maxAutoArchiveInterval)
  }

  /** Validate a network allow list string, converting validation failures to 400s. */
  private resolveNetworkAllowList(networkAllowList: string): string {
    try {
      validateNetworkAllowList(networkAllowList)
    } catch (error) {
      throw new BadRequestError(error instanceof Error ? error.message : 'Invalid network allow list')
    }
    return networkAllowList
  }

  /** Validate volume mount paths and subpaths, converting validation failures to 400s. */
  private resolveVolumes(volumes: SandboxVolume[]): SandboxVolume[] {
    try {
      validateMountPaths(volumes)
    } catch (error) {
      throw new BadRequestError(error instanceof Error ? error.message : 'Invalid volume mount configuration')
    }
    try {
      validateSubpaths(volumes)
    } catch (error) {
      throw new BadRequestError(error instanceof Error ? error.message : 'Invalid volume subpath configuration')
    }
    return volumes
  }

  /**
   * Create (or replace) time-limited SSH access for a sandbox and return its DTO
   * with the appropriate SSH gateway URL (region-specific when available).
   */
  async createSshAccess(
    sandboxIdOrName: string,
    expiresInMinutes = 60,
    organizationId?: string,
  ): Promise {
    // check if sandbox exists
    const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId)
    // Revoke any existing SSH access for this sandbox
    await this.revokeSshAccess(sandbox.id)
    const sshAccess = new SshAccess()
    sshAccess.sandboxId = sandbox.id
    // Generate a safe token without '_' or '-' to avoid CLI issues
    sshAccess.token = customNanoid(urlAlphabet.replace('_', '').replace('-', ''))(32)
    sshAccess.expiresAt = new Date(Date.now() + expiresInMinutes * 60 * 1000)
    await this.sshAccessRepository.save(sshAccess)
    const region = await this.regionService.findOne(sandbox.region, true)
    if (region && region.sshGatewayUrl) {
      return SshAccessDto.fromSshAccess(sshAccess, region.sshGatewayUrl)
    }
    return SshAccessDto.fromSshAccess(sshAccess, this.configService.getOrThrow('sshGateway.url'))
  }

  /** Revoke one SSH access token, or all tokens when none is given. */
  async revokeSshAccess(sandboxIdOrName: string, token?: string, organizationId?: string): Promise {
    const sandbox = await this.findOneByIdOrName(sandboxIdOrName, organizationId)
    if (token) {
      // Revoke specific SSH access by token
      await this.sshAccessRepository.delete({ sandboxId: sandbox.id, token })
    } else {
      // Revoke all SSH access for the sandbox
      await this.sshAccessRepository.delete({ sandboxId: sandbox.id })
    }
    return sandbox
  }

  /**
   * Check an SSH access token: it must exist and not be expired.
   * Returns the owning sandbox id when valid.
   */
  async validateSshAccess(token: string): Promise {
    const sshAccess = await this.sshAccessRepository.findOne({
      where: {
        token,
      },
      relations: ['sandbox'],
    })
    if (!sshAccess) {
      return { valid: false, sandboxId: null }
    }
    // Check if token is expired
    const isExpired = sshAccess.expiresAt < new Date()
    if (isExpired) {
      return { valid: false, sandboxId: null }
    }
    // Get runner information if sandbox exists
    if (sshAccess.sandbox && sshAccess.sandbox.runnerId) {
      const runner = await this.runnerService.findOne(sshAccess.sandbox.runnerId)
      if (runner) {
        return {
          valid: true,
          sandboxId: sshAccess.sandbox.id,
        }
      }
    }
    return { valid: true, sandboxId: sshAccess.sandbox.id }
  }

  /** Persist a backup-state transition computed by the Sandbox entity helper. */
  async updateSandboxBackupState(
    sandboxId: string,
    backupState: BackupState,
    backupSnapshot?: string | null,
    backupRegistryId?: string | null,
    backupErrorReason?: string | null,
  ): Promise {
    const sandboxToUpdate = await this.sandboxRepository.findOneByOrFail({
      id: sandboxId,
    })
    const updateData = Sandbox.getBackupStateUpdate(
      sandboxToUpdate,
      backupState,
      backupSnapshot,
      backupRegistryId,
      backupErrorReason,
    )
    await this.sandboxRepository.update(sandboxId, { updateData, entity: sandboxToUpdate })
  }
}


================================================
FILE: apps/api/src/sandbox/services/snapshot.service.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import {
  Injectable,
  NotFoundException,
  ConflictException,
  ForbiddenException,
  BadRequestException,
  Logger,
} from '@nestjs/common'
import { InjectRepository } from '@nestjs/typeorm'
import { Repository, Not, In, Raw, ILike, FindOptionsWhere } from 'typeorm'
import { v4 as uuidv4, validate as isUUID } from 'uuid'
import { Snapshot } from '../entities/snapshot.entity'
import { SnapshotState } from '../enums/snapshot-state.enum'
import { CreateSnapshotDto } from '../dto/create-snapshot.dto'
import { BuildInfo } from '../entities/build-info.entity'
import { generateBuildInfoHash as generateBuildSnapshotRef } from '../entities/build-info.entity'
import { EventEmitter2, OnEvent } from '@nestjs/event-emitter'
import { SandboxEvents } from '../constants/sandbox-events.constants'
import { SandboxCreatedEvent } from '../events/sandbox-create.event'
import { Organization } from '../../organization/entities/organization.entity'
import { OrganizationService } from '../../organization/services/organization.service'
import { SnapshotRunner } from '../entities/snapshot-runner.entity'
import { SandboxState } from '../enums/sandbox-state.enum'
import {
OrganizationEvents } from '../../organization/constants/organization-events.constant'
import { OrganizationSuspendedSnapshotDeactivatedEvent } from '../../organization/events/organization-suspended-snapshot-deactivated.event'
import { SnapshotRunnerState } from '../enums/snapshot-runner-state.enum'
import { PaginatedList } from '../../common/interfaces/paginated-list.interface'
import { OrganizationUsageService } from '../../organization/services/organization-usage.service'
import { RedisLockProvider } from '../common/redis-lock.provider'
import { SnapshotSortDirection, SnapshotSortField } from '../dto/list-snapshots-query.dto'
import { PER_SANDBOX_LIMIT_MESSAGE } from '../../common/constants/error-messages'
import { DockerRegistryService } from '../../docker-registry/services/docker-registry.service'
import { DefaultRegionRequiredException } from '../../organization/exceptions/DefaultRegionRequiredException'
import { Region } from '../../region/entities/region.entity'
import { RunnerState } from '../enums/runner-state.enum'
import { OnAsyncEvent } from '../../common/decorators/on-async-event.decorator'
import { RunnerEvents } from '../constants/runner-events'
import { RunnerDeletedEvent } from '../events/runner-deleted.event'
import { SnapshotRegion } from '../entities/snapshot-region.entity'
import { RegionType } from '../../region/enums/region-type.enum'
import { SnapshotEvents } from '../constants/snapshot-events'
import { SnapshotCreatedEvent } from '../events/snapshot-created.event'
import { RunnerService } from './runner.service'
import { RegionService } from '../../region/services/region.service'
import { TypedConfigService } from '../../config/typed-config.service'
import { SandboxRepository } from '../repositories/sandbox.repository'
import { SnapshotActivatedEvent } from '../events/snapshot-activated.event'

// Image/snapshot name: slash-separated components with an optional sha256 digest suffix.
const IMAGE_NAME_REGEX = /^[a-zA-Z0-9_.\-:]+(\/[a-zA-Z0-9_.\-:]+)*(@sha256:[a-f0-9]{64})?$/

@Injectable()
export class SnapshotService {
  private readonly logger = new Logger(SnapshotService.name)

  constructor(
    private readonly sandboxRepository: SandboxRepository,
    @InjectRepository(Snapshot)
    private readonly snapshotRepository: Repository,
    @InjectRepository(BuildInfo)
    private readonly buildInfoRepository: Repository,
    @InjectRepository(SnapshotRunner)
    private readonly snapshotRunnerRepository: Repository,
    @InjectRepository(Region)
    private readonly regionRepository: Repository,
    @InjectRepository(SnapshotRegion)
    private readonly snapshotRegionRepository: Repository,
    private readonly organizationService: OrganizationService,
    private readonly organizationUsageService: OrganizationUsageService,
    private readonly redisLockProvider: RedisLockProvider,
    private readonly runnerService: RunnerService,
    private readonly regionService: RegionService,
    private readonly dockerRegistryService: DockerRegistryService,
    private readonly eventEmitter: EventEmitter2,
    private readonly configService: TypedConfigService,
  ) {}

  /**
   * Validate a container image reference (tag or digest form).
   * Returns an error message, or null when the name is acceptable.
   */
  private validateImageName(name: string): string | null {
    // Check for digest format (@sha256:hash)
    if (name.includes('@sha256:')) {
      const [imageName, digest] = name.split('@sha256:')
      if (!imageName || !digest || !/^[a-f0-9]{64}$/.test(digest)) {
        return 'Invalid digest format. Must be image@sha256:64_hex_characters'
      }
      return null
    }
    // Handle tag format (image:tag)
    if (!name.includes(':') || name.endsWith(':') || /:\s*$/.test(name)) {
      return 'Image name must include a tag (e.g., ubuntu:22.04) or digest (@sha256:...)'
    }
    if (name.endsWith(':latest')) {
      return 'Images with tag ":latest" are not allowed'
    }
    if (!IMAGE_NAME_REGEX.test(name)) {
      return 'Invalid image name format. Must be lowercase, may contain digits, dots, dashes, and single slashes between components'
    }
    return null
  }

  /** Validate a snapshot name against the shared image-name pattern; null when OK. */
  private validateSnapshotName(name: string): string | null {
    if (!IMAGE_NAME_REGEX.test(name)) {
      return 'Invalid snapshot name format. May contain letters, digits, dots, colons, and dashes'
    }
    return null
  }

  /** Normalize an entrypoint array: drop blank entries; undefined when nothing remains. */
  private processEntrypoint(entrypoint?: string[]): string[] | undefined {
    if (!entrypoint || entrypoint.length === 0) {
      return undefined
    }
    // Filter out empty strings from the array
    const filteredEntrypoint = entrypoint.filter((cmd) => cmd && cmd.trim().length > 0)
    return filteredEntrypoint.length > 0 ? filteredEntrypoint : undefined
  }

  /**
   * Whether a READY snapshot-runner entry for this ref exists on a READY,
   * schedulable runner in the given region.
   */
  private async readySnapshotRunnerExists(ref: string, regionId: string): Promise {
    return await this.snapshotRunnerRepository
      .createQueryBuilder('sr')
      .innerJoin('runner', 'r', 'r.id::text = sr."runnerId"::text')
      .where('sr."snapshotRef" = :ref', { ref })
      .andWhere('sr.state = :snapshotRunnerState', { snapshotRunnerState: SnapshotRunnerState.READY })
      .andWhere('r.region = :regionId', { regionId })
      .andWhere('r.state = :runnerState', { runnerState: RunnerState.READY })
      .andWhere('r.unschedulable = false')
      .getExists()
  }

  /**
   * Create a snapshot that will be pulled from an existing image.
   * Validates names and organization quotas; rolls back pending usage on failure.
   */
  async createFromPull(organization: Organization, createSnapshotDto: CreateSnapshotDto, general = false) {
    if (!organization.defaultRegionId) {
      throw new DefaultRegionRequiredException()
    }
    const regionId = await this.getValidatedOrDefaultRegionId(organization, createSnapshotDto.regionId)
    let pendingSnapshotCountIncrement: number | undefined
    if (!createSnapshotDto.imageName) {
      throw new BadRequestException('Must specify an image name')
    }
    try {
      const entrypoint = createSnapshotDto.entrypoint
      // Pull-based snapshots start with no ref and in PENDING state.
      const ref: string | undefined = undefined
      const state: SnapshotState = SnapshotState.PENDING
      const nameValidationError = this.validateSnapshotName(createSnapshotDto.name)
      if (nameValidationError) {
        throw new BadRequestException(nameValidationError)
      }
      const imageValidationError = this.validateImageName(createSnapshotDto.imageName)
      if (imageValidationError) {
        throw new BadRequestException(imageValidationError)
      }
      this.organizationService.assertOrganizationIsNotSuspended(organization)
      const newSnapshotCount = 1
      const { pendingSnapshotCountIncremented } = await
this.validateOrganizationQuotas(
          organization,
          newSnapshotCount,
          createSnapshotDto.cpu,
          createSnapshotDto.memory,
          createSnapshotDto.disk,
        )
      if (pendingSnapshotCountIncremented) {
        pendingSnapshotCountIncrement = newSnapshotCount
      }
      try {
        const snapshotId = uuidv4()
        const snapshot = this.snapshotRepository.create({
          id: snapshotId,
          organizationId: organization.id,
          ...createSnapshotDto,
          entrypoint: this.processEntrypoint(entrypoint),
          mem: createSnapshotDto.memory, // Map memory to mem
          state,
          ref,
          general,
          snapshotRegions: [{ snapshotId, regionId }],
        })
        const savedSnapshot = await this.snapshotRepository.save(snapshot)
        this.eventEmitter.emit(SnapshotEvents.CREATED, new SnapshotCreatedEvent(savedSnapshot))
        return savedSnapshot
      } catch (error) {
        if (error.code === '23505') {
          // PostgreSQL unique violation error code
          throw new ConflictException(
            `Snapshot with name "${createSnapshotDto.name}" already exists for this organization`,
          )
        }
        throw error
      }
    } catch (error) {
      // Release any pending snapshot-count reservation made above.
      await this.rollbackPendingUsage(organization.id, pendingSnapshotCountIncrement)
      throw error
    }
  }

  /**
   * Create a snapshot built from a Dockerfile (BuildInfo).
   * Reuses an existing BuildInfo with the same content hash, and activates the
   * snapshot immediately when a ready runner already holds the built ref.
   */
  async createFromBuildInfo(organization: Organization, createSnapshotDto: CreateSnapshotDto, general = false) {
    if (!organization.defaultRegionId) {
      throw new DefaultRegionRequiredException()
    }
    const regionId = await this.getValidatedOrDefaultRegionId(organization, createSnapshotDto.regionId)
    let pendingSnapshotCountIncrement: number | undefined
    let entrypoint: string[] | undefined = undefined
    try {
      const nameValidationError = this.validateSnapshotName(createSnapshotDto.name)
      if (nameValidationError) {
        throw new BadRequestException(nameValidationError)
      }
      this.organizationService.assertOrganizationIsNotSuspended(organization)
      const newSnapshotCount = 1
      const { pendingSnapshotCountIncremented } = await this.validateOrganizationQuotas(
        organization,
        newSnapshotCount,
        createSnapshotDto.cpu,
        createSnapshotDto.memory,
        createSnapshotDto.disk,
      )
      if (pendingSnapshotCountIncremented) {
        pendingSnapshotCountIncrement = newSnapshotCount
      }
      entrypoint = this.getEntrypointFromDockerfile(createSnapshotDto.buildInfo.dockerfileContent)
      const snapshotId = uuidv4()
      const snapshot = this.snapshotRepository.create({
        id: snapshotId,
        organizationId: organization.id,
        ...createSnapshotDto,
        entrypoint: this.processEntrypoint(entrypoint),
        mem: createSnapshotDto.memory, // Map memory to mem
        state: SnapshotState.PENDING,
        general,
        snapshotRegions: [{ snapshotId, regionId }],
      })
      // Content-addressed ref derived from the Dockerfile and its context hashes.
      const buildSnapshotRef = generateBuildSnapshotRef(
        createSnapshotDto.buildInfo.dockerfileContent,
        createSnapshotDto.buildInfo.contextHashes,
      )
      // Check if buildInfo with the same snapshotRef already exists
      const existingBuildInfo = await this.buildInfoRepository.findOne({
        where: { snapshotRef: buildSnapshotRef },
      })
      if (existingBuildInfo) {
        snapshot.buildInfo = existingBuildInfo
        // Update lastUsed once per minute at most
        if (await this.redisLockProvider.lock(`build-info:${existingBuildInfo.snapshotRef}:update`, 60)) {
          existingBuildInfo.lastUsedAt = new Date()
          await this.buildInfoRepository.save(existingBuildInfo)
        }
      } else {
        const buildInfoEntity = this.buildInfoRepository.create({
          ...createSnapshotDto.buildInfo,
        })
        await this.buildInfoRepository.save(buildInfoEntity)
        snapshot.buildInfo = buildInfoEntity
      }
      const internalRegistry = await this.dockerRegistryService.getAvailableInternalRegistry(regionId)
      if (!internalRegistry) {
        throw new Error('No internal registry found for snapshot')
      }
      snapshot.ref = `${internalRegistry.url.replace(/^(https?:\/\/)/, '')}/${internalRegistry.project || 'daytona'}/${buildSnapshotRef}`
      // If a ready runner already holds this ref, the snapshot is usable right away.
      const exists = await this.readySnapshotRunnerExists(snapshot.ref, regionId)
      if (exists) {
        snapshot.state = SnapshotState.ACTIVE
        snapshot.lastUsedAt = new Date()
      }
      try {
        const savedSnapshot = await this.snapshotRepository.save(snapshot)
        this.eventEmitter.emit(SnapshotEvents.CREATED, new SnapshotCreatedEvent(savedSnapshot))
        return savedSnapshot
      } catch (error) {
        if (error.code === '23505') {
          // PostgreSQL unique violation error code
          throw new ConflictException(
            `Snapshot with name "${createSnapshotDto.name}" already exists for this organization`,
          )
        }
        throw error
      }
    } catch (error) {
      // Release any pending snapshot-count reservation made above.
      await this.rollbackPendingUsage(organization.id, pendingSnapshotCountIncrement)
      throw error
    }
  }

  /** Mark a snapshot for removal (general snapshots cannot be deleted). */
  async removeSnapshot(snapshotId: string) {
    const snapshot = await this.snapshotRepository.findOne({
      where: { id: snapshotId },
    })
    if (!snapshot) {
      throw new NotFoundException(`Snapshot ${snapshotId} not found`)
    }
    if (snapshot.general) {
      throw new ForbiddenException('You cannot delete a general snapshot')
    }
    snapshot.state = SnapshotState.REMOVING
    await this.snapshotRepository.save(snapshot)
  }

  /**
   * Paginated snapshot listing: the organization's own snapshots plus visible
   * general snapshots, with optional name filter and sorting.
   */
  async getAllSnapshots(
    organizationId: string,
    page = 1,
    limit = 10,
    filters?: { name?: string },
    sort?: { field?: SnapshotSortField; direction?: SnapshotSortDirection },
  ): Promise> {
    // Normalize pagination inputs (query values may arrive as strings).
    const pageNum = Number(page)
    const limitNum = Number(limit)
    const { name } = filters || {}
    const { field: sortField, direction: sortDirection } = sort || {}
    const baseFindOptions: FindOptionsWhere = {
      ...(name ?
{ name: ILike(`%${name}%`) } : {}), } // Retrieve all snapshots belonging to the organization as well as all general snapshots const where: FindOptionsWhere[] = [ { ...baseFindOptions, organizationId, }, { ...baseFindOptions, general: true, hideFromUsers: false, }, ] const [items, total] = await this.snapshotRepository.findAndCount({ where, relations: ['snapshotRegions'], order: { general: 'ASC', // Sort general snapshots last [sortField]: { direction: sortDirection, nulls: 'LAST', }, ...(sortField !== SnapshotSortField.CREATED_AT && { createdAt: 'DESC' }), }, skip: (pageNum - 1) * limitNum, take: limitNum, }) // Filter out snapshot regions that are not available to the organization const availableRegions = await this.organizationService.listAvailableRegions(organizationId) const availableRegionIds = new Set(availableRegions.map((r) => r.id)) for (const snapshot of items) { if (snapshot.snapshotRegions) { snapshot.snapshotRegions = snapshot.snapshotRegions.filter((sr) => availableRegionIds.has(sr.regionId)) } } return { items, total, page: pageNum, totalPages: Math.ceil(total / limit), } } async getSnapshot(snapshotId: string): Promise { const snapshot = await this.snapshotRepository.findOne({ where: { id: snapshotId }, }) if (!snapshot) { throw new NotFoundException(`Snapshot ${snapshotId} not found`) } return snapshot } async getSnapshotWithRegions(snapshotIdOrName: string, organizationId: string): Promise { const where: FindOptionsWhere[] = [ { name: snapshotIdOrName, organizationId }, { name: snapshotIdOrName, general: true }, ] if (isUUID(snapshotIdOrName)) { where.push({ id: snapshotIdOrName }) } const snapshot = await this.snapshotRepository.findOne({ where, relations: ['snapshotRegions'], order: { general: 'ASC' }, }) if (!snapshot) { throw new NotFoundException(`Snapshot ${snapshotIdOrName} not found`) } const availableRegions = await this.organizationService.listAvailableRegions(organizationId) const availableRegionIds = new Set(availableRegions.map((r) 
=> r.id)) if (snapshot.snapshotRegions) { snapshot.snapshotRegions = snapshot.snapshotRegions.filter((sr) => availableRegionIds.has(sr.regionId)) } return snapshot } async getSnapshotByName(snapshotName: string, organizationId: string): Promise { const snapshot = await this.snapshotRepository.findOne({ where: { name: snapshotName, organizationId }, }) if (!snapshot) { // check if the snapshot is general const generalSnapshot = await this.snapshotRepository.findOne({ where: { name: snapshotName, general: true }, }) if (generalSnapshot) { return generalSnapshot } throw new NotFoundException(`Snapshot with name ${snapshotName} not found`) } return snapshot } async setSnapshotGeneralStatus(snapshotId: string, general: boolean) { const snapshot = await this.snapshotRepository.findOne({ where: { id: snapshotId }, }) if (!snapshot) { throw new NotFoundException(`Snapshot ${snapshotId} not found`) } snapshot.general = general return await this.snapshotRepository.save(snapshot) } async getBuildLogsUrl(snapshot: Snapshot): Promise { if (!snapshot.initialRunnerId) { throw new NotFoundException(`Snapshot ${snapshot.id} has no initial runner`) } const runner = await this.runnerService.findOneOrFail(snapshot.initialRunnerId) const region = await this.regionService.findOne(runner.region, true) if (!region) { throw new NotFoundException(`Region for initial runner for snapshot ${snapshot.id} not found`) } if (!region.proxyUrl) { return `${this.configService.getOrThrow('proxy.protocol')}://${this.configService.getOrThrow('proxy.domain')}/snapshots/${snapshot.id}/build-logs` } return region.proxyUrl + '/snapshots/' + snapshot.id + '/build-logs' } private async validateOrganizationQuotas( organization: Organization, addedSnapshotCount: number, cpu?: number, memory?: number, disk?: number, ): Promise<{ pendingSnapshotCountIncremented: boolean }> { // validate per-sandbox quotas if (cpu && cpu > organization.maxCpuPerSandbox) { throw new ForbiddenException( `CPU request ${cpu} exceeds 
maximum allowed per sandbox (${organization.maxCpuPerSandbox}).\n${PER_SANDBOX_LIMIT_MESSAGE}`, ) } if (memory && memory > organization.maxMemoryPerSandbox) { throw new ForbiddenException( `Memory request ${memory}GB exceeds maximum allowed per sandbox (${organization.maxMemoryPerSandbox}GB).\n${PER_SANDBOX_LIMIT_MESSAGE}`, ) } if (disk && disk > organization.maxDiskPerSandbox) { throw new ForbiddenException( `Disk request ${disk}GB exceeds maximum allowed per sandbox (${organization.maxDiskPerSandbox}GB).\n${PER_SANDBOX_LIMIT_MESSAGE}`, ) } // validate usage quotas await this.organizationUsageService.incrementPendingSnapshotUsage(organization.id, addedSnapshotCount) const usageOverview = await this.organizationUsageService.getSnapshotUsageOverview(organization.id) try { if (usageOverview.currentSnapshotUsage + usageOverview.pendingSnapshotUsage > organization.snapshotQuota) { throw new ForbiddenException(`Snapshot quota exceeded. Maximum allowed: ${organization.snapshotQuota}`) } } catch (error) { await this.rollbackPendingUsage(organization.id, addedSnapshotCount) throw error } return { pendingSnapshotCountIncremented: true, } } async rollbackPendingUsage(organizationId: string, pendingSnapshotCountIncrement?: number): Promise { if (!pendingSnapshotCountIncrement) { return } try { await this.organizationUsageService.decrementPendingSnapshotUsage(organizationId, pendingSnapshotCountIncrement) } catch (error) { this.logger.error(`Error rolling back pending snapshot usage: ${error}`) } } @OnEvent(SandboxEvents.CREATED) private async handleSandboxCreatedEvent(event: SandboxCreatedEvent) { if (!event.sandbox.snapshot) { return } // Update once per minute at most if (!(await this.redisLockProvider.lock(`snapshot:${event.sandbox.snapshot}:update-last-used`, 60))) { return } const snapshot = await this.getSnapshotByName(event.sandbox.snapshot, event.sandbox.organizationId) snapshot.lastUsedAt = event.sandbox.createdAt await this.snapshotRepository.save(snapshot) } async 
activateSnapshot(snapshotId: string, organization: Organization): Promise { const lockKey = `snapshot:${snapshotId}:activate` await this.redisLockProvider.waitForLock(lockKey, 60) let pendingSnapshotCountIncrement: number | undefined try { const snapshot = await this.snapshotRepository.findOne({ where: { id: snapshotId }, }) if (!snapshot) { throw new NotFoundException(`Snapshot ${snapshotId} not found`) } if (snapshot.state === SnapshotState.ACTIVE) { throw new BadRequestException(`Snapshot ${snapshotId} is already active`) } if (snapshot.state !== SnapshotState.INACTIVE) { throw new BadRequestException(`Snapshot ${snapshotId} cannot be activated - it is in ${snapshot.state} state`) } this.organizationService.assertOrganizationIsNotSuspended(organization) const activatedSnapshotCount = 1 const { pendingSnapshotCountIncremented } = await this.validateOrganizationQuotas( organization, activatedSnapshotCount, snapshot.cpu, snapshot.mem, snapshot.disk, ) if (pendingSnapshotCountIncremented) { pendingSnapshotCountIncrement = activatedSnapshotCount } snapshot.state = SnapshotState.PENDING const savedSnapshot = await this.snapshotRepository.save(snapshot) this.eventEmitter.emit(SnapshotEvents.ACTIVATED, new SnapshotActivatedEvent(savedSnapshot)) return savedSnapshot } catch (error) { await this.rollbackPendingUsage(organization.id, pendingSnapshotCountIncrement) throw error } finally { await this.redisLockProvider.unlock(lockKey) } } async canCleanupImage(imageName: string): Promise { const snapshot = await this.snapshotRepository.findOne({ where: { state: Not(In([SnapshotState.ERROR, SnapshotState.BUILD_FAILED])), ref: imageName, }, }) if (snapshot) { return false } const sandbox = await this.sandboxRepository.findOne({ where: [ { existingBackupSnapshots: Raw((alias) => `${alias} @> '[{"snapshotName":"${imageName}"}]'::jsonb`), }, { existingBackupSnapshots: Raw((alias) => `${alias} @> '[{"imageName":"${imageName}"}]'::jsonb`), }, { backupSnapshot: imageName, }, ], }) if 
(sandbox && sandbox.state !== SandboxState.DESTROYED) { return false } return true } async deactivateSnapshot(snapshotId: string): Promise { const snapshot = await this.snapshotRepository.findOne({ where: { id: snapshotId }, }) if (!snapshot) { throw new NotFoundException(`Snapshot ${snapshotId} not found`) } if (snapshot.state === SnapshotState.INACTIVE) { return } snapshot.state = SnapshotState.INACTIVE await this.snapshotRepository.save(snapshot) try { const countActiveSnapshots = await this.snapshotRepository.count({ where: { state: SnapshotState.ACTIVE, ref: snapshot.ref, }, }) if (countActiveSnapshots === 0) { // Set associated SnapshotRunner records to REMOVING state const result = await this.snapshotRunnerRepository.update( { snapshotRef: snapshot.ref }, { state: SnapshotRunnerState.REMOVING }, ) this.logger.debug( `Deactivated snapshot ${snapshot.id} and marked ${result.affected} SnapshotRunners for removal`, ) } } catch (error) { this.logger.error(`Deactivated snapshot ${snapshot.id}, but failed to mark snapshot runners for removal`, error) } } // TODO: revise/cleanup getEntrypointFromDockerfile(dockerfileContent: string): string[] { // Match ENTRYPOINT with either a string or JSON array const entrypointMatch = dockerfileContent.match(/ENTRYPOINT\s+(.*)/) if (entrypointMatch) { const rawEntrypoint = entrypointMatch[1].trim() try { // Try parsing as JSON array const parsed = JSON.parse(rawEntrypoint) if (Array.isArray(parsed)) { return parsed } } catch { // Fallback: it's probably a plain string return [rawEntrypoint.replace(/["']/g, '')] } } return ['sleep', 'infinity'] } /** * Validates and returns a region ID for snapshot availability. * * @param organization - The organization which is creating the snapshot. * @param regionId - The requested region ID. If omitted, the organization's default region is used. 
* @returns The validated region ID * @throws {NotFoundException} If the requested region is not available to the organization */ private async getValidatedOrDefaultRegionId(organization: Organization, regionId?: string): Promise { if (!regionId) { return organization.defaultRegionId } const region = await this.regionRepository.findOne({ where: { id: regionId }, }) if (!region) { throw new NotFoundException('Region not found') } const availableRegions = await this.organizationService.listAvailableRegions(organization.id) if (!availableRegions.some((r) => r.id === regionId)) { if (region.regionType === RegionType.SHARED) { // region is public, but the organization does not have a quota for it throw new ForbiddenException(`Region ${regionId} is not available to the organization`) } else { // region is not public, respond as if the region was not found throw new NotFoundException('Region not found') } } return regionId } /** * @param snapshotId * @returns The regions where the snapshot is configured to be propagated to. */ async getSnapshotRegions(snapshotId: string): Promise { return await this.regionRepository .createQueryBuilder('r') .innerJoin('snapshot_region', 'sr', 'sr."regionId" = r.id') .where('sr."snapshotId" = :snapshotId', { snapshotId }) .getMany() } /** * @param snapshotId - The ID of the snapshot. * @param regionId - The ID of the region. * @returns true if the snapshot is available in the region, false otherwise. */ async isAvailableInRegion(snapshotId: string, regionId: string): Promise { return await this.snapshotRegionRepository.exists({ where: { snapshotId, regionId, }, }) } @OnEvent(OrganizationEvents.SUSPENDED_SNAPSHOT_DEACTIVATED) async handleSuspendedOrganizationSnapshotDeactivated(event: OrganizationSuspendedSnapshotDeactivatedEvent) { await this.deactivateSnapshot(event.snapshotId).catch((error) => { // log the error for now, but don't throw it as it will be retried this.logger.error( `Error deactivating snapshot from suspended organization. 
SnapshotId: ${event.snapshotId}: `, error, ) }) } @OnAsyncEvent({ event: RunnerEvents.DELETED, }) async handleRunnerDeletedEvent(payload: RunnerDeletedEvent): Promise { await payload.entityManager.update( SnapshotRunner, { runnerId: payload.runnerId }, { state: SnapshotRunnerState.REMOVING }, ) } } ================================================ FILE: apps/api/src/sandbox/services/toolbox.deprecated.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, NotFoundException, HttpException, BadRequestException, Logger } from '@nestjs/common' import { Sandbox } from '../entities/sandbox.entity' import { Runner } from '../entities/runner.entity' import axios from 'axios' import { SandboxState } from '../enums/sandbox-state.enum' import { RedisLockProvider } from '../common/redis-lock.provider' import { SandboxService } from './sandbox.service' import { RunnerService } from './runner.service' import { SandboxRepository } from '../repositories/sandbox.repository' @Injectable() export class ToolboxService { private readonly logger = new Logger(ToolboxService.name) constructor( private readonly sandboxRepository: SandboxRepository, private readonly redisLockProvider: RedisLockProvider, private readonly sandboxService: SandboxService, private readonly runnerService: RunnerService, ) {} async forwardRequestToRunner(sandboxId: string, method: string, path: string, data?: any): Promise { const runner = await this.getRunner(sandboxId) if (!runner.proxyUrl) { throw new NotFoundException(`Runner for sandbox ${sandboxId} has no proxy URL`) } const maxRetries = 5 let attempt = 1 while (attempt <= maxRetries) { try { const headers: any = { Authorization: `Bearer ${runner.apiKey}`, } // Only set Content-Type for requests with body data if (data && typeof data === 'object' && Object.keys(data).length > 0) { headers['Content-Type'] = 'application/json' } const requestConfig: any = 
{ method, url: `${runner.proxyUrl}/sandboxes/${sandboxId}${path}`, headers, maxBodyLength: 209715200, // 200MB in bytes maxContentLength: 209715200, // 200MB in bytes timeout: 360000, // 360 seconds } // Only add data if it's not an empty string or undefined if (data !== undefined && data !== '') { requestConfig.data = data } const response = await axios(requestConfig) return response.data } catch (error) { if (error.message.includes('ECONNREFUSED')) { if (attempt === maxRetries) { throw new HttpException('Failed to connect to runner after multiple attempts', 500) } // Wait for attempt * 1000ms (1s, 2s, 3s) await new Promise((resolve) => setTimeout(resolve, attempt * 1000)) attempt++ continue } // If it's an axios error with a response, throw a NestJS HttpException if (error.response) { throw new HttpException(error.response.data, error.response.status) } // For other types of errors, throw a generic 500 error throw new HttpException(`Error forwarding request to runner: ${error.message}`, 500) } } } public async getRunner(sandboxId: string): Promise { let sandbox: Sandbox | null = null try { sandbox = await this.sandboxRepository.findOne({ where: { id: sandboxId }, }) if (!sandbox) { throw new NotFoundException('Sandbox not found') } const runner = await this.runnerService.findOneOrFail(sandbox.runnerId) if (sandbox.state !== SandboxState.STARTED) { throw new BadRequestException('Sandbox is not running') } return runner } finally { const lockKey = `sandbox-last-activity-${sandboxId}` const acquired = await this.redisLockProvider.lock(lockKey, 10) // redis for cooldown period - 10 seconds // prevents database flooding when multiple requests are made at the same time if (acquired) { await this.sandboxService.updateLastActivityAt(sandboxId, new Date()) } } } } ================================================ FILE: apps/api/src/sandbox/services/volume.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ConflictException, ForbiddenException, Injectable, Logger, NotFoundException, ServiceUnavailableException, } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { Repository, Not, In } from 'typeorm' import { Volume } from '../entities/volume.entity' import { VolumeState } from '../enums/volume-state.enum' import { CreateVolumeDto } from '../dto/create-volume.dto' import { v4 as uuidv4 } from 'uuid' import { BadRequestError } from '../../exceptions/bad-request.exception' import { Organization } from '../../organization/entities/organization.entity' import { OnEvent } from '@nestjs/event-emitter' import { SandboxEvents } from '../constants/sandbox-events.constants' import { SandboxCreatedEvent } from '../events/sandbox-create.event' import { OrganizationService } from '../../organization/services/organization.service' import { OrganizationUsageService } from '../../organization/services/organization-usage.service' import { TypedConfigService } from '../../config/typed-config.service' import { RedisLockProvider } from '../common/redis-lock.provider' import { SandboxRepository } from '../repositories/sandbox.repository' import { SandboxDesiredState } from '../enums/sandbox-desired-state.enum' @Injectable() export class VolumeService { private readonly logger = new Logger(VolumeService.name) constructor( @InjectRepository(Volume) private readonly volumeRepository: Repository, private readonly sandboxRepository: SandboxRepository, private readonly organizationService: OrganizationService, private readonly organizationUsageService: OrganizationUsageService, private readonly configService: TypedConfigService, private readonly redisLockProvider: RedisLockProvider, ) {} private async validateOrganizationQuotas( organization: Organization, addedVolumeCount: number, ): Promise<{ pendingVolumeCountIncremented: boolean }> { // validate usage quotas await 
this.organizationUsageService.incrementPendingVolumeUsage(organization.id, addedVolumeCount) const usageOverview = await this.organizationUsageService.getVolumeUsageOverview(organization.id) try { if (usageOverview.currentVolumeUsage + usageOverview.pendingVolumeUsage > organization.volumeQuota) { throw new ForbiddenException(`Volume quota exceeded. Maximum allowed: ${organization.volumeQuota}`) } } catch (error) { await this.rollbackPendingUsage(organization.id, addedVolumeCount) throw error } return { pendingVolumeCountIncremented: true, } } async rollbackPendingUsage(organizationId: string, pendingVolumeCountIncrement?: number): Promise { if (!pendingVolumeCountIncrement) { return } try { await this.organizationUsageService.decrementPendingVolumeUsage(organizationId, pendingVolumeCountIncrement) } catch (error) { this.logger.error(`Error rolling back pending volume usage: ${error}`) } } async create(organization: Organization, createVolumeDto: CreateVolumeDto): Promise { if (!this.configService.get('s3.endpoint')) { throw new ServiceUnavailableException('Object storage is not configured') } let pendingVolumeCountIncrement: number | undefined try { this.organizationService.assertOrganizationIsNotSuspended(organization) const newVolumeCount = 1 const { pendingVolumeCountIncremented } = await this.validateOrganizationQuotas(organization, newVolumeCount) if (pendingVolumeCountIncremented) { pendingVolumeCountIncrement = newVolumeCount } const volume = new Volume() // Generate ID volume.id = uuidv4() // Set name from DTO or use ID as default volume.name = createVolumeDto.name || volume.id // Check if volume with same name already exists for organization const existingVolume = await this.volumeRepository.findOne({ where: { organizationId: organization.id, name: volume.name, state: Not(VolumeState.DELETED), }, }) if (existingVolume) { throw new BadRequestError(`Volume with name ${volume.name} already exists`) } volume.organizationId = organization.id volume.state = 
VolumeState.PENDING_CREATE const savedVolume = await this.volumeRepository.save(volume) this.logger.debug(`Created volume ${savedVolume.id} for organization ${organization.id}`) return savedVolume } catch (error) { await this.rollbackPendingUsage(organization.id, pendingVolumeCountIncrement) throw error } } async delete(volumeId: string): Promise { const volume = await this.volumeRepository.findOne({ where: { id: volumeId, }, }) if (!volume) { throw new NotFoundException(`Volume with ID ${volumeId} not found`) } if (volume.state !== VolumeState.READY && volume.state !== VolumeState.ERROR) { throw new BadRequestError( `Volume must be in '${VolumeState.READY}' or '${VolumeState.ERROR}' state in order to be deleted`, ) } // Check if any non-destroyed sandboxes are using this volume const sandboxUsingVolume = await this.sandboxRepository .createQueryBuilder('sandbox') .where('sandbox.organizationId = :organizationId', { organizationId: volume.organizationId, }) .andWhere('sandbox.volumes @> :volFilter::jsonb', { volFilter: JSON.stringify([{ volumeId }]), }) .andWhere('sandbox.desiredState != :destroyed', { destroyed: SandboxDesiredState.DESTROYED, }) .select(['sandbox.id', 'sandbox.name']) .getOne() if (sandboxUsingVolume) { throw new ConflictException( `Volume cannot be deleted because it is in use by one or more sandboxes (e.g. ${sandboxUsingVolume.name})`, ) } // Update state to mark as deleting volume.state = VolumeState.PENDING_DELETE await this.volumeRepository.save(volume) this.logger.debug(`Marked volume ${volumeId} for deletion`) } async findOne(volumeId: string): Promise { const volume = await this.volumeRepository.findOne({ where: { id: volumeId }, }) if (!volume) { throw new NotFoundException(`Volume with ID ${volumeId} not found`) } return volume } async findAll(organizationId: string, includeDeleted = false): Promise { return this.volumeRepository.find({ where: { organizationId, ...(includeDeleted ? 
{} : { state: Not(VolumeState.DELETED) }), }, order: { lastUsedAt: { direction: 'DESC', nulls: 'LAST', }, createdAt: 'DESC', }, }) } async findByName(organizationId: string, name: string): Promise { const volume = await this.volumeRepository.findOne({ where: { organizationId, name, state: Not(VolumeState.DELETED), }, }) if (!volume) { throw new NotFoundException(`Volume with name ${name} not found`) } return volume } async validateVolumes(organizationId: string, volumeIdOrNames: string[]): Promise { if (!volumeIdOrNames.length) { return } const volumes = await this.volumeRepository.find({ where: [ { id: In(volumeIdOrNames), organizationId, state: Not(VolumeState.DELETED) }, { name: In(volumeIdOrNames), organizationId, state: Not(VolumeState.DELETED) }, ], }) // Check if all requested volumes were found and are in a READY state const foundIds = new Set(volumes.map((v) => v.id)) const foundNames = new Set(volumes.map((v) => v.name)) for (const idOrName of volumeIdOrNames) { if (!foundIds.has(idOrName) && !foundNames.has(idOrName)) { throw new NotFoundException(`Volume '${idOrName}' not found`) } } for (const volume of volumes) { if (volume.state !== VolumeState.READY) { throw new BadRequestError(`Volume '${volume.name}' is not in a ready state. 
Current state: ${volume.state}`) } } } async getOrganizationId(params: { id: string } | { name: string; organizationId: string }): Promise { if ('id' in params) { const volume = await this.volumeRepository.findOneOrFail({ where: { id: params.id, }, select: ['organizationId'], loadEagerRelations: false, }) return volume.organizationId } const volume = await this.volumeRepository.findOneOrFail({ where: { name: params.name, organizationId: params.organizationId, }, select: ['organizationId'], loadEagerRelations: false, }) return volume.organizationId } @OnEvent(SandboxEvents.CREATED) private async handleSandboxCreatedEvent(event: SandboxCreatedEvent) { if (!event.sandbox.volumes.length) { return } try { const volumeIds = event.sandbox.volumes.map((vol) => vol.volumeId) const volumes = await this.volumeRepository.find({ where: { id: In(volumeIds) } }) const results = await Promise.allSettled( volumes.map(async (volume) => { // Update once per minute at most if (!(await this.redisLockProvider.lock(`volume:${volume.id}:update-last-used`, 60))) { return } volume.lastUsedAt = event.sandbox.createdAt return this.volumeRepository.save(volume) }), ) results.forEach((result) => { if (result.status === 'rejected') { this.logger.error( `Failed to update volume lastUsedAt timestamp for sandbox ${event.sandbox.id}: ${result.reason}`, ) } }) } catch (err) { this.logger.error(err) } } } ================================================ FILE: apps/api/src/sandbox/subscribers/runner.subscriber.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Inject, Logger } from '@nestjs/common' import { EventEmitter2 } from '@nestjs/event-emitter' import { DataSource, EntitySubscriberInterface, EventSubscriber, InsertEvent, UpdateEvent } from 'typeorm' import { RunnerEvents } from '../constants/runner-events' import { Runner } from '../entities/runner.entity' import { RunnerCreatedEvent } from '../events/runner-created.event' import { RunnerStateUpdatedEvent } from '../events/runner-state-updated.event' import { RunnerUnschedulableUpdatedEvent } from '../events/runner-unschedulable-updated.event' import { runnerLookupCacheKeyById } from '../utils/runner-lookup-cache.util' @EventSubscriber() export class RunnerSubscriber implements EntitySubscriberInterface { private readonly logger = new Logger(RunnerSubscriber.name) @Inject(EventEmitter2) private eventEmitter: EventEmitter2 private dataSource: DataSource constructor(dataSource: DataSource) { this.dataSource = dataSource dataSource.subscribers.push(this) } listenTo() { return Runner } afterInsert(event: InsertEvent) { this.eventEmitter.emit(RunnerEvents.CREATED, new RunnerCreatedEvent(event.entity as Runner)) } afterUpdate(event: UpdateEvent) { const updatedColumns = event.updatedColumns.map((col) => col.propertyName) updatedColumns.forEach((column) => { // For Repository.update(), TypeORM doesn't provide databaseEntity. if (!event.entity || !event.databaseEntity) { return } switch (column) { case 'state': this.eventEmitter.emit( RunnerEvents.STATE_UPDATED, new RunnerStateUpdatedEvent(event.entity as Runner, event.databaseEntity[column], event.entity[column]), ) break case 'unschedulable': this.eventEmitter.emit( RunnerEvents.UNSCHEDULABLE_UPDATED, new RunnerUnschedulableUpdatedEvent( event.entity as Runner, event.databaseEntity[column], event.entity[column], ), ) break default: break } }) // Invalidate cached runner lookup queries on any update triggered via save(). 
// Note: Repository.update() does not provide databaseEntity, so those paths // invalidate explicitly via RunnerService.updateRunner(). const entity = event.entity as Runner | undefined if (!entity?.id) { return } const cache = this.dataSource.queryResultCache if (!cache) { return } cache .remove([runnerLookupCacheKeyById(entity.id)]) .catch((error) => this.logger.warn( `Failed to invalidate runner lookup cache for ${entity.id}: ${error instanceof Error ? error.message : String(error)}`, ), ) } } ================================================ FILE: apps/api/src/sandbox/subscribers/snapshot.subscriber.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Inject } from '@nestjs/common' import { EventEmitter2 } from '@nestjs/event-emitter' import { DataSource, EntitySubscriberInterface, EventSubscriber, RemoveEvent, UpdateEvent } from 'typeorm' import { SnapshotEvents } from '../constants/snapshot-events' import { Snapshot } from '../entities/snapshot.entity' import { SnapshotStateUpdatedEvent } from '../events/snapshot-state-updated.event' import { SnapshotRemovedEvent } from '../events/snapshot-removed.event' @EventSubscriber() export class SnapshotSubscriber implements EntitySubscriberInterface { @Inject(EventEmitter2) private eventEmitter: EventEmitter2 constructor(dataSource: DataSource) { dataSource.subscribers.push(this) } listenTo() { return Snapshot } afterUpdate(event: UpdateEvent) { const updatedColumns = event.updatedColumns.map((col) => col.propertyName) updatedColumns.forEach((column) => { switch (column) { case 'state': this.eventEmitter.emit( SnapshotEvents.STATE_UPDATED, new SnapshotStateUpdatedEvent(event.entity as Snapshot, event.databaseEntity[column], event.entity[column]), ) break default: break } }) } beforeRemove(event: RemoveEvent) { this.eventEmitter.emit(SnapshotEvents.REMOVED, new SnapshotRemovedEvent(event.databaseEntity as Snapshot)) } } 
================================================ FILE: apps/api/src/sandbox/subscribers/volume.subscriber.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Inject } from '@nestjs/common' import { EventEmitter2 } from '@nestjs/event-emitter' import { DataSource, EntitySubscriberInterface, EventSubscriber, InsertEvent, UpdateEvent } from 'typeorm' import { VolumeEvents } from '../constants/volume-events' import { Volume } from '../entities/volume.entity' import { VolumeCreatedEvent } from '../events/volume-created.event' import { VolumeStateUpdatedEvent } from '../events/volume-state-updated.event' import { VolumeLastUsedAtUpdatedEvent } from '../events/volume-last-used-at-updated.event' @EventSubscriber() export class VolumeSubscriber implements EntitySubscriberInterface { @Inject(EventEmitter2) private eventEmitter: EventEmitter2 constructor(dataSource: DataSource) { dataSource.subscribers.push(this) } listenTo() { return Volume } afterInsert(event: InsertEvent) { this.eventEmitter.emit(VolumeEvents.CREATED, new VolumeCreatedEvent(event.entity as Volume)) } afterUpdate(event: UpdateEvent) { const updatedColumns = event.updatedColumns.map((col) => col.propertyName) updatedColumns.forEach((column) => { switch (column) { case 'state': this.eventEmitter.emit( VolumeEvents.STATE_UPDATED, new VolumeStateUpdatedEvent(event.entity as Volume, event.databaseEntity[column], event.entity[column]), ) break case 'lastUsedAt': this.eventEmitter.emit( VolumeEvents.LAST_USED_AT_UPDATED, new VolumeLastUsedAtUpdatedEvent(event.entity as Volume), ) break default: break } }) } } ================================================ FILE: apps/api/src/sandbox/utils/lock-key.util.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export function getStateChangeLockKey(id: string): string { return `sandbox:${id}:state-change` } ================================================ FILE: apps/api/src/sandbox/utils/network-validation.util.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { isIPv4 } from 'net' /** * Validates network allow list to ensure valid CIDR network addresses are allowed * @param networkAllowList - Comma-separated string of network addresses * @returns null if valid, error message string if invalid */ export function validateNetworkAllowList(networkAllowList: string): void { const networks = networkAllowList.split(',').map((net: string) => net.trim()) for (const network of networks) { if (!network) continue // Skip empty entries const [ipAddress, prefixLength] = network.split('/') if (!isIPv4(ipAddress)) { throw new Error(`Invalid IP address: "${ipAddress}" in network "${network}". Must be a valid IPv4 address`) } if (!prefixLength) { throw new Error(`Invalid network format: "${network}". Missing CIDR prefix length (e.g., /24)`) } // Validate CIDR prefix length (0-32 for IPv4) const prefix = parseInt(prefixLength, 10) if (prefix < 0 || prefix > 32) { throw new Error(`Invalid CIDR prefix length: ${network}. Prefix must be between 0 and 32`) } } if (networks.length > 10) { throw new Error(`Network allow list cannot contain more than 10 networks`) } } ================================================ FILE: apps/api/src/sandbox/utils/runner-lookup-cache.util.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export const RUNNER_LOOKUP_CACHE_TTL_MS = 60_000 export function runnerLookupCacheKeyById(runnerId: string): string { return `runner:lookup:by-id:${runnerId}` } ================================================ FILE: apps/api/src/sandbox/utils/sandbox-lookup-cache.util.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const SANDBOX_LOOKUP_CACHE_TTL_MS = 10_000 export const SANDBOX_BUILD_INFO_CACHE_TTL_MS = 60_000 export const SANDBOX_ORG_ID_CACHE_TTL_MS = 60_000 export const TOOLBOX_PROXY_URL_CACHE_TTL_S = 30 * 60 // 30 minutes type SandboxLookupCacheKeyArgs = { organizationId?: string | null returnDestroyed?: boolean } export function sandboxLookupCacheKeyById(args: SandboxLookupCacheKeyArgs & { sandboxId: string }): string { const organizationId = args.organizationId ?? 'none' const returnDestroyed = args.returnDestroyed ? 1 : 0 return `sandbox:lookup:by-id:org:${organizationId}:returnDestroyed:${returnDestroyed}:value:${args.sandboxId}` } export function sandboxLookupCacheKeyByName(args: SandboxLookupCacheKeyArgs & { sandboxName: string }): string { const organizationId = args.organizationId ?? 'none' const returnDestroyed = args.returnDestroyed ? 1 : 0 return `sandbox:lookup:by-name:org:${organizationId}:returnDestroyed:${returnDestroyed}:value:${args.sandboxName}` } export function sandboxLookupCacheKeyByAuthToken(args: { authToken: string }): string { return `sandbox:lookup:by-authToken:${args.authToken}` } type SandboxOrgIdCacheKeyArgs = { organizationId?: string } export function sandboxOrgIdCacheKeyById(args: SandboxOrgIdCacheKeyArgs & { sandboxId: string }): string { const organizationId = args.organizationId ?? 
'none' return `sandbox:orgId:by-id:org:${organizationId}:value:${args.sandboxId}` } export function sandboxOrgIdCacheKeyByName(args: SandboxOrgIdCacheKeyArgs & { sandboxName: string }): string { const organizationId = args.organizationId ?? 'none' return `sandbox:orgId:by-name:org:${organizationId}:value:${args.sandboxName}` } export function toolboxProxyUrlCacheKey(regionId: string): string { return `toolbox-proxy-url:region:${regionId}` } ================================================ FILE: apps/api/src/sandbox/utils/sanitize-error.util.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export function sanitizeSandboxError(error: any): { recoverable: boolean; errorReason: string } { if (typeof error === 'string') { try { const errObj = JSON.parse(error) as { recoverable: boolean; errorReason: string } return { recoverable: errObj.recoverable, errorReason: errObj.errorReason } } catch { return { recoverable: false, errorReason: error } } } else if (typeof error === 'object' && error !== null && 'recoverable' in error && 'errorReason' in error) { return { recoverable: error.recoverable, errorReason: error.errorReason } } else if (typeof error === 'object' && error.message) { return sanitizeSandboxError(error.message) } return { recoverable: false, errorReason: String(error) } } ================================================ FILE: apps/api/src/sandbox/utils/volume-mount-path-validation.util.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { SandboxVolume } from '../dto/sandbox.dto' /** * Validates mount paths for sandbox volumes to ensure they are safe and valid * @param volumes - Array of SandboxVolume objects to validate * @throws Error with descriptive message if any mount path is invalid */ export function validateMountPaths(volumes: SandboxVolume[]): void { const errors: string[] = [] for (const volume of volumes) { const value = volume.mountPath if (typeof value !== 'string') { errors.push(`Invalid mount path ${value} (must be a string)`) continue } if (!value.startsWith('/')) { errors.push(`Invalid mount path ${value} (must be absolute)`) continue } if (value === '/' || value === '//') { errors.push(`Invalid mount path ${value} (cannot mount to the root directory)`) continue } if (value.includes('/../') || value.includes('/./') || value.endsWith('/..') || value.endsWith('/.')) { errors.push(`Invalid mount path ${value} (cannot contain relative path components)`) continue } if (/\/\/+/.test(value.slice(1))) { errors.push(`Invalid mount path ${value} (cannot contain consecutive slashes)`) continue } const invalidPaths = ['/proc', '/sys', '/dev', '/boot', '/etc', '/bin', '/sbin', '/lib', '/lib64'] const matchedInvalid = invalidPaths.find((invalid) => value === invalid || value.startsWith(invalid + '/')) if (matchedInvalid) { errors.push(`Invalid mount path ${value} (cannot mount to system directory)`) } } if (errors.length > 0) { throw new Error(errors.join(', ')) } } /** * Validates subpaths for sandbox volumes to ensure they are safe S3 key prefixes * @param volumes - Array of SandboxVolume objects to validate * @throws Error with descriptive message if any subpath is invalid */ export function validateSubpaths(volumes: SandboxVolume[]): void { const errors: string[] = [] for (const volume of volumes) { const subpath = volume.subpath // Empty/undefined subpath is valid (means mount entire volume) if (!subpath) { continue } if (typeof subpath !== 
'string') { errors.push(`Invalid subpath ${subpath} (must be a string)`) continue } // S3 keys should not start with / if (subpath.startsWith('/')) { errors.push(`Invalid subpath "${subpath}" (S3 key prefixes cannot start with /)`) continue } // Prevent path traversal if (subpath.includes('..')) { errors.push(`Invalid subpath "${subpath}" (cannot contain .. for security)`) continue } // No consecutive slashes if (subpath.includes('//')) { errors.push(`Invalid subpath "${subpath}" (cannot contain consecutive slashes)`) continue } } if (errors.length > 0) { throw new Error(errors.join(', ')) } } ================================================ FILE: apps/api/src/sandbox-telemetry/controllers/sandbox-telemetry.controller.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Controller, Get, Param, Query, UseGuards } from '@nestjs/common' import { ApiOAuth2, ApiResponse, ApiOperation, ApiParam, ApiTags, ApiHeader, ApiBearerAuth } from '@nestjs/swagger' import { CombinedAuthGuard } from '../../auth/combined-auth.guard' import { OrganizationResourceActionGuard } from '../../organization/guards/organization-resource-action.guard' import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard' import { SandboxAccessGuard } from '../../sandbox/guards/sandbox-access.guard' import { CustomHeaders } from '../../common/constants/header.constants' import { SandboxTelemetryService } from '../services/sandbox-telemetry.service' import { LogsQueryParamsDto, TelemetryQueryParamsDto, MetricsQueryParamsDto } from '../dto/telemetry-query-params.dto' import { PaginatedLogsDto } from '../dto/paginated-logs.dto' import { PaginatedTracesDto } from '../dto/paginated-traces.dto' import { TraceSpanDto } from '../dto/trace-span.dto' import { MetricsResponseDto } from '../dto/metrics-response.dto' import { RequireFlagsEnabled } from '@openfeature/nestjs-sdk' import { 
AnalyticsApiDisabledGuard } from '../guards/analytics-api-disabled.guard' @ApiTags('sandbox') @Controller('sandbox') @ApiHeader(CustomHeaders.ORGANIZATION_ID) @UseGuards(CombinedAuthGuard, OrganizationResourceActionGuard, AuthenticatedRateLimitGuard, AnalyticsApiDisabledGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class SandboxTelemetryController { constructor(private readonly sandboxTelemetryService: SandboxTelemetryService) {} @Get(':sandboxId/telemetry/logs') @ApiOperation({ summary: 'Get sandbox logs', operationId: 'getSandboxLogs', description: 'Retrieve OTEL logs for a sandbox within a time range', }) @ApiParam({ name: 'sandboxId', description: 'ID of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Paginated list of log entries', type: PaginatedLogsDto, }) @UseGuards(SandboxAccessGuard) @RequireFlagsEnabled({ flags: [{ flagKey: 'organization_experiments', defaultValue: true }] }) async getSandboxLogs( @Param('sandboxId') sandboxId: string, @Query() queryParams: LogsQueryParamsDto, ): Promise { return this.sandboxTelemetryService.getLogs( sandboxId, queryParams.from, queryParams.to, queryParams.page ?? 1, queryParams.limit ?? 100, queryParams.severities, queryParams.search, ) } @Get(':sandboxId/telemetry/traces') @ApiOperation({ summary: 'Get sandbox traces', operationId: 'getSandboxTraces', description: 'Retrieve OTEL traces for a sandbox within a time range', }) @ApiParam({ name: 'sandboxId', description: 'ID of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Paginated list of trace summaries', type: PaginatedTracesDto, }) @UseGuards(SandboxAccessGuard) @RequireFlagsEnabled({ flags: [{ flagKey: 'organization_experiments', defaultValue: true }] }) async getSandboxTraces( @Param('sandboxId') sandboxId: string, @Query() queryParams: TelemetryQueryParamsDto, ): Promise { return this.sandboxTelemetryService.getTraces( sandboxId, queryParams.from, queryParams.to, queryParams.page ?? 
1, queryParams.limit ?? 100, ) } @Get(':sandboxId/telemetry/traces/:traceId') @ApiOperation({ summary: 'Get trace spans', operationId: 'getSandboxTraceSpans', description: 'Retrieve all spans for a specific trace', }) @ApiParam({ name: 'sandboxId', description: 'ID of the sandbox', type: 'string', }) @ApiParam({ name: 'traceId', description: 'ID of the trace', type: 'string', }) @ApiResponse({ status: 200, description: 'List of spans in the trace', type: [TraceSpanDto], }) @UseGuards(SandboxAccessGuard) @RequireFlagsEnabled({ flags: [{ flagKey: 'organization_experiments', defaultValue: true }] }) async getSandboxTraceSpans( @Param('sandboxId') sandboxId: string, @Param('traceId') traceId: string, ): Promise { return this.sandboxTelemetryService.getTraceSpans(sandboxId, traceId) } @Get(':sandboxId/telemetry/metrics') @ApiOperation({ summary: 'Get sandbox metrics', operationId: 'getSandboxMetrics', description: 'Retrieve OTEL metrics for a sandbox within a time range', }) @ApiParam({ name: 'sandboxId', description: 'ID of the sandbox', type: 'string', }) @ApiResponse({ status: 200, description: 'Metrics time series data', type: MetricsResponseDto, }) @UseGuards(SandboxAccessGuard) @RequireFlagsEnabled({ flags: [{ flagKey: 'organization_experiments', defaultValue: true }] }) async getSandboxMetrics( @Param('sandboxId') sandboxId: string, @Query() queryParams: MetricsQueryParamsDto, ): Promise { return this.sandboxTelemetryService.getMetrics(sandboxId, queryParams.from, queryParams.to, queryParams.metricNames) } } ================================================ FILE: apps/api/src/sandbox-telemetry/dto/index.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export * from './telemetry-query-params.dto' export * from './log-entry.dto' export * from './paginated-logs.dto' export * from './trace-summary.dto' export * from './trace-span.dto' export * from './paginated-traces.dto' export * from './metrics-response.dto' ================================================ FILE: apps/api/src/sandbox-telemetry/dto/log-entry.dto.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'LogEntry' }) export class LogEntryDto { @ApiProperty({ description: 'Timestamp of the log entry' }) timestamp: string @ApiProperty({ description: 'Log message body' }) body: string @ApiProperty({ description: 'Severity level text (e.g., INFO, WARN, ERROR)' }) severityText: string @ApiPropertyOptional({ description: 'Severity level number' }) severityNumber?: number @ApiProperty({ description: 'Service name that generated the log' }) serviceName: string @ApiProperty({ type: 'object', description: 'Resource attributes from OTEL', additionalProperties: { type: 'string' }, }) resourceAttributes: Record @ApiProperty({ type: 'object', description: 'Log-specific attributes', additionalProperties: { type: 'string' } }) logAttributes: Record @ApiPropertyOptional({ description: 'Associated trace ID if available' }) traceId?: string @ApiPropertyOptional({ description: 'Associated span ID if available' }) spanId?: string } ================================================ FILE: apps/api/src/sandbox-telemetry/dto/metrics-response.dto.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'MetricDataPoint' }) export class MetricDataPointDto { @ApiProperty({ description: 'Timestamp of the data point' }) timestamp: string @ApiProperty({ description: 'Value at this timestamp' }) value: number } @ApiSchema({ name: 'MetricSeries' }) export class MetricSeriesDto { @ApiProperty({ description: 'Name of the metric' }) metricName: string @ApiProperty({ type: [MetricDataPointDto], description: 'Data points for this metric' }) dataPoints: MetricDataPointDto[] } @ApiSchema({ name: 'MetricsResponse' }) export class MetricsResponseDto { @ApiProperty({ type: [MetricSeriesDto], description: 'List of metric series' }) series: MetricSeriesDto[] } ================================================ FILE: apps/api/src/sandbox-telemetry/dto/paginated-logs.dto.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { LogEntryDto } from './log-entry.dto' @ApiSchema({ name: 'PaginatedLogs' }) export class PaginatedLogsDto { @ApiProperty({ type: [LogEntryDto], description: 'List of log entries' }) items: LogEntryDto[] @ApiProperty({ description: 'Total number of log entries matching the query' }) total: number @ApiProperty({ description: 'Current page number' }) page: number @ApiProperty({ description: 'Total number of pages' }) totalPages: number } ================================================ FILE: apps/api/src/sandbox-telemetry/dto/paginated-traces.dto.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { TraceSummaryDto } from './trace-summary.dto' @ApiSchema({ name: 'PaginatedTraces' }) export class PaginatedTracesDto { @ApiProperty({ type: [TraceSummaryDto], description: 'List of trace summaries' }) items: TraceSummaryDto[] @ApiProperty({ description: 'Total number of traces matching the query' }) total: number @ApiProperty({ description: 'Current page number' }) page: number @ApiProperty({ description: 'Total number of pages' }) totalPages: number } ================================================ FILE: apps/api/src/sandbox-telemetry/dto/telemetry-query-params.dto.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional } from '@nestjs/swagger' import { IsDateString, IsOptional, IsArray, IsString, IsNumber, Min } from 'class-validator' import { Type, Transform } from 'class-transformer' export class TelemetryQueryParamsDto { @ApiProperty({ type: String, format: 'date-time', description: 'Start of time range (ISO 8601)' }) @IsDateString() from: string @ApiProperty({ type: String, format: 'date-time', description: 'End of time range (ISO 8601)' }) @IsDateString() to: string @ApiPropertyOptional({ type: Number, default: 1, description: 'Page number (1-indexed)' }) @IsOptional() @Type(() => Number) @IsNumber() @Min(1) page?: number = 1 @ApiPropertyOptional({ type: Number, default: 100, description: 'Number of items per page' }) @IsOptional() @Type(() => Number) @IsNumber() @Min(1) limit?: number = 100 } export class LogsQueryParamsDto extends TelemetryQueryParamsDto { @ApiPropertyOptional({ type: [String], description: 'Filter by severity levels (DEBUG, INFO, WARN, ERROR)', }) @IsOptional() @IsArray() @IsString({ each: true }) @Transform(({ value }) => (Array.isArray(value) ? 
value : [value])) severities?: string[] @ApiPropertyOptional({ type: String, description: 'Search in log body' }) @IsOptional() @IsString() search?: string } export class MetricsQueryParamsDto { @ApiProperty({ type: String, format: 'date-time', description: 'Start of time range (ISO 8601)' }) @IsDateString() from: string @ApiProperty({ type: String, format: 'date-time', description: 'End of time range (ISO 8601)' }) @IsDateString() to: string @ApiPropertyOptional({ type: [String], description: 'Filter by metric names', }) @IsOptional() @IsArray() @IsString({ each: true }) @Transform(({ value }) => (Array.isArray(value) ? value : [value])) metricNames?: string[] } ================================================ FILE: apps/api/src/sandbox-telemetry/dto/trace-span.dto.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'TraceSpan' }) export class TraceSpanDto { @ApiProperty({ description: 'Trace identifier' }) traceId: string @ApiProperty({ description: 'Span identifier' }) spanId: string @ApiPropertyOptional({ description: 'Parent span identifier' }) parentSpanId?: string @ApiProperty({ description: 'Span name' }) spanName: string @ApiProperty({ description: 'Span start timestamp' }) timestamp: string @ApiProperty({ description: 'Span duration in nanoseconds' }) durationNs: number @ApiProperty({ type: 'object', description: 'Span attributes', additionalProperties: { type: 'string' } }) spanAttributes: Record @ApiPropertyOptional({ description: 'Status code of the span' }) statusCode?: string @ApiPropertyOptional({ description: 'Status message' }) statusMessage?: string } ================================================ FILE: apps/api/src/sandbox-telemetry/dto/trace-summary.dto.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger' @ApiSchema({ name: 'TraceSummary' }) export class TraceSummaryDto { @ApiProperty({ description: 'Unique trace identifier' }) traceId: string @ApiProperty({ description: 'Name of the root span' }) rootSpanName: string @ApiProperty({ description: 'Trace start time' }) startTime: string @ApiProperty({ description: 'Trace end time' }) endTime: string @ApiProperty({ description: 'Total duration in milliseconds' }) durationMs: number @ApiProperty({ description: 'Number of spans in this trace' }) spanCount: number @ApiPropertyOptional({ description: 'Status code of the trace' }) statusCode?: string } ================================================ FILE: apps/api/src/sandbox-telemetry/guards/analytics-api-disabled.guard.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { CanActivate, Injectable, ForbiddenException } from '@nestjs/common' import { TypedConfigService } from '../../config/typed-config.service' @Injectable() export class AnalyticsApiDisabledGuard implements CanActivate { constructor(private readonly configService: TypedConfigService) {} canActivate(): boolean { if (this.configService.get('analyticsApiUrl')) { throw new ForbiddenException('Telemetry endpoints are disabled when Analytics API is configured') } return true } } ================================================ FILE: apps/api/src/sandbox-telemetry/index.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export * from './sandbox-telemetry.module' export * from './services/sandbox-telemetry.service' export * from './dto' ================================================ FILE: apps/api/src/sandbox-telemetry/sandbox-telemetry.module.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { SandboxTelemetryController } from './controllers/sandbox-telemetry.controller' import { SandboxTelemetryService } from './services/sandbox-telemetry.service' import { SandboxModule } from '../sandbox/sandbox.module' import { OrganizationModule } from '../organization/organization.module' @Module({ imports: [SandboxModule, OrganizationModule], controllers: [SandboxTelemetryController], providers: [SandboxTelemetryService], exports: [SandboxTelemetryService], }) export class SandboxTelemetryModule {} ================================================ FILE: apps/api/src/sandbox-telemetry/services/sandbox-telemetry.service.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger } from '@nestjs/common' import { ClickHouseService } from '../../clickhouse/clickhouse.service' import { LogEntryDto } from '../dto/log-entry.dto' import { PaginatedLogsDto } from '../dto/paginated-logs.dto' import { TraceSummaryDto } from '../dto/trace-summary.dto' import { TraceSpanDto } from '../dto/trace-span.dto' import { PaginatedTracesDto } from '../dto/paginated-traces.dto' import { MetricsResponseDto, MetricSeriesDto, MetricDataPointDto } from '../dto/metrics-response.dto' interface ClickHouseLogRow { Timestamp: string Body: string SeverityText: string SeverityNumber: number ServiceName: string ResourceAttributes: Record LogAttributes: Record TraceId: string SpanId: string } interface ClickHouseTraceAggregateRow { TraceId: string startTime: string endTime: string spanCount: number rootSpanName: string totalDuration: number statusCode: string } interface ClickHouseSpanRow { TraceId: string SpanId: string ParentSpanId: string SpanName: string Timestamp: string Duration: number SpanAttributes: Record StatusCode: string StatusMessage: string } interface ClickHouseMetricRow { timestamp: string MetricName: 
string value: number } interface ClickHouseCountRow { count: number } @Injectable() export class SandboxTelemetryService { private readonly logger = new Logger(SandboxTelemetryService.name) constructor(private readonly clickhouseService: ClickHouseService) {} private getServiceName(sandboxId: string): string { return `sandbox-${sandboxId}` } isConfigured(): boolean { return this.clickhouseService.isConfigured() } async getLogs( sandboxId: string, from: string, to: string, page: number, limit: number, severities?: string[], search?: string, ): Promise { const serviceName = this.getServiceName(sandboxId) const offset = (page - 1) * limit // Build WHERE clause for optional filters let whereClause = `ServiceName = {serviceName:String} AND Timestamp >= {from:DateTime64} AND Timestamp <= {to:DateTime64}` if (severities && severities.length > 0) { whereClause += ` AND SeverityText IN ({severities:Array(String)})` } if (search) { whereClause += ` AND Body ILIKE {search:String}` } const params: Record = { serviceName, from: new Date(from), to: new Date(to), limit, offset, } if (severities && severities.length > 0) { params.severities = severities } if (search) { params.search = `%${search}%` } // Get total count const countQuery = ` SELECT count() as count FROM otel_logs WHERE ${whereClause} ` const countResult = await this.clickhouseService.query(countQuery, params) const total = countResult[0]?.count || 0 // Get paginated logs const logsQuery = ` SELECT Timestamp, Body, SeverityText, SeverityNumber, ServiceName, ResourceAttributes, LogAttributes, TraceId, SpanId FROM otel_logs WHERE ${whereClause} ORDER BY Timestamp DESC LIMIT {limit:UInt32} OFFSET {offset:UInt32} ` const rows = await this.clickhouseService.query(logsQuery, params) const items: LogEntryDto[] = rows.map((row) => ({ timestamp: row.Timestamp, body: row.Body, severityText: row.SeverityText, severityNumber: row.SeverityNumber, serviceName: row.ServiceName, resourceAttributes: row.ResourceAttributes || {}, 
logAttributes: row.LogAttributes || {}, traceId: row.TraceId || undefined, spanId: row.SpanId || undefined, })) return { items, total, page, totalPages: Math.ceil(total / limit), } } async getTraces( sandboxId: string, from: string, to: string, page: number, limit: number, ): Promise { const serviceName = this.getServiceName(sandboxId) const offset = (page - 1) * limit const params = { serviceName, from: new Date(from), to: new Date(to), limit, offset, } // Get total count of unique traces const countQuery = ` SELECT count(DISTINCT TraceId) as count FROM otel_traces WHERE ServiceName = {serviceName:String} AND Timestamp >= {from:DateTime64} AND Timestamp <= {to:DateTime64} ` const countResult = await this.clickhouseService.query(countQuery, params) const total = countResult[0]?.count || 0 // Get aggregated trace data const tracesQuery = ` SELECT TraceId, min(Timestamp) as startTime, max(Timestamp) as endTime, count() as spanCount, argMinIf(SpanName, Timestamp, ParentSpanId = '') as rootSpanName, max(Duration) as totalDuration, any(StatusCode) as statusCode FROM otel_traces WHERE ServiceName = {serviceName:String} AND Timestamp >= {from:DateTime64} AND Timestamp <= {to:DateTime64} GROUP BY TraceId ORDER BY startTime DESC LIMIT {limit:UInt32} OFFSET {offset:UInt32} ` const rows = await this.clickhouseService.query(tracesQuery, params) const items: TraceSummaryDto[] = rows.map((row) => ({ traceId: row.TraceId, rootSpanName: row.rootSpanName, startTime: row.startTime, endTime: row.endTime, durationMs: row.totalDuration / 1_000_000, // Convert nanoseconds to milliseconds spanCount: row.spanCount, statusCode: row.statusCode || undefined, })) return { items, total, page, totalPages: Math.ceil(total / limit), } } async getTraceSpans(sandboxId: string, traceId: string): Promise { const serviceName = this.getServiceName(sandboxId) const query = ` SELECT TraceId, SpanId, ParentSpanId, SpanName, Timestamp, Duration, SpanAttributes, StatusCode, StatusMessage FROM otel_traces 
WHERE TraceId = {traceId:String} AND ServiceName = {serviceName:String} ORDER BY Timestamp ASC ` const rows = await this.clickhouseService.query(query, { traceId, serviceName }) return rows.map((row) => ({ traceId: row.TraceId, spanId: row.SpanId, parentSpanId: row.ParentSpanId || undefined, spanName: row.SpanName, timestamp: row.Timestamp, durationNs: row.Duration, spanAttributes: row.SpanAttributes || {}, statusCode: row.StatusCode || undefined, statusMessage: row.StatusMessage || undefined, })) } async getMetrics(sandboxId: string, from: string, to: string, metricNames?: string[]): Promise { const serviceName = this.getServiceName(sandboxId) let whereClause = `ServiceName = {serviceName:String} AND TimeUnix >= {from:DateTime64} AND TimeUnix <= {to:DateTime64}` const params: Record = { serviceName, from: new Date(from), to: new Date(to), } if (metricNames && metricNames.length > 0) { whereClause += ` AND MetricName IN ({metricNames:Array(String)})` params.metricNames = metricNames } // Query gauge metrics with 1-minute intervals const gaugeQuery = ` SELECT toStartOfInterval(TimeUnix, INTERVAL 1 MINUTE) as timestamp, MetricName, avg(Value) as value FROM otel_metrics_gauge WHERE ${whereClause} GROUP BY timestamp, MetricName ORDER BY timestamp ASC ` const rows = await this.clickhouseService.query(gaugeQuery, params) // Group by metric name const seriesMap = new Map() for (const row of rows) { if (!seriesMap.has(row.MetricName)) { seriesMap.set(row.MetricName, []) } seriesMap.get(row.MetricName)!.push({ timestamp: row.timestamp, value: row.value, }) } const series: MetricSeriesDto[] = Array.from(seriesMap.entries()).map(([metricName, dataPoints]) => ({ metricName, dataPoints, })) return { series } } } ================================================ FILE: apps/api/src/tracing.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { NodeSDK } from '@opentelemetry/sdk-node' import { HttpInstrumentation } from '@opentelemetry/instrumentation-http' import { ExpressInstrumentation } from '@opentelemetry/instrumentation-express' import { NestInstrumentation } from '@opentelemetry/instrumentation-nestjs-core' import { BatchSpanProcessor } from '@opentelemetry/sdk-trace-base' import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http' import { CompressionAlgorithm, OTLPExporterNodeConfigBase } from '@opentelemetry/otlp-exporter-base' import { resourceFromAttributes } from '@opentelemetry/resources' import { ATTR_SERVICE_NAME } from '@opentelemetry/semantic-conventions' import { ATTR_DEPLOYMENT_ENVIRONMENT_NAME, ATTR_SERVICE_INSTANCE_ID, } from '@opentelemetry/semantic-conventions/incubating' import { IORedisInstrumentation } from '@opentelemetry/instrumentation-ioredis' import { PgInstrumentation } from '@opentelemetry/instrumentation-pg' import { KafkaJsInstrumentation } from '@opentelemetry/instrumentation-kafkajs' import { getAppMode } from './common/utils/app-mode' import { diag, DiagConsoleLogger, DiagLogLevel } from '@opentelemetry/api' import { hostname } from 'os' import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-http' import { PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics' import { PinoInstrumentation } from '@opentelemetry/instrumentation-pino' import { RuntimeNodeInstrumentation } from '@opentelemetry/instrumentation-runtime-node' import { BatchLogRecordProcessor } from '@opentelemetry/sdk-logs' import { OTLPLogExporter } from '@opentelemetry/exporter-logs-otlp-http' // Enable OpenTelemetry diagnostics diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.WARN) const appMode = getAppMode() const serviceNameSuffix = appMode === 'api' ? 'api' : appMode === 'worker' ? 
'worker' : 'api' const otlpExporterConfig: OTLPExporterNodeConfigBase = { compression: CompressionAlgorithm.GZIP, keepAlive: true, } const otelSdk = new NodeSDK({ resource: resourceFromAttributes({ [ATTR_SERVICE_NAME]: `daytona-${serviceNameSuffix}`, [ATTR_DEPLOYMENT_ENVIRONMENT_NAME]: process.env.ENVIRONMENT, [ATTR_SERVICE_INSTANCE_ID]: process.env.NODE_APP_INSTANCE ? `${hostname()}-${process.env.NODE_APP_INSTANCE}` : hostname(), }), instrumentations: [ new PinoInstrumentation(), new HttpInstrumentation({ requireParentforOutgoingSpans: true }), new ExpressInstrumentation(), new NestInstrumentation(), new IORedisInstrumentation({ requireParentSpan: true }), new PgInstrumentation({ requireParentSpan: true }), new KafkaJsInstrumentation(), new RuntimeNodeInstrumentation(), ], logRecordProcessors: [new BatchLogRecordProcessor(new OTLPLogExporter(otlpExporterConfig))], spanProcessors: [new BatchSpanProcessor(new OTLPTraceExporter(otlpExporterConfig))], metricReaders: [ new PeriodicExportingMetricReader({ exporter: new OTLPMetricExporter(otlpExporterConfig), exportIntervalMillis: 30 * 1000, }), ], }) export { otelSdk } process.on('SIGTERM', async () => { console.log('SIGTERM received, shutting down OpenTelemetry SDK') await otelSdk.shutdown() console.log('OpenTelemetry SDK shut down') }) ================================================ FILE: apps/api/src/usage/entities/sandbox-usage-period-archive.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0
 */

import { Column, Entity, PrimaryGeneratedColumn } from 'typeorm'
import { SandboxUsagePeriod } from './sandbox-usage-period.entity'

// Duplicate of SandboxUsagePeriod
// Used to archive usage periods and keep the original table lightweight
// Will only contain closed usage periods
@Entity('sandbox_usage_periods_archive')
export class SandboxUsagePeriodArchive {
  @PrimaryGeneratedColumn('uuid')
  id: string

  @Column()
  sandboxId: string

  @Column() // Redundant property to optimize billing queries
  organizationId: string

  @Column({ type: 'timestamp with time zone' })
  startAt: Date

  // Non-nullable here (unlike SandboxUsagePeriod.endAt) because only closed
  // periods are ever archived.
  @Column({ type: 'timestamp with time zone' })
  endAt: Date

  @Column({ type: 'float' })
  cpu: number

  @Column({ type: 'float' })
  gpu: number

  @Column({ type: 'float' })
  mem: number

  @Column({ type: 'float' })
  disk: number

  @Column()
  region: string

  // Copies every column except `id`, so persisting the archive row generates a
  // fresh uuid rather than reusing the source period's primary key.
  // NOTE(review): source endAt is typed Date | null — callers must only pass
  // closed (endAt != null) periods; confirm against archiveUsagePeriods().
  public static fromUsagePeriod(usagePeriod: SandboxUsagePeriod) {
    const usagePeriodEntity = new SandboxUsagePeriodArchive()
    usagePeriodEntity.sandboxId = usagePeriod.sandboxId
    usagePeriodEntity.organizationId = usagePeriod.organizationId
    usagePeriodEntity.startAt = usagePeriod.startAt
    usagePeriodEntity.endAt = usagePeriod.endAt
    usagePeriodEntity.cpu = usagePeriod.cpu
    usagePeriodEntity.gpu = usagePeriod.gpu
    usagePeriodEntity.mem = usagePeriod.mem
    usagePeriodEntity.disk = usagePeriod.disk
    usagePeriodEntity.region = usagePeriod.region
    return usagePeriodEntity
  }
}

================================================
FILE: apps/api/src/usage/entities/sandbox-usage-period.entity.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0
 */

import { Column, Entity, Index, PrimaryGeneratedColumn } from 'typeorm'

// One billable usage window for a sandbox; a period with endAt = null is
// still open (the sandbox is still accruing usage in this configuration).
@Entity('sandbox_usage_periods')
@Index('idx_sandbox_usage_periods_sandbox_end', ['sandboxId', 'endAt'])
export class SandboxUsagePeriod {
  @PrimaryGeneratedColumn('uuid')
  id: string

  @Column()
  sandboxId: string

  @Column() // Redundant property to optimize billing queries
  organizationId: string

  @Column({ type: 'timestamp with time zone' })
  startAt: Date

  // null while the period is open; set when the sandbox stops/errors/archives.
  @Column({ type: 'timestamp with time zone', nullable: true })
  endAt: Date | null

  @Column({ type: 'float' })
  cpu: number

  @Column({ type: 'float' })
  gpu: number

  @Column({ type: 'float' })
  mem: number

  @Column({ type: 'float' })
  disk: number

  @Column()
  region: string

  // Clones every column except `id` (a fresh uuid is generated on save);
  // used to re-open a period as a new row with the same resource shape.
  public static fromUsagePeriod(usagePeriod: SandboxUsagePeriod) {
    const usagePeriodEntity = new SandboxUsagePeriod()
    usagePeriodEntity.sandboxId = usagePeriod.sandboxId
    usagePeriodEntity.organizationId = usagePeriod.organizationId
    usagePeriodEntity.startAt = usagePeriod.startAt
    usagePeriodEntity.endAt = usagePeriod.endAt
    usagePeriodEntity.cpu = usagePeriod.cpu
    usagePeriodEntity.gpu = usagePeriod.gpu
    usagePeriodEntity.mem = usagePeriod.mem
    usagePeriodEntity.disk = usagePeriod.disk
    usagePeriodEntity.region = usagePeriod.region
    return usagePeriodEntity
  }
}

================================================
FILE: apps/api/src/usage/services/usage.service.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, OnApplicationShutdown } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { IsNull, LessThan, Not, Repository } from 'typeorm' import { SandboxUsagePeriod } from '../entities/sandbox-usage-period.entity' import { OnEvent } from '@nestjs/event-emitter' import { SandboxStateUpdatedEvent } from '../../sandbox/events/sandbox-state-updated.event' import { SandboxState } from '../../sandbox/enums/sandbox-state.enum' import { SandboxEvents } from './../../sandbox/constants/sandbox-events.constants' import { Cron, CronExpression } from '@nestjs/schedule' import { RedisLockProvider } from '../../sandbox/common/redis-lock.provider' import { SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION } from '../../sandbox/constants/sandbox.constants' import { SandboxUsagePeriodArchive } from '../entities/sandbox-usage-period-archive.entity' import { TrackableJobExecutions } from '../../common/interfaces/trackable-job-executions' import { TrackJobExecution } from '../../common/decorators/track-job-execution.decorator' import { setTimeout as sleep } from 'timers/promises' import { LogExecution } from '../../common/decorators/log-execution.decorator' import { WithInstrumentation } from '../../common/decorators/otel.decorator' import { SandboxRepository } from '../../sandbox/repositories/sandbox.repository' @Injectable() export class UsageService implements TrackableJobExecutions, OnApplicationShutdown { activeJobs = new Set() private readonly logger = new Logger(UsageService.name) constructor( @InjectRepository(SandboxUsagePeriod) private sandboxUsagePeriodRepository: Repository, private readonly redisLockProvider: RedisLockProvider, private readonly sandboxRepository: SandboxRepository, ) {} async onApplicationShutdown() { // wait for all active jobs to finish while (this.activeJobs.size > 0) { this.logger.log(`Waiting for ${this.activeJobs.size} active jobs to finish`) await sleep(1000) } } 
@OnEvent(SandboxEvents.STATE_UPDATED) @TrackJobExecution() async handleSandboxStateUpdate(event: SandboxStateUpdatedEvent) { await this.waitForLock(event.sandbox.id) try { switch (event.newState) { case SandboxState.STARTED: { await this.closeUsagePeriod(event.sandbox.id) await this.createUsagePeriod(event) break } case SandboxState.STOPPING: await this.closeUsagePeriod(event.sandbox.id) await this.createUsagePeriod(event, true) break case SandboxState.ERROR: case SandboxState.BUILD_FAILED: case SandboxState.ARCHIVED: case SandboxState.DESTROYED: { await this.closeUsagePeriod(event.sandbox.id) break } } } finally { this.releaseLock(event.sandbox.id).catch((error) => { this.logger.error(`Error releasing lock for sandbox ${event.sandbox.id}`, error) }) } } private async createUsagePeriod(event: SandboxStateUpdatedEvent, diskOnly = false) { const usagePeriod = new SandboxUsagePeriod() usagePeriod.sandboxId = event.sandbox.id usagePeriod.startAt = new Date() usagePeriod.endAt = null if (!diskOnly) { usagePeriod.cpu = event.sandbox.cpu usagePeriod.gpu = event.sandbox.gpu usagePeriod.mem = event.sandbox.mem } else { usagePeriod.cpu = 0 usagePeriod.gpu = 0 usagePeriod.mem = 0 } usagePeriod.disk = event.sandbox.disk usagePeriod.organizationId = event.sandbox.organizationId usagePeriod.region = event.sandbox.region await this.sandboxUsagePeriodRepository.save(usagePeriod) } private async closeUsagePeriod(sandboxId: string) { const lastUsagePeriod = await this.sandboxUsagePeriodRepository.findOne({ where: { sandboxId, endAt: IsNull(), }, }) if (lastUsagePeriod) { lastUsagePeriod.endAt = new Date() await this.sandboxUsagePeriodRepository.save(lastUsagePeriod) } } @Cron(CronExpression.EVERY_MINUTE, { name: 'close-and-reopen-usage-periods' }) @TrackJobExecution() @LogExecution('close-and-reopen-usage-periods') @WithInstrumentation() async closeAndReopenUsagePeriods() { if (!(await this.redisLockProvider.lock('close-and-reopen-usage-periods', 60))) { return } const usagePeriods 
= await this.sandboxUsagePeriodRepository.find({ where: { endAt: IsNull(), // 1 day ago startAt: LessThan(new Date(Date.now() - 1000 * 60 * 60 * 24)), organizationId: Not(SANDBOX_WARM_POOL_UNASSIGNED_ORGANIZATION), }, order: { startAt: 'ASC', }, take: 100, }) for (const usagePeriod of usagePeriods) { if (!(await this.aquireLock(usagePeriod.sandboxId))) { continue } // validate that the usage period should remain active just in case try { const sandbox = await this.sandboxRepository.findOne({ where: { id: usagePeriod.sandboxId, }, }) await this.sandboxUsagePeriodRepository.manager.transaction(async (transactionalEntityManager) => { // Close usage period const closeTime = new Date() usagePeriod.endAt = closeTime await transactionalEntityManager.save(usagePeriod) if ( sandbox && (sandbox.state === SandboxState.STARTED || sandbox.state === SandboxState.STOPPED || sandbox.state === SandboxState.STOPPING) ) { // Create new usage period const newUsagePeriod = SandboxUsagePeriod.fromUsagePeriod(usagePeriod) newUsagePeriod.startAt = closeTime newUsagePeriod.endAt = null await transactionalEntityManager.save(newUsagePeriod) } }) } catch (error) { this.logger.error(`Error closing and reopening usage period ${usagePeriod.sandboxId}`, error) } finally { await this.releaseLock(usagePeriod.sandboxId) } } await this.redisLockProvider.unlock('close-and-reopen-usage-periods') } @Cron(CronExpression.EVERY_MINUTE, { name: 'archive-usage-periods' }) @TrackJobExecution() @LogExecution('archive-usage-periods') @WithInstrumentation() async archiveUsagePeriods() { const lockKey = 'archive-usage-periods' if (!(await this.redisLockProvider.lock(lockKey, 60))) { return } await this.sandboxUsagePeriodRepository.manager.transaction(async (transactionalEntityManager) => { const usagePeriods = await transactionalEntityManager.find(SandboxUsagePeriod, { where: { endAt: Not(IsNull()), }, order: { startAt: 'ASC', }, take: 1000, }) if (usagePeriods.length === 0) { return } this.logger.debug(`Found 
${usagePeriods.length} usage periods to archive`) await transactionalEntityManager.delete( SandboxUsagePeriod, usagePeriods.map((usagePeriod) => usagePeriod.id), ) await transactionalEntityManager.save(usagePeriods.map(SandboxUsagePeriodArchive.fromUsagePeriod)) }) await this.redisLockProvider.unlock(lockKey) } private async waitForLock(sandboxId: string) { while (!(await this.aquireLock(sandboxId))) { await new Promise((resolve) => setTimeout(resolve, 500)) } } private async aquireLock(sandboxId: string): Promise { return await this.redisLockProvider.lock(`usage-period-${sandboxId}`, 60) } private async releaseLock(sandboxId: string) { await this.redisLockProvider.unlock(`usage-period-${sandboxId}`) } } ================================================ FILE: apps/api/src/usage/usage.module.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Module } from '@nestjs/common' import { TypeOrmModule } from '@nestjs/typeorm' import { DataSource } from 'typeorm' import { EventEmitter2 } from '@nestjs/event-emitter' import { SandboxUsagePeriod } from './entities/sandbox-usage-period.entity' import { UsageService } from './services/usage.service' import { RedisLockProvider } from '../sandbox/common/redis-lock.provider' import { SandboxUsagePeriodArchive } from './entities/sandbox-usage-period-archive.entity' import { SandboxRepository } from '../sandbox/repositories/sandbox.repository' import { SandboxLookupCacheInvalidationService } from '../sandbox/services/sandbox-lookup-cache-invalidation.service' import { Sandbox } from '../sandbox/entities/sandbox.entity' @Module({ imports: [TypeOrmModule.forFeature([SandboxUsagePeriod, Sandbox, SandboxUsagePeriodArchive])], providers: [ UsageService, RedisLockProvider, SandboxLookupCacheInvalidationService, { provide: SandboxRepository, inject: [DataSource, EventEmitter2, SandboxLookupCacheInvalidationService], useFactory: ( dataSource: 
DataSource, eventEmitter: EventEmitter2, sandboxLookupCacheInvalidationService: SandboxLookupCacheInvalidationService, ) => new SandboxRepository(dataSource, eventEmitter, sandboxLookupCacheInvalidationService), }, ], exports: [UsageService], }) export class UsageModule {} ================================================ FILE: apps/api/src/user/constants/acount-provider-display-name.constant.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { AccountProvider } from '../enums/account-provider.enum' export const ACCOUNT_PROVIDER_DISPLAY_NAME: Record = { [AccountProvider.GOOGLE]: 'Google', [AccountProvider.GITHUB]: 'GitHub', } ================================================ FILE: apps/api/src/user/constants/user-events.constant.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export const UserEvents = { CREATED: 'user.created', DELETED: 'user.deleted', EMAIL_VERIFIED: 'user.email-verified', } as const ================================================ FILE: apps/api/src/user/dto/account-provider.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ApiProperty, ApiSchema } from '@nestjs/swagger' import { IsString } from 'class-validator' @ApiSchema({ name: 'AccountProvider' }) export class AccountProviderDto { @ApiProperty() @IsString() name: string @ApiProperty() @IsString() displayName: string } ================================================ FILE: apps/api/src/user/dto/create-linked-account.dto.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { IsString } from 'class-validator'

// Request body for linking a secondary auth-provider account to the
// authenticated user.
@ApiSchema({ name: 'CreateLinkedAccount' })
export class CreateLinkedAccountDto {
  @ApiProperty({
    description: 'The authentication provider of the secondary account',
  })
  @IsString()
  provider: string

  @ApiProperty({
    description: 'The user ID of the secondary account',
  })
  @IsString()
  userId: string
}

================================================
FILE: apps/api/src/user/dto/create-user.dto.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { IsBoolean, IsEnum, IsOptional, IsString } from 'class-validator'
import { SystemRole } from '../enums/system-role.enum'
import { CreateOrganizationQuotaDto } from '../../organization/dto/create-organization-quota.dto'

// Payload for creating a user (admin-only endpoint).
@ApiSchema({ name: 'CreateUser' })
export class CreateUserDto {
  // Caller-supplied ID — not generated server-side.
  @ApiProperty()
  @IsString()
  id: string

  @ApiProperty()
  @IsString()
  name: string

  @ApiPropertyOptional()
  @IsString()
  @IsOptional()
  email?: string

  // Quota for the user's personal organization.
  // NOTE(review): the nested object is not validated here (no @ValidateNested /
  // @Type decorators) — confirm this is intentional.
  @ApiPropertyOptional()
  @IsOptional()
  personalOrganizationQuota?: CreateOrganizationQuotaDto

  @ApiPropertyOptional()
  @IsString()
  @IsOptional()
  personalOrganizationDefaultRegionId?: string

  @ApiPropertyOptional({
    enum: SystemRole,
  })
  @IsEnum(SystemRole)
  @IsOptional()
  role?: SystemRole

  @ApiPropertyOptional()
  @IsBoolean()
  @IsOptional()
  emailVerified?: boolean
}

================================================
FILE: apps/api/src/user/dto/update-user.dto.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0
 */

import { ApiPropertyOptional, ApiSchema } from '@nestjs/swagger'
import { IsBoolean, IsEnum, IsOptional, IsString } from 'class-validator'
import { SystemRole } from '../enums/system-role.enum'

// Partial-update payload for a user: every field is optional.
@ApiSchema({ name: 'UpdateUser' })
export class UpdateUserDto {
  @ApiPropertyOptional()
  @IsString()
  @IsOptional()
  name?: string

  @ApiPropertyOptional()
  @IsString()
  @IsOptional()
  email?: string

  @ApiPropertyOptional({
    enum: SystemRole,
  })
  @IsEnum(SystemRole)
  @IsOptional()
  role?: SystemRole

  @ApiPropertyOptional()
  @IsBoolean()
  @IsOptional()
  emailVerified?: boolean
}

================================================
FILE: apps/api/src/user/dto/user-public-key.dto.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { UserPublicKey } from '../user.entity'

@ApiSchema({ name: 'UserPublicKey' })
export class UserPublicKeyDto {
  @ApiProperty({
    description: 'Public key',
  })
  key: string

  @ApiProperty({
    description: 'Key name',
  })
  name: string

  // Projects the persisted UserPublicKey onto the API DTO (key + name only).
  static fromUserPublicKey(publicKey: UserPublicKey): UserPublicKeyDto {
    const dto: UserPublicKeyDto = {
      key: publicKey.key,
      name: publicKey.name,
    }
    return dto
  }
}

================================================
FILE: apps/api/src/user/dto/user.dto.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { User } from '../user.entity'
import { UserPublicKeyDto } from './user-public-key.dto'

@ApiSchema({ name: 'User' })
export class UserDto {
  @ApiProperty({
    description: 'User ID',
  })
  id: string

  @ApiProperty({
    description: 'User name',
  })
  name: string

  @ApiProperty({
    description: 'User email',
  })
  email: string

  @ApiProperty({
    description: 'User public keys',
    type: [UserPublicKeyDto],
  })
  publicKeys: UserPublicKeyDto[]

  @ApiProperty({
    description: 'Creation timestamp',
  })
  createdAt: Date

  // Projects a User entity onto the public API shape.
  // NOTE(review): assumes user.publicKeys is always populated (non-null) —
  // confirm against the User entity's column definition.
  static fromUser(user: User): UserDto {
    const dto: UserDto = {
      id: user.id,
      name: user.name,
      email: user.email,
      publicKeys: user.publicKeys.map(UserPublicKeyDto.fromUserPublicKey),
      createdAt: user.createdAt,
    }
    return dto
  }
}

================================================
FILE: apps/api/src/user/enums/account-provider.enum.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

// Identity-provider connection identifiers (values match the names the OIDC
// management API reports, e.g. 'google-oauth2').
export enum AccountProvider {
  GOOGLE = 'google-oauth2',
  GITHUB = 'github',
}

================================================
FILE: apps/api/src/user/enums/system-role.enum.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

export enum SystemRole {
  ADMIN = 'admin',
  USER = 'user',
}

================================================
FILE: apps/api/src/user/events/user-created.event.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0
 */

import { EntityManager } from 'typeorm'
import { User } from '../user.entity'
import { CreateOrganizationQuotaDto } from '../../organization/dto/create-organization-quota.dto'

// Emitted when a user is created. Carries an EntityManager — presumably so
// listeners can participate in the creating transaction (TODO confirm at the
// emit site).
export class UserCreatedEvent {
  constructor(
    public readonly entityManager: EntityManager,
    public readonly user: User,
    public readonly personalOrganizationQuota?: CreateOrganizationQuotaDto,
    public readonly personalOrganizationDefaultRegionId?: string,
  ) {}
}

================================================
FILE: apps/api/src/user/events/user-deleted.event.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { EntityManager } from 'typeorm'

// Emitted when a user is deleted; only the ID is available at this point.
export class UserDeletedEvent {
  constructor(
    public readonly entityManager: EntityManager,
    public readonly userId: string,
  ) {}
}

================================================
FILE: apps/api/src/user/events/user-email-verified.event.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { EntityManager } from 'typeorm'

// Emitted when a user's email address becomes verified.
export class UserEmailVerifiedEvent {
  constructor(
    public readonly entityManager: EntityManager,
    public readonly userId: string,
  ) {}
}

================================================
FILE: apps/api/src/user/user.controller.ts
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { BadRequestException, Body, Controller, Delete, ForbiddenException, Get, Logger, NotFoundException, Param, Post, UnauthorizedException, UseGuards, } from '@nestjs/common' import { User } from './user.entity' import { UserService } from './user.service' import { CreateUserDto } from './dto/create-user.dto' import { ApiOAuth2, ApiTags, ApiOperation, ApiResponse, ApiBearerAuth } from '@nestjs/swagger' import { CombinedAuthGuard } from '../auth/combined-auth.guard' import { AuthContext } from '../common/decorators/auth-context.decorator' import { AuthContext as IAuthContext } from '../common/interfaces/auth-context.interface' import { UserDto } from './dto/user.dto' import { SystemActionGuard } from '../auth/system-action.guard' import { RequiredSystemRole } from '../common/decorators/required-role.decorator' import { SystemRole } from './enums/system-role.enum' import { TypedConfigService } from '../config/typed-config.service' import axios from 'axios' import { AccountProviderDto } from './dto/account-provider.dto' import { ACCOUNT_PROVIDER_DISPLAY_NAME } from './constants/acount-provider-display-name.constant' import { AccountProvider } from './enums/account-provider.enum' import { CreateLinkedAccountDto } from './dto/create-linked-account.dto' import { Audit, TypedRequest } from '../audit/decorators/audit.decorator' import { AuditAction } from '../audit/enums/audit-action.enum' import { AuditTarget } from '../audit/enums/audit-target.enum' import { AuthenticatedRateLimitGuard } from '../common/guards/authenticated-rate-limit.guard' @ApiTags('users') @Controller('users') @UseGuards(CombinedAuthGuard, AuthenticatedRateLimitGuard, SystemActionGuard) @ApiOAuth2(['openid', 'profile', 'email']) @ApiBearerAuth() export class UserController { private readonly logger = new Logger(UserController.name) constructor( private readonly userService: UserService, private readonly configService: TypedConfigService, ) {} @Get('/me') 
@ApiOperation({ summary: 'Get authenticated user', operationId: 'getAuthenticatedUser', }) @ApiResponse({ status: 200, description: 'User details', type: UserDto, }) async getAuthenticatedUser(@AuthContext() authContext: IAuthContext): Promise { const user = await this.userService.findOne(authContext.userId) if (!user) { throw new NotFoundException(`User with ID ${authContext.userId} not found`) } return UserDto.fromUser(user) } @Post() @ApiOperation({ summary: 'Create user', operationId: 'createUser', }) @RequiredSystemRole(SystemRole.ADMIN) @Audit({ action: AuditAction.CREATE, targetType: AuditTarget.USER, targetIdFromResult: (result: User) => result?.id, requestMetadata: { body: (req: TypedRequest) => ({ id: req.body?.id, name: req.body?.name, email: req.body?.email, personalOrganizationQuota: req.body?.personalOrganizationQuota, role: req.body?.role, emailVerified: req.body?.emailVerified, }), }, }) async create(@Body() createUserDto: CreateUserDto): Promise { return this.userService.create(createUserDto) } @Get() @ApiOperation({ summary: 'List all users', operationId: 'listUsers', }) @RequiredSystemRole(SystemRole.ADMIN) async findAll(): Promise { return this.userService.findAll() } @Post('/:id/regenerate-key-pair') @ApiOperation({ summary: 'Regenerate user key pair', operationId: 'regenerateKeyPair', }) @RequiredSystemRole(SystemRole.ADMIN) @Audit({ action: AuditAction.REGENERATE_KEY_PAIR, targetType: AuditTarget.USER, targetIdFromRequest: (req) => req.params.id, }) async regenerateKeyPair(@Param('id') id: string): Promise { return this.userService.regenerateKeyPair(id) } @Get('/account-providers') @ApiOperation({ summary: 'Get available account providers', operationId: 'getAvailableAccountProviders', }) @ApiResponse({ status: 200, description: 'Available account providers', type: [AccountProviderDto], }) async getAvailableAccountProviders(): Promise { if (!this.configService.get('oidc.managementApi.enabled')) { this.logger.warn('OIDC Management API is not 
enabled') throw new NotFoundException() } const token = await this.getManagementApiToken() try { const response = await axios.get<{ name: string }[]>( `${this.configService.getOrThrow('oidc.issuer')}/api/v2/connections`, { headers: { Authorization: `Bearer ${token}`, }, }, ) const supportedProviders = new Set([AccountProvider.GOOGLE, AccountProvider.GITHUB]) const result: AccountProviderDto[] = response.data .filter((connection) => supportedProviders.has(connection.name as AccountProvider)) .map((connection) => ({ name: connection.name, displayName: ACCOUNT_PROVIDER_DISPLAY_NAME[connection.name as AccountProvider], })) return result } catch (error) { this.logger.error('Failed to get available account providers', error?.message || String(error)) throw new UnauthorizedException() } } @Post('/linked-accounts') @ApiOperation({ summary: 'Link account', operationId: 'linkAccount', }) @ApiResponse({ status: 204, description: 'Account linked successfully', }) @Audit({ action: AuditAction.LINK_ACCOUNT, requestMetadata: { body: (req: TypedRequest) => ({ provider: req.body?.provider, userId: req.body?.userId, }), }, }) async linkAccount( @AuthContext() authContext: IAuthContext, @Body() createLinkedAccountDto: CreateLinkedAccountDto, ): Promise { if (!this.configService.get('oidc.managementApi.enabled')) { this.logger.warn('OIDC Management API is not enabled') throw new NotFoundException() } const authenticatedUser = await this.userService.findOne(authContext.userId) if (!authenticatedUser.emailVerified) { throw new ForbiddenException('Please verify your email address') } const userToLinkId = `${createLinkedAccountDto.provider}|${createLinkedAccountDto.userId}` // Verify user doesn't already exist in our user table const userToLink = await this.userService.findOne(userToLinkId) if (userToLink) { throw new BadRequestException('This account is already associated with another user') } const token = await this.getManagementApiToken() // Verify account is eligible to be linked 
(must be reachable via OIDC Management API) try { await axios.get( `${this.configService.getOrThrow('oidc.issuer')}/api/v2/users/${encodeURIComponent(userToLinkId)}`, { headers: { Authorization: `Bearer ${token}`, }, }, ) } catch (error) { if (axios.isAxiosError(error) && error.response?.status === 404) { throw new BadRequestException('Account not found or already linked to another user') } throw error } // Link account try { await axios.post( `${this.configService.getOrThrow('oidc.issuer')}/api/v2/users/${authContext.userId}/identities`, { provider: createLinkedAccountDto.provider, user_id: createLinkedAccountDto.userId, }, { headers: { Authorization: `Bearer ${token}`, }, }, ) } catch (error) { this.logger.error('Failed to link account', error?.message || String(error)) throw new UnauthorizedException() } } @Delete('/linked-accounts/:provider/:providerUserId') @ApiOperation({ summary: 'Unlink account', operationId: 'unlinkAccount', }) @ApiResponse({ status: 204, description: 'Account unlinked successfully', }) @Audit({ action: AuditAction.UNLINK_ACCOUNT, requestMetadata: { params: (req) => ({ provider: req.params.provider, providerUserId: req.params.providerUserId, }), }, }) async unlinkAccount( @AuthContext() authContext: IAuthContext, @Param('provider') provider: string, @Param('providerUserId') providerUserId: string, ): Promise { if (!this.configService.get('oidc.managementApi.enabled')) { this.logger.warn('OIDC Management API is not enabled') throw new NotFoundException() } const token = await this.getManagementApiToken() try { await axios.delete( `${this.configService.getOrThrow('oidc.issuer')}/api/v2/users/${authContext.userId}/identities/${provider}/${providerUserId}`, { headers: { Authorization: `Bearer ${token}`, }, }, ) } catch (error) { this.logger.error('Failed to unlink account', error?.message || String(error)) throw new UnauthorizedException() } } @Post('/mfa/sms/enroll') @ApiOperation({ summary: 'Enroll in SMS MFA', operationId: 'enrollInSmsMfa', 
}) @ApiResponse({ status: 200, description: 'SMS MFA enrollment URL', type: String, }) async enrollInSmsMfa(@AuthContext() authContext: IAuthContext): Promise { if (!this.configService.get('oidc.managementApi.enabled')) { this.logger.warn('OIDC Management API is not enabled') throw new NotFoundException() } const token = await this.getManagementApiToken() try { const response = await axios.post( `${this.configService.getOrThrow('oidc.issuer')}/api/v2/guardian/enrollments/ticket`, { user_id: authContext.userId, }, { headers: { Authorization: `Bearer ${token}`, }, }, ) return response.data.ticket_url } catch (error) { this.logger.error('Failed to enable SMS MFA', error?.message || String(error)) throw new UnauthorizedException() } } @Get('/:id') @ApiOperation({ summary: 'Get user by ID', operationId: 'getUser', }) @ApiResponse({ status: 200, description: 'User details', type: UserDto, }) @RequiredSystemRole(SystemRole.ADMIN) async getUserById(@Param('id') id: string): Promise { const user = await this.userService.findOne(id) if (!user) { throw new NotFoundException(`User with ID ${id} not found`) } return UserDto.fromUser(user) } private async getManagementApiToken(): Promise { try { const tokenResponse = await axios.post(`${this.configService.getOrThrow('oidc.issuer')}/oauth/token`, { grant_type: 'client_credentials', client_id: this.configService.getOrThrow('oidc.managementApi.clientId'), client_secret: this.configService.getOrThrow('oidc.managementApi.clientSecret'), audience: this.configService.getOrThrow('oidc.managementApi.audience'), }) return tokenResponse.data.access_token } catch (error) { this.logger.error('Failed to get OIDC Management API token', error?.message || String(error)) throw new UnauthorizedException() } } } ================================================ FILE: apps/api/src/user/user.entity.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */

import { Column, CreateDateColumn, Entity, PrimaryColumn } from 'typeorm'
import { SystemRole } from './enums/system-role.enum'

// OpenSSH-encoded key pair generated for the user (see UserService.generatePrivateKey).
export interface UserSSHKeyPair {
  privateKey: string
  publicKey: string
}

// A user-supplied public key with a display name.
export interface UserPublicKey {
  key: string
  name: string
}

@Entity()
export class User {
  // Caller-assigned identifier (set from CreateUserDto.id in UserService.create),
  // not database-generated.
  @PrimaryColumn()
  id: string

  @Column()
  name: string

  @Column({
    default: '',
  })
  email: string

  @Column({
    default: false,
  })
  emailVerified: boolean

  // Server-generated SSH key pair, stored as JSON; nullable until generated.
  @Column({
    type: 'simple-json',
    nullable: true,
  })
  keyPair: UserSSHKeyPair

  @Column('simple-json')
  publicKeys: UserPublicKey[]

  @Column({
    type: 'enum',
    enum: SystemRole,
    default: SystemRole.USER,
  })
  role: SystemRole

  @CreateDateColumn({
    type: 'timestamp with time zone',
  })
  createdAt: Date
}

================================================
FILE: apps/api/src/user/user.module.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { Module } from '@nestjs/common'
import { UserController } from './user.controller'
import { UserService } from './user.service'
import { TypeOrmModule } from '@nestjs/typeorm'
import { User } from './user.entity'

// Wires the User feature: entity repository, REST controller, and the service
// (exported for use by other modules).
@Module({
  imports: [TypeOrmModule.forFeature([User])],
  controllers: [UserController],
  providers: [UserService],
  exports: [UserService],
})
export class UserModule {}

================================================
FILE: apps/api/src/user/user.service.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, NotFoundException } from '@nestjs/common' import { InjectRepository } from '@nestjs/typeorm' import { User, UserSSHKeyPair } from './user.entity' import { DataSource, ILike, In, Repository } from 'typeorm' import { CreateUserDto } from './dto/create-user.dto' import * as crypto from 'crypto' import * as forge from 'node-forge' import { EventEmitter2 } from '@nestjs/event-emitter' import { UserEvents } from './constants/user-events.constant' import { UpdateUserDto } from './dto/update-user.dto' import { UserCreatedEvent } from './events/user-created.event' import { UserDeletedEvent } from './events/user-deleted.event' import { UserEmailVerifiedEvent } from './events/user-email-verified.event' @Injectable() export class UserService { constructor( @InjectRepository(User) private readonly userRepository: Repository, private readonly eventEmitter: EventEmitter2, private readonly dataSource: DataSource, ) {} async create(createUserDto: CreateUserDto): Promise { let user = new User() user.id = createUserDto.id user.name = createUserDto.name const keyPair = await this.generatePrivateKey() user.keyPair = keyPair user.publicKeys = [] user.emailVerified = createUserDto.emailVerified if (createUserDto.email) { user.email = createUserDto.email } if (createUserDto.role) { user.role = createUserDto.role } await this.dataSource.transaction(async (em) => { user = await em.save(user) await this.eventEmitter.emitAsync( UserEvents.CREATED, new UserCreatedEvent( em, user, createUserDto.personalOrganizationQuota, createUserDto.personalOrganizationDefaultRegionId, ), ) }) return user } async findAll(): Promise { return this.userRepository.find() } async findByIds(ids: string[]): Promise { if (ids.length === 0) { return [] } return this.userRepository.find({ where: { id: In(ids), }, }) } async findOne(id: string): Promise { return this.userRepository.findOne({ where: { id } }) } async findOneOrFail(id: string): Promise { return 
this.userRepository.findOneOrFail({ where: { id } }) } async findOneByEmail(email: string, ignoreCase = false): Promise { return this.userRepository.findOne({ where: { email: ignoreCase ? ILike(email) : email, }, }) } async remove(id: string): Promise { await this.dataSource.transaction(async (em) => { await em.delete(User, id) await this.eventEmitter.emitAsync(UserEvents.DELETED, new UserDeletedEvent(em, id)) }) } async regenerateKeyPair(id: string): Promise { const user = await this.userRepository.findOneBy({ id: id }) const keyPair = await this.generatePrivateKey() user.keyPair = keyPair return this.userRepository.save(user) } private generatePrivateKey(): Promise { const comment = 'daytona' return new Promise((resolve, reject) => { crypto.generateKeyPair( 'rsa', { modulusLength: 4096, publicKeyEncoding: { type: 'pkcs1', format: 'pem', }, privateKeyEncoding: { type: 'pkcs1', format: 'pem', }, }, (error, publicKey, privateKey) => { if (error) { reject(error) } else { const publicKeySShEncoded = forge.ssh.publicKeyToOpenSSH(forge.pki.publicKeyFromPem(publicKey), comment) const privateKeySShEncoded = forge.ssh.privateKeyToOpenSSH(forge.pki.privateKeyFromPem(privateKey)) resolve({ publicKey: publicKeySShEncoded, privateKey: privateKeySShEncoded, }) } }, ) }) } // TODO: discuss if we need separate methods for updating specific fields async update(userId: string, updateUserDto: UpdateUserDto): Promise { const user = await this.userRepository.findOne({ where: { id: userId, }, }) if (!user) { throw new NotFoundException(`User with ID ${userId} not found.`) } if (updateUserDto.name) { user.name = updateUserDto.name } if (updateUserDto.email) { user.email = updateUserDto.email } if (updateUserDto.role) { user.role = updateUserDto.role } if (updateUserDto.emailVerified) { user.emailVerified = updateUserDto.emailVerified await this.dataSource.transaction(async (em) => { await em.save(user) await this.eventEmitter.emitAsync(UserEvents.EMAIL_VERIFIED, new 
UserEmailVerifiedEvent(em, user.id)) }) } return this.userRepository.save(user) } } ================================================ FILE: apps/api/src/webhook/README.md ================================================ # Webhook Service This service provides webhook functionality using [Svix](https://svix.com) as the webhook delivery provider. It automatically creates Svix applications for new organizations and sends webhooks for various events. ## Configuration Set the following environment variables: ```bash # Required: Your Svix authentication token SVIX_AUTH_TOKEN=your_svix_auth_token_here # Optional: Custom Svix server URL (for self-hosted instances) SVIX_SERVER_URL=https://your-svix-instance.com ``` ## API Endpoints ### Get App Portal Access ```http POST /api/webhooks/organizations/{organizationId}/app-portal-access ``` **Response:** ```json { "url": "https://app.svix.com/consumer/..." } ``` Returns a URL that provides access to the Svix Consumer App Portal for managing webhook endpoints, viewing delivery attempts, and monitoring webhook performance. ### Send Custom Webhook ```http POST /api/webhooks/organizations/{organizationId}/send ``` **Request Body:** ```json { "eventType": "custom.event", "payload": { "message": "Hello from Daytona!", "timestamp": "2025-01-01T00:00:00.000Z" }, "eventId": "optional-unique-id" } ``` Sends a custom webhook message to all configured endpoints for the specified organization. ### Get Message Delivery Attempts ```http GET /api/webhooks/organizations/{organizationId}/messages/{messageId}/attempts ``` **Response:** ```json [ { "id": "msg_attempt_123", "status": 200, "response": "OK", "timestamp": "2025-01-01T00:00:00.000Z" } ] ``` Returns the delivery attempts for a specific webhook message, including delivery status and response details. 
### Get Service Status ```http GET /api/webhooks/status ``` **Response:** ```json { "enabled": true } ``` Returns the current status of the webhook service, indicating whether it's properly configured and enabled. ## Automatic Events The service automatically sends webhooks for the following events: ### Sandbox Events - `sandbox.created` - When a sandbox is created - `sandbox.state.updated` - When sandbox state changes ### Snapshot Events - `snapshot.created` - When a snapshot is created - `snapshot.state.updated` - When snapshot state changes - `snapshot.removed` - When a snapshot is removed ### Volume Events - `volume.created` - When a volume is created - `volume.state.updated` - When volume state changes ## Webhook Payload Format All webhooks include event-specific data relevant to the resource being updated. ### Example Sandbox Created Payload ```json { "id": "sandbox-uuid", "organizationId": "org-uuid", "state": "STARTED", "class": "SMALL", "createdAt": "2025-01-01T00:00:00.000Z" } ``` ### Example Sandbox State Updated Payload ```json { "id": "sandbox-uuid", "organizationId": "org-uuid", "oldState": "STOPPED", "newState": "STARTED", "updatedAt": "2025-01-01T00:00:00.000Z" } ``` ### Example Snapshot Created Payload ```json { "id": "snapshot-uuid", "name": "my-snapshot", "organizationId": "org-uuid", "state": "ACTIVE", "createdAt": "2025-01-01T00:00:00.000Z" } ``` ### Example Volume State Updated Payload ```json { "id": "volume-uuid", "name": "my-volume", "organizationId": "org-uuid", "oldState": "CREATING", "newState": "READY", "updatedAt": "2025-01-01T00:00:00.000Z" } ``` ## Development ### Adding New Event Types 1. Add the event type to `webhook-events.constants.ts` 2. Create an event handler in `webhook-event-handler.service.ts` 3. Use the `@OnEvent()` decorator to listen for the event 4. Define the payload structure for the new event type 5. Add the events to the `openapi-webhooks.ts` 6. Generate the openapi spec 7. 
Upload the new schema to the Svix dashboard

### Testing

Use the Svix Play webhook debugger during development:

1. Set up a webhook endpoint pointing to your Svix Play URL
2. Send test webhooks using the `/send` endpoint
3. Check the Svix dashboard for delivery status
4. Monitor delivery attempts through the API

### Local Development

For local development without Svix:

1. Set `SVIX_AUTH_TOKEN` to an empty string or invalid value
2. The service will log warnings but continue to function
3. Event handlers will skip webhook delivery when disabled
4. Use the status endpoint to verify configuration

## Dependencies

- `svix` - Official Svix JavaScript SDK
- `@nestjs/event-emitter` - Event handling
- `@nestjs/common` - Core NestJS functionality

### Event Flow

1. System event occurs (e.g., sandbox created)
2. Event emitter publishes the event
3. Webhook event handler catches the event
4. Handler calls webhook service to send webhook
5. Service delivers webhook through Svix
6. Delivery status is tracked and available via API

================================================
FILE: apps/api/src/webhook/constants/webhook-events.constants.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

// Canonical webhook event-type identifiers sent through Svix. Keep in sync with
// the handlers in webhook-event-handler.service.ts and the schemas uploaded to
// the Svix dashboard (see the README's "Adding New Event Types" steps).
export enum WebhookEvent {
  SANDBOX_CREATED = 'sandbox.created',
  SANDBOX_STATE_UPDATED = 'sandbox.state.updated',
  SNAPSHOT_CREATED = 'snapshot.created',
  SNAPSHOT_STATE_UPDATED = 'snapshot.state.updated',
  SNAPSHOT_REMOVED = 'snapshot.removed',
  VOLUME_CREATED = 'volume.created',
  VOLUME_STATE_UPDATED = 'volume.state.updated',
}

================================================
FILE: apps/api/src/webhook/controllers/webhook.controller.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */

// NOTE(review): generic type arguments (e.g. the `Promise<...>` return types) appear to
// have been lost in extraction throughout this file — restore them before compiling.

import { Controller, Post, Get, Body, Param, UseGuards, HttpStatus, NotFoundException } from '@nestjs/common'
import { ApiTags, ApiOperation, ApiResponse, ApiBearerAuth, ApiHeader } from '@nestjs/swagger'
import { WebhookService } from '../services/webhook.service'
import { SendWebhookDto } from '../dto/send-webhook.dto'
import { CombinedAuthGuard } from '../../auth/combined-auth.guard'
import { CustomHeaders } from '../../common/constants/header.constants'
import { SystemActionGuard } from '../../auth/system-action.guard'
import { OrganizationAccessGuard } from '../../organization/guards/organization-access.guard'
import { RequiredSystemRole } from '../../common/decorators/required-role.decorator'
import { SystemRole } from '../../user/enums/system-role.enum'
import { Audit, TypedRequest } from '../../audit/decorators/audit.decorator'
import { AuditAction } from '../../audit/enums/audit-action.enum'
import { AuditTarget } from '../../audit/enums/audit-target.enum'
import { OrganizationService } from '../../organization/services/organization.service'
import { WebhookAppPortalAccessDto } from '../dto/webhook-app-portal-access.dto'
import { WebhookInitializationStatusDto } from '../dto/webhook-initialization-status.dto'
import { AuthenticatedRateLimitGuard } from '../../common/guards/authenticated-rate-limit.guard'

/**
 * Webhook management endpoints backed by Svix. Organization-scoped routes are
 * gated by OrganizationAccessGuard; the send/attempts/status/initialize routes
 * additionally require the ADMIN system role.
 */
@ApiTags('webhooks')
@Controller('webhooks')
@ApiHeader(CustomHeaders.ORGANIZATION_ID)
@UseGuards(CombinedAuthGuard, SystemActionGuard, OrganizationAccessGuard, AuthenticatedRateLimitGuard)
@ApiBearerAuth()
export class WebhookController {
  constructor(
    private readonly organizationService: OrganizationService,
    private readonly webhookService: WebhookService,
  ) {}

  // Issues a Svix Consumer App Portal token/URL for managing the org's endpoints.
  @Post('organizations/:organizationId/app-portal-access')
  @ApiOperation({ summary: 'Get Svix Consumer App Portal access for an organization' })
  @ApiResponse({
    status: HttpStatus.OK,
    description: 'App Portal access generated successfully',
    type: WebhookAppPortalAccessDto,
  })
  async getAppPortalAccess(@Param('organizationId') organizationId: string): Promise {
    return this.webhookService.getAppPortalAccess(organizationId)
  }

  // Admin-only: deliver an arbitrary webhook message to the organization's endpoints.
  @Post('organizations/:organizationId/send')
  @ApiOperation({ summary: 'Send a webhook message to an organization' })
  @ApiResponse({
    status: HttpStatus.OK,
    description: 'Webhook message sent successfully',
  })
  @RequiredSystemRole(SystemRole.ADMIN)
  @Audit({
    action: AuditAction.SEND_WEBHOOK_MESSAGE,
    targetType: AuditTarget.ORGANIZATION,
    targetIdFromRequest: (req) => req.params.organizationId,
    requestMetadata: {
      body: (req: TypedRequest) => ({
        eventType: req.body?.eventType,
        payload: req.body?.payload,
        eventId: req.body?.eventId,
      }),
    },
  })
  async sendWebhook(
    @Param('organizationId') organizationId: string,
    @Body() sendWebhookDto: SendWebhookDto,
  ): Promise {
    await this.webhookService.sendWebhook(
      organizationId,
      sendWebhookDto.eventType,
      sendWebhookDto.payload,
      sendWebhookDto.eventId,
    )
  }

  // Admin-only: delivery attempts (status/response) for one webhook message.
  @Get('organizations/:organizationId/messages/:messageId/attempts')
  @ApiOperation({ summary: 'Get delivery attempts for a webhook message' })
  @ApiResponse({
    status: HttpStatus.OK,
    description: 'List of delivery attempts',
    type: [Object],
  })
  @RequiredSystemRole(SystemRole.ADMIN)
  async getMessageAttempts(
    @Param('organizationId') organizationId: string,
    @Param('messageId') messageId: string,
  ): Promise {
    return this.webhookService.getMessageAttempts(organizationId, messageId)
  }

  // Admin-only: whether the webhook service is configured/enabled (Svix token present).
  @Get('status')
  @ApiOperation({ summary: 'Get webhook service status' })
  @ApiResponse({
    status: HttpStatus.OK,
    description: 'Webhook service status',
    schema: {
      type: 'object',
      properties: {
        enabled: { type: 'boolean' },
      },
    },
  })
  @RequiredSystemRole(SystemRole.ADMIN)
  async getStatus(): Promise<{ enabled: boolean }> {
    return {
      enabled: this.webhookService.isEnabled(),
    }
  }

  @Get('organizations/:organizationId/initialization-status')
  @ApiOperation({ summary: 'Get webhook initialization status for an organization' })
  @ApiResponse({
    status: HttpStatus.OK,
    description: 'Webhook initialization status',
    type: WebhookInitializationStatusDto,
  })
  @ApiResponse({
    status: HttpStatus.NOT_FOUND,
    description: 'Webhook initialization status not found',
  })
  async getInitializationStatus(
    @Param('organizationId') organizationId: string,
  ): Promise {
    const status = await this.webhookService.getInitializationStatus(organizationId)
    if (!status) {
      throw new NotFoundException('Webhook initialization status not found')
    }
    return WebhookInitializationStatusDto.fromWebhookInitialization(status)
  }

  // Admin-only: (re)create the Svix application for an existing organization.
  @Post('organizations/:organizationId/initialize')
  @ApiOperation({ summary: 'Initialize webhooks for an organization' })
  @ApiResponse({
    status: HttpStatus.CREATED,
    description: 'Webhooks initialized successfully',
  })
  @ApiResponse({
    status: HttpStatus.FORBIDDEN,
    description: 'User does not have access to this organization',
  })
  @ApiResponse({
    status: HttpStatus.NOT_FOUND,
    description: 'Organization not found',
  })
  @RequiredSystemRole(SystemRole.ADMIN)
  @Audit({
    action: AuditAction.INITIALIZE_WEBHOOKS,
    targetType: AuditTarget.ORGANIZATION,
    targetIdFromRequest: (req) => req.params.organizationId,
  })
  async initializeWebhooks(@Param('organizationId') organizationId: string): Promise {
    const organization = await this.organizationService.findOne(organizationId)
    if (!organization) {
      throw new NotFoundException('Organization not found')
    }
    await this.webhookService.createSvixApplication(organization)
  }
}

================================================
FILE: apps/api/src/webhook/dto/send-webhook.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */

import { IsString, IsObject, IsOptional, IsEnum } from 'class-validator'
import { ApiProperty, ApiPropertyOptional } from '@nestjs/swagger'
import { WebhookEvent } from '../constants/webhook-events.constants'

// Request body for the admin "send custom webhook" endpoint
// (POST /webhooks/organizations/:organizationId/send).
export class SendWebhookDto {
  @ApiProperty({
    description: 'The type of event being sent',
    enum: WebhookEvent,
    enumName: 'WebhookEvent',
    example: 'sandbox.created',
  })
  @IsEnum(WebhookEvent)
  eventType: WebhookEvent

  @ApiProperty({
    description: 'The payload data to send',
    example: { id: 'sandbox-123', name: 'My Sandbox' },
  })
  @IsObject()
  // NOTE(review): `Record` lost its type arguments in extraction (presumably
  // `Record<string, any>` or similar) — restore before compiling.
  payload: Record

  // Forwarded to Svix as the message id for idempotent delivery.
  @ApiPropertyOptional({
    description: 'Optional event ID for idempotency',
    example: 'evt_1234567890abcdef',
  })
  @IsOptional()
  @IsString()
  eventId?: string
}

================================================
FILE: apps/api/src/webhook/dto/webhook-app-portal-access.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'

// Credentials returned for the Svix Consumer App Portal.
@ApiSchema({ name: 'WebhookAppPortalAccess' })
export class WebhookAppPortalAccessDto {
  @ApiProperty({
    description: 'The authentication token for the Svix consumer app portal',
    example: 'appsk_...',
  })
  token: string

  @ApiProperty({
    description: 'The URL to the webhook app portal',
    example: 'https://app.svix.com/app_1234567890',
  })
  url: string
}

================================================
FILE: apps/api/src/webhook/dto/webhook-event-payloads.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { WebhookEvent } from '../constants/webhook-events.constants'
import { SandboxState } from '../../sandbox/enums/sandbox-state.enum'
import { SandboxClass } from '../../sandbox/enums/sandbox-class.enum'
import { SnapshotState } from '../../sandbox/enums/snapshot-state.enum'
import { VolumeState } from '../../sandbox/enums/volume-state.enum'
import { SandboxCreatedEvent } from '../../sandbox/events/sandbox-create.event'
import { SandboxStateUpdatedEvent } from '../../sandbox/events/sandbox-state-updated.event'
import { SnapshotCreatedEvent } from '../../sandbox/events/snapshot-created.event'
import { SnapshotStateUpdatedEvent } from '../../sandbox/events/snapshot-state-updated.event'
import { SnapshotRemovedEvent } from '../../sandbox/events/snapshot-removed.event'
import { VolumeCreatedEvent } from '../../sandbox/events/volume-created.event'
import { VolumeStateUpdatedEvent } from '../../sandbox/events/volume-state-updated.event'

// Common envelope fields carried by every outgoing webhook payload; `timestamp`
// is set at payload-construction time in each fromEvent() factory.
export abstract class BaseWebhookEventDto {
  @ApiProperty({
    description: 'Event type identifier',
    enum: WebhookEvent,
    enumName: 'WebhookEvent',
    example: 'sandbox.created',
  })
  event: string

  @ApiProperty({
    description: 'Timestamp when the event occurred',
    example: '2025-12-19T10:30:00.000Z',
    format: 'date-time',
  })
  timestamp: string
}

// Payload for `sandbox.created`.
@ApiSchema({ name: 'SandboxCreatedWebhook' })
export class SandboxCreatedWebhookDto extends BaseWebhookEventDto {
  @ApiProperty({
    description: 'Sandbox ID',
    example: 'sandbox123',
  })
  id: string

  @ApiProperty({
    description: 'Organization ID',
    example: 'org123',
  })
  organizationId: string

  @ApiProperty({
    description: 'Sandbox state',
    enum: SandboxState,
    enumName: 'SandboxState',
  })
  state: SandboxState

  @ApiProperty({
    description: 'Sandbox class',
    enum: SandboxClass,
    enumName: 'SandboxClass',
  })
  class: SandboxClass

  @ApiProperty({
    description: 'When the sandbox was created',
    example: '2025-12-19T10:30:00.000Z',
    format: 'date-time',
  })
  createdAt: string

  static fromEvent(event: SandboxCreatedEvent, eventType: string): SandboxCreatedWebhookDto {
    return {
      event: eventType,
      timestamp: new Date().toISOString(),
      id: event.sandbox.id,
      organizationId: event.sandbox.organizationId,
      state: event.sandbox.state,
      class: event.sandbox.class,
      createdAt: event.sandbox.createdAt.toISOString(),
    }
  }
}

// Payload for `sandbox.state.updated`.
@ApiSchema({ name: 'SandboxStateUpdatedWebhook' })
export class SandboxStateUpdatedWebhookDto extends BaseWebhookEventDto {
  @ApiProperty({
    description: 'Sandbox ID',
    example: 'sandbox123',
  })
  id: string

  @ApiProperty({
    description: 'Organization ID',
    example: 'org123',
  })
  organizationId: string

  @ApiProperty({
    description: 'Previous state',
    enum: SandboxState,
    enumName: 'SandboxState',
  })
  oldState: SandboxState

  @ApiProperty({
    description: 'New state',
    enum: SandboxState,
    enumName: 'SandboxState',
  })
  newState: SandboxState

  @ApiProperty({
    description: 'When the sandbox was last updated',
    example: '2025-12-19T10:30:00.000Z',
    format: 'date-time',
  })
  updatedAt: string

  static fromEvent(event: SandboxStateUpdatedEvent, eventType: string): SandboxStateUpdatedWebhookDto {
    return {
      event: eventType,
      timestamp: new Date().toISOString(),
      id: event.sandbox.id,
      organizationId: event.sandbox.organizationId,
      oldState: event.oldState,
      newState: event.newState,
      updatedAt: event.sandbox.updatedAt.toISOString(),
    }
  }
}

// Payload for `snapshot.created`.
@ApiSchema({ name: 'SnapshotCreatedWebhook' })
export class SnapshotCreatedWebhookDto extends BaseWebhookEventDto {
  @ApiProperty({
    description: 'Snapshot ID',
    example: 'snapshot123',
  })
  id: string

  @ApiProperty({
    description: 'Snapshot name',
    example: 'my-snapshot',
  })
  name: string

  @ApiProperty({
    description: 'Organization ID',
    example: 'org123',
  })
  organizationId: string

  @ApiProperty({
    description: 'Snapshot state',
    enum: SnapshotState,
    enumName: 'SnapshotState',
  })
  state: SnapshotState

  @ApiProperty({
    description: 'When the snapshot was created',
    example: '2025-12-19T10:30:00.000Z',
    format: 'date-time',
  })
  createdAt: string

  static fromEvent(event: SnapshotCreatedEvent, eventType: string): SnapshotCreatedWebhookDto {
    return {
      event: eventType,
      timestamp: new Date().toISOString(),
      id: event.snapshot.id,
      name: event.snapshot.name,
      organizationId: event.snapshot.organizationId,
      state: event.snapshot.state,
      createdAt: event.snapshot.createdAt.toISOString(),
    }
  }
}

// Payload for `snapshot.state.updated`.
@ApiSchema({ name: 'SnapshotStateUpdatedWebhook' })
export class SnapshotStateUpdatedWebhookDto extends BaseWebhookEventDto {
  @ApiProperty({
    description: 'Snapshot ID',
    example: 'snapshot123',
  })
  id: string

  @ApiProperty({
    description: 'Snapshot name',
    example: 'my-snapshot',
  })
  name: string

  @ApiProperty({
    description: 'Organization ID',
    example: 'org123',
  })
  organizationId: string

  @ApiProperty({
    description: 'Previous state',
    enum: SnapshotState,
    enumName: 'SnapshotState',
  })
  oldState: SnapshotState

  @ApiProperty({
    description: 'New state',
    enum: SnapshotState,
    enumName: 'SnapshotState',
  })
  newState: SnapshotState

  @ApiProperty({
    description: 'When the snapshot was last updated',
    example: '2025-12-19T10:30:00.000Z',
    format: 'date-time',
  })
  updatedAt: string

  static fromEvent(event: SnapshotStateUpdatedEvent, eventType: string): SnapshotStateUpdatedWebhookDto {
    return {
      event: eventType,
      timestamp: new Date().toISOString(),
      id: event.snapshot.id,
      name: event.snapshot.name,
      organizationId: event.snapshot.organizationId,
      oldState: event.oldState,
      newState: event.newState,
      updatedAt: event.snapshot.updatedAt.toISOString(),
    }
  }
}

// Payload for `snapshot.removed`. `removedAt` is stamped here (payload build
// time), not read from the entity.
@ApiSchema({ name: 'SnapshotRemovedWebhook' })
export class SnapshotRemovedWebhookDto extends BaseWebhookEventDto {
  @ApiProperty({
    description: 'Snapshot ID',
    example: 'snapshot123',
  })
  id: string

  @ApiProperty({
    description: 'Snapshot name',
    example: 'my-snapshot',
  })
  name: string

  @ApiProperty({
    description: 'Organization ID',
    example: 'org123',
  })
  organizationId: string

  @ApiProperty({
    description: 'When the snapshot was removed',
    example: '2025-12-19T10:30:00.000Z',
    format: 'date-time',
  })
  removedAt: string

  static fromEvent(event: SnapshotRemovedEvent, eventType: string): SnapshotRemovedWebhookDto {
    return {
      event: eventType,
      timestamp: new Date().toISOString(),
      id: event.snapshot.id,
      name: event.snapshot.name,
      organizationId: event.snapshot.organizationId,
      removedAt: new Date().toISOString(),
    }
  }
}

// Payload for `volume.created`.
@ApiSchema({ name: 'VolumeCreatedWebhook' })
export class VolumeCreatedWebhookDto extends BaseWebhookEventDto {
  @ApiProperty({
    description: 'Volume ID',
    example: 'vol-12345678',
  })
  id: string

  @ApiProperty({
    description: 'Volume name',
    example: 'my-volume',
  })
  name: string

  @ApiProperty({
    description: 'Organization ID',
    example: 'org123',
  })
  organizationId: string

  @ApiProperty({
    description: 'Volume state',
    enum: VolumeState,
    enumName: 'VolumeState',
  })
  state: VolumeState

  @ApiProperty({
    description: 'When the volume was created',
    example: '2025-12-19T10:30:00.000Z',
    format: 'date-time',
  })
  createdAt: string

  static fromEvent(event: VolumeCreatedEvent, eventType: string): VolumeCreatedWebhookDto {
    return {
      event: eventType,
      timestamp: new Date().toISOString(),
      id: event.volume.id,
      name: event.volume.name,
      organizationId: event.volume.organizationId,
      state: event.volume.state,
      createdAt: event.volume.createdAt.toISOString(),
    }
  }
}

// Payload for `volume.state.updated`.
@ApiSchema({ name: 'VolumeStateUpdatedWebhook' })
export class VolumeStateUpdatedWebhookDto extends BaseWebhookEventDto {
  @ApiProperty({
    description: 'Volume ID',
    example: 'vol-12345678',
  })
  id: string

  @ApiProperty({
    description: 'Volume name',
    example: 'my-volume',
  })
  name: string

  @ApiProperty({
    description: 'Organization ID',
    example: 'org123',
  })
  organizationId: string

  @ApiProperty({
    description: 'Previous state',
    enum: VolumeState,
    enumName: 'VolumeState',
  })
  oldState: VolumeState

  @ApiProperty({
    description: 'New state',
    enum: VolumeState,
    enumName: 'VolumeState',
  })
  newState: VolumeState

  @ApiProperty({
    description: 'When the volume was last updated',
    example: '2025-12-19T10:30:00.000Z',
    format: 'date-time',
  })
  updatedAt: string

  static fromEvent(event: VolumeStateUpdatedEvent, eventType: string): VolumeStateUpdatedWebhookDto {
    return {
      event: eventType,
      timestamp: new Date().toISOString(),
      id: event.volume.id,
      name: event.volume.name,
      organizationId: event.volume.organizationId,
      oldState: event.oldState,
      newState: event.newState,
      updatedAt: event.volume.updatedAt.toISOString(),
    }
  }
}

================================================
FILE: apps/api/src/webhook/dto/webhook-initialization-status.dto.ts
================================================
/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ApiProperty, ApiSchema } from '@nestjs/swagger'
import { WebhookInitialization } from '../entities/webhook-initialization.entity'

// API view of the WebhookInitialization entity (Svix app provisioning progress).
@ApiSchema({ name: 'WebhookInitializationStatus' })
export class WebhookInitializationStatusDto {
  @ApiProperty({
    description: 'Organization ID',
    example: '123e4567-e89b-12d3-a456-426614174000',
  })
  organizationId: string

  @ApiProperty({
    description: 'The ID of the Svix application',
    example: 'app_1234567890',
    nullable: true,
  })
  svixApplicationId?: string

  @ApiProperty({
    description: 'The error reason for the last initialization attempt',
    example: 'Failed to create Svix application',
    nullable: true,
  })
  lastError?: string

  @ApiProperty({
    description: 'The number of times the initialization has been attempted',
    example: 3,
  })
  retryCount: number

  @ApiProperty({
    description: 'When the webhook initialization was created',
    example: '2023-01-01T00:00:00.000Z',
  })
  createdAt: string

  @ApiProperty({
    description: 'When the webhook initialization was last updated',
    example: '2023-01-01T00:00:00.000Z',
  })
  updatedAt: string

  static fromWebhookInitialization(webhookInitialization: WebhookInitialization): WebhookInitializationStatusDto {
    return {
      organizationId: webhookInitialization.organizationId,
      svixApplicationId: webhookInitialization.svixApplicationId,
      lastError: webhookInitialization.lastError,
      retryCount: webhookInitialization.retryCount,
// Tracks per-organization Svix application provisioning: which Svix app (if
// any) backs the organization's webhooks, plus retry and error bookkeeping.
@Entity()
export class WebhookInitialization {
  // One row per organization.
  @PrimaryColumn()
  organizationId: string

  // Null until the Svix application has been created successfully.
  @Column({
    nullable: true,
  })
  svixApplicationId?: string

  // Error text from the most recent failed initialization attempt.
  @Column({
    type: 'text',
    nullable: true,
  })
  lastError?: string

  // Number of initialization attempts made so far.
  @Column({
    type: 'int',
    default: 0,
  })
  retryCount: number

  @CreateDateColumn({
    type: 'timestamp with time zone',
  })
  createdAt: Date

  @UpdateDateColumn({
    type: 'timestamp with time zone',
  })
  updatedAt: Date
}
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger } from '@nestjs/common' import { OnEvent } from '@nestjs/event-emitter' import { WebhookService } from './webhook.service' import { SandboxEvents } from '../../sandbox/constants/sandbox-events.constants' import { SnapshotEvents } from '../../sandbox/constants/snapshot-events' import { VolumeEvents } from '../../sandbox/constants/volume-events' import { SandboxCreatedEvent } from '../../sandbox/events/sandbox-create.event' import { SandboxStateUpdatedEvent } from '../../sandbox/events/sandbox-state-updated.event' import { SnapshotCreatedEvent } from '../../sandbox/events/snapshot-created.event' import { SnapshotStateUpdatedEvent } from '../../sandbox/events/snapshot-state-updated.event' import { SnapshotRemovedEvent } from '../../sandbox/events/snapshot-removed.event' import { VolumeCreatedEvent } from '../../sandbox/events/volume-created.event' import { VolumeStateUpdatedEvent } from '../../sandbox/events/volume-state-updated.event' import { WebhookEvent } from '../constants/webhook-events.constants' import { SandboxCreatedWebhookDto, SandboxStateUpdatedWebhookDto, SnapshotCreatedWebhookDto, SnapshotStateUpdatedWebhookDto, SnapshotRemovedWebhookDto, VolumeCreatedWebhookDto, VolumeStateUpdatedWebhookDto, } from '../dto/webhook-event-payloads.dto' @Injectable() export class WebhookEventHandlerService { private readonly logger = new Logger(WebhookEventHandlerService.name) constructor(private readonly webhookService: WebhookService) {} @OnEvent(SandboxEvents.CREATED) async handleSandboxCreated(event: SandboxCreatedEvent) { if (!this.webhookService.isEnabled()) { return } try { const payload = SandboxCreatedWebhookDto.fromEvent(event, WebhookEvent.SANDBOX_CREATED) await this.webhookService.sendWebhook(event.sandbox.organizationId, WebhookEvent.SANDBOX_CREATED, payload) } catch (error) { this.logger.error(`Failed to send webhook for sandbox created: ${error.message}`) } } 
@OnEvent(SandboxEvents.STATE_UPDATED) async handleSandboxStateUpdated(event: SandboxStateUpdatedEvent) { if (!this.webhookService.isEnabled()) { return } try { const payload = SandboxStateUpdatedWebhookDto.fromEvent(event, WebhookEvent.SANDBOX_STATE_UPDATED) await this.webhookService.sendWebhook(event.sandbox.organizationId, WebhookEvent.SANDBOX_STATE_UPDATED, payload) } catch (error) { this.logger.error(`Failed to send webhook for sandbox state updated: ${error.message}`) } } @OnEvent(SnapshotEvents.CREATED) async handleSnapshotCreated(event: SnapshotCreatedEvent) { if (!this.webhookService.isEnabled()) { return } try { const payload = SnapshotCreatedWebhookDto.fromEvent(event, WebhookEvent.SNAPSHOT_CREATED) await this.webhookService.sendWebhook(event.snapshot.organizationId, WebhookEvent.SNAPSHOT_CREATED, payload) } catch (error) { this.logger.error(`Failed to send webhook for snapshot created: ${error.message}`) } } @OnEvent(SnapshotEvents.STATE_UPDATED) async handleSnapshotStateUpdated(event: SnapshotStateUpdatedEvent) { if (!this.webhookService.isEnabled()) { return } try { const payload = SnapshotStateUpdatedWebhookDto.fromEvent(event, WebhookEvent.SNAPSHOT_STATE_UPDATED) await this.webhookService.sendWebhook(event.snapshot.organizationId, WebhookEvent.SNAPSHOT_STATE_UPDATED, payload) } catch (error) { this.logger.error(`Failed to send webhook for snapshot state updated: ${error.message}`) } } @OnEvent(SnapshotEvents.REMOVED) async handleSnapshotRemoved(event: SnapshotRemovedEvent) { if (!this.webhookService.isEnabled()) { return } try { const payload = SnapshotRemovedWebhookDto.fromEvent(event, WebhookEvent.SNAPSHOT_REMOVED) await this.webhookService.sendWebhook(event.snapshot.organizationId, WebhookEvent.SNAPSHOT_REMOVED, payload) } catch (error) { this.logger.error(`Failed to send webhook for snapshot removed: ${error.message}`) } } @OnEvent(VolumeEvents.CREATED) async handleVolumeCreated(event: VolumeCreatedEvent) { if (!this.webhookService.isEnabled()) { 
return } try { const payload = VolumeCreatedWebhookDto.fromEvent(event, WebhookEvent.VOLUME_CREATED) await this.webhookService.sendWebhook(event.volume.organizationId, WebhookEvent.VOLUME_CREATED, payload) } catch (error) { this.logger.error(`Failed to send webhook for volume created: ${error.message}`) } } @OnEvent(VolumeEvents.STATE_UPDATED) async handleVolumeStateUpdated(event: VolumeStateUpdatedEvent) { if (!this.webhookService.isEnabled()) { return } try { const payload = VolumeStateUpdatedWebhookDto.fromEvent(event, WebhookEvent.VOLUME_STATE_UPDATED) await this.webhookService.sendWebhook(event.volume.organizationId, WebhookEvent.VOLUME_STATE_UPDATED, payload) } catch (error) { this.logger.error(`Failed to send webhook for volume state updated: ${error.message}`) } } /** * Send a custom webhook event */ async sendCustomWebhook(organizationId: string, eventType: string, payload: any, eventId?: string): Promise { if (!this.webhookService.isEnabled()) { return } try { await this.webhookService.sendWebhook(organizationId, eventType, payload, eventId) } catch (error) { this.logger.error(`Failed to send custom webhook: ${error.message}`) } } } ================================================ FILE: apps/api/src/webhook/services/webhook.service.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Injectable, Logger, OnModuleInit } from '@nestjs/common' import { OnEvent } from '@nestjs/event-emitter' import { TypedConfigService } from '../../config/typed-config.service' import { Svix } from 'svix' import { OrganizationEvents } from '../../organization/constants/organization-events.constant' import { Organization } from '../../organization/entities/organization.entity' import { InjectRepository } from '@nestjs/typeorm' import { WebhookInitialization } from '../entities/webhook-initialization.entity' import { Repository } from 'typeorm' @Injectable() export class WebhookService implements OnModuleInit { private readonly logger = new Logger(WebhookService.name) private svix: Svix | null = null constructor( private readonly configService: TypedConfigService, @InjectRepository(WebhookInitialization) private readonly webhookInitializationRepository: Repository, ) {} async onModuleInit() { const svixAuthToken = this.configService.get('webhook.authToken') if (svixAuthToken) { const serverUrl = this.configService.get('webhook.serverUrl') if (serverUrl) { this.svix = new Svix(svixAuthToken, { serverUrl }) } else { this.svix = new Svix(svixAuthToken) //this.svix.eventType.importOpenapi } this.logger.log('Svix webhook service initialized') } else { this.logger.warn('SVIX_AUTH_TOKEN not configured, webhook service disabled') } } /** * Get webhook initialization status for an organization */ async getInitializationStatus(organizationId: string): Promise { return this.webhookInitializationRepository.findOne({ where: { organizationId }, }) } // TODO: Remove this once we decide to open webhooks to all organizations // @OnEvent(OrganizationEvents.CREATED) async handleOrganizationCreated(organization: Organization) { if (!this.svix) { this.logger.debug('Svix not configured, skipping webhook creation') return } try { // Create a new Svix application for this organization const svixAppId = await 
this.createSvixApplication(organization) this.logger.log(`Created Svix application for organization ${organization.id}: ${svixAppId}`) } catch (error) { this.logger.error(`Failed to create Svix application for organization ${organization.id}:`, error) } } /** * Create a Svix application for an organization */ async createSvixApplication(organization: Organization): Promise { if (!this.svix) { throw new Error('Svix not configured') } let existingWebhookInitialization = await this.getInitializationStatus(organization.id) if (existingWebhookInitialization && existingWebhookInitialization.svixApplicationId) { this.logger.warn( `Svix application already exists for organization ${organization.id}: ${existingWebhookInitialization.svixApplicationId}`, ) return existingWebhookInitialization.svixApplicationId } else { existingWebhookInitialization = new WebhookInitialization() existingWebhookInitialization.organizationId = organization.id existingWebhookInitialization.svixApplicationId = null existingWebhookInitialization.retryCount = -1 existingWebhookInitialization.lastError = null } try { const svixApp = await this.svix.application.getOrCreate({ name: organization.name, uid: organization.id, }) existingWebhookInitialization.svixApplicationId = svixApp.id existingWebhookInitialization.retryCount = existingWebhookInitialization.retryCount + 1 existingWebhookInitialization.lastError = null await this.webhookInitializationRepository.save(existingWebhookInitialization) this.logger.log(`Created Svix application for organization ${organization.id}: ${svixApp.id}`) return svixApp.id } catch (error) { existingWebhookInitialization.retryCount = existingWebhookInitialization.retryCount + 1 existingWebhookInitialization.lastError = String(error) await this.webhookInitializationRepository.save(existingWebhookInitialization) this.logger.error(`Failed to create Svix application for organization ${organization.id}:`, error) throw error } } /** * Send a webhook message to all endpoints of 
an organization */ async sendWebhook(organizationId: string, eventType: string, payload: any, eventId?: string): Promise { if (!this.svix) { this.logger.debug('Svix not configured, skipping webhook delivery') return } try { // Check if webhooks are initialized for this organization const isInitialized = await this.getInitializationStatus(organizationId) if (!isInitialized) { this.logger.log(`Webhooks not initialized for organization ${organizationId}, creating Svix application now...`) // For now, we'll just log that initialization is needed // The actual initialization should be done through the API or event handler this.logger.warn( `Organization ${organizationId} needs webhook initialization. Please use the initialization API endpoint.`, ) return } // Send the webhook message await this.svix.message.create(organizationId, { eventType, payload, eventId, }) this.logger.debug(`Sent webhook ${eventType} to organization ${organizationId}`) } catch (error) { this.logger.error(`Failed to send webhook ${eventType} to organization ${organizationId}:`, error) throw error } } /** * Get webhook delivery attempts for a message */ async getMessageAttempts(organizationId: string, messageId: string): Promise { if (!this.svix) { throw new Error('Svix not configured') } try { const attempts = await this.svix.messageAttempt.listByMsg(organizationId, messageId) return attempts.data } catch (error) { this.logger.error(`Failed to get message attempts for message ${messageId}:`, error) throw error } } /** * Check if webhook service is enabled */ isEnabled(): boolean { return this.svix !== null } /** * Get Svix Consumer App Portal access for an organization */ async getAppPortalAccess(organizationId: string): Promise<{ token: string; url: string }> { if (!this.svix) { throw new Error('Svix not configured') } try { const appPortalAccess = await this.svix.authentication.appPortalAccess(organizationId, {}) this.logger.debug(`Generated app portal access for organization ${organizationId}`) 
/**
 * Wires up webhook support: the Svix-backed WebhookService, the event handler
 * that translates domain events into webhook deliveries, and the REST
 * controller. Only WebhookService is exported for use by other modules.
 */
@Module({
  imports: [OrganizationModule, TypedConfigModule, TypeOrmModule.forFeature([WebhookInitialization]), AuthModule],
  controllers: [WebhookController],
  providers: [WebhookService, WebhookEventHandlerService],
  exports: [WebhookService],
})
export class WebhookModule {}
"path": "./tsconfig.spec.json" } ], "compilerOptions": { "esModuleInterop": true } } ================================================ FILE: apps/api/tsconfig.spec.json ================================================ { "extends": "./tsconfig.json", "compilerOptions": { "outDir": "../../dist/out-tsc", "module": "commonjs", "moduleResolution": "node10", "types": ["jest", "node"] }, "include": ["jest.config.ts", "src/**/*.test.ts", "src/**/*.spec.ts", "src/**/*.d.ts"] } ================================================ FILE: apps/api/webpack.config.js ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ const { composePlugins, withNx } = require('@nx/webpack') const path = require('path') const glob = require('glob') const migrationFiles = glob.sync('apps/api/src/migrations/**/*-migration.{ts,js}') const migrationEntries = migrationFiles.reduce((acc, migrationFile) => { const entryName = migrationFile.substring(migrationFile.lastIndexOf('/') + 1, migrationFile.lastIndexOf('.')) acc[entryName] = migrationFile return acc }, {}) module.exports = composePlugins( // Default Nx composable plugin withNx(), // Custom composable plugin (config, { options, context }) => { // `config` is the Webpack configuration object // `options` is the options passed to the `@nx/webpack:webpack` executor // `context` is the context passed to the `@nx/webpack:webpack` executor // customize configuration here config.output.devtoolModuleFilenameTemplate = function (info) { const rel = path.relative(process.cwd(), info.absoluteResourcePath) return `webpack:///./${rel}` } // Preserve openapi files while cleaning other build artifacts config.output.clean = { keep: /openapi.*\.json$/, } // add typeorm migrations as entry points for (const key in migrationEntries) { config.entry[`migrations/${key}`] = migrationEntries[key] } config.mode = process.env.NODE_ENV return config }, ) 
================================================ FILE: apps/cli/apiclient/api_client.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package apiclient import ( "context" "fmt" "net/http" "strings" "sync" "github.com/daytonaio/daytona/cli/auth" "github.com/daytonaio/daytona/cli/config" "github.com/daytonaio/daytona/cli/internal" apiclient "github.com/daytonaio/daytona/libs/api-client-go" log "github.com/sirupsen/logrus" ) type versionCheckTransport struct { transport http.RoundTripper } var versionMismatchWarningOnce sync.Once func (t *versionCheckTransport) RoundTrip(req *http.Request) (*http.Response, error) { resp, err := t.transport.RoundTrip(req) if resp != nil { // Check version mismatch on all responses, not just errors checkVersionsMismatch(resp) } return resp, err } var apiClient *apiclient.APIClient const DaytonaSourceHeader = "X-Daytona-Source" const API_VERSION_HEADER = "X-Daytona-Api-Version" func checkVersionsMismatch(res *http.Response) { // If the CLI is running in a structured output mode (e.g. json/yaml), // avoid printing human-readable warnings that could break consumers. 
// compareVersions compares two dotted numeric version strings.
// Returns: -1 if v1 < v2, 0 if v1 == v2, 1 if v1 > v2.
// Missing segments compare as 0 (so "1" == "1.0.0"); a segment's leading
// integer prefix is used and anything non-numeric after it is ignored,
// matching fmt.Sscanf("%d") semantics.
func compareVersions(v1, v2 string) int {
	segs1 := strings.Split(v1, ".")
	segs2 := strings.Split(v2, ".")

	// numAt extracts the numeric value of segment i, defaulting to 0.
	numAt := func(segs []string, i int) int {
		if i >= len(segs) {
			return 0
		}
		var n int
		_, _ = fmt.Sscanf(segs[i], "%d", &n)
		return n
	}

	limit := len(segs1)
	if len(segs2) > limit {
		limit = len(segs2)
	}

	for i := 0; i < limit; i++ {
		switch a, b := numAt(segs1, i), numAt(segs2, i); {
		case a < b:
			return -1
		case a > b:
			return 1
		}
	}
	return 0
}
activeProfile.Api.Key != nil { clientConfig.AddDefaultHeader("Authorization", "Bearer "+*activeProfile.Api.Key) } else if activeProfile.Api.Token != nil { clientConfig.AddDefaultHeader("Authorization", "Bearer "+activeProfile.Api.Token.AccessToken) if activeProfile.ActiveOrganizationId != nil { clientConfig.AddDefaultHeader("X-Daytona-Organization-ID", *activeProfile.ActiveOrganizationId) } } clientConfig.AddDefaultHeader(DaytonaSourceHeader, "cli") for headerKey, headerValue := range defaultHeaders { clientConfig.AddDefaultHeader(headerKey, headerValue) } newApiClient = apiclient.NewAPIClient(clientConfig) newApiClient.GetConfig().HTTPClient = &http.Client{ Transport: &versionCheckTransport{ transport: http.DefaultTransport, }, } if apiClient != nil && activeProfile.Api.Key == nil { err = auth.RefreshTokenIfNeeded(context.Background()) if err != nil { return nil, err } } apiClient = newApiClient return apiClient, nil } ================================================ FILE: apps/cli/apiclient/error_handler.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package apiclient import ( "encoding/json" "errors" "fmt" "io" "net/http" ) type ApiErrorResponse struct { Error string `json:"error"` Message any `json:"message,omitempty"` } func HandleErrorResponse(res *http.Response, requestErr error) error { if res == nil { return requestErr } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { return err } var errResponse ApiErrorResponse err = json.Unmarshal(body, &errResponse) if err != nil { return err } errMessage := string(errResponse.Error) if errMessage == "" { // Fall back to raw body if error field is empty errMessage = string(body) } else { if errResponse.Message != nil { // Message field could be a string or an array switch msg := errResponse.Message.(type) { case string: errMessage += ": " + msg case []any: if len(msg) > 0 { msgStr := fmt.Sprintf("%v", msg) errMessage += ": " + msgStr } } } } if res.StatusCode == http.StatusUnauthorized { errMessage += " - run 'daytona login' to reauthenticate" } return errors.New(errMessage) } ================================================ FILE: apps/cli/auth/auth.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package auth import ( "context" "crypto/rand" _ "embed" "encoding/base64" "fmt" "net/http" "sync" "time" "github.com/coreos/go-oidc/v3/oidc" "github.com/daytonaio/daytona/cli/config" log "github.com/sirupsen/logrus" "golang.org/x/oauth2" ) //go:embed auth_success.html var successHTML []byte func StartCallbackServer(expectedState string) (string, error) { var code string var err error var wg sync.WaitGroup wg.Add(1) server := &http.Server{Addr: fmt.Sprintf(":%s", config.GetAuth0CallbackPort())} http.HandleFunc("/callback", func(w http.ResponseWriter, r *http.Request) { if r.URL.Query().Get("state") != expectedState { err = fmt.Errorf("invalid state parameter") http.Error(w, "State invalid", http.StatusBadRequest) wg.Done() return } code = r.URL.Query().Get("code") if code == "" { err = fmt.Errorf("no code in callback") http.Error(w, "No code", http.StatusBadRequest) wg.Done() return } w.Header().Set("Content-Type", "text/html") _, _ = w.Write(successHTML) // Delay server close to ensure browser receives the success page go func() { time.Sleep(500 * time.Millisecond) wg.Done() server.Close() }() }) go func() { if err := server.ListenAndServe(); err != http.ErrServerClosed { log.Errorf("HTTP server error: %v", err) } }() wg.Wait() if err != nil { return "", err } return code, nil } func GenerateRandomState() (string, error) { b := make([]byte, 32) _, err := rand.Read(b) if err != nil { return "", err } return base64.URLEncoding.EncodeToString(b), nil } func RefreshTokenIfNeeded(ctx context.Context) error { c, err := config.GetConfig() if err != nil { return err } activeProfile, err := c.GetActiveProfile() if err != nil { return err } if activeProfile.Api.Key != nil { return nil } if activeProfile.Api.Token == nil { return fmt.Errorf("no valid token found, use 'daytona login' to reauthenticate") } // Check if token is about to expire (within 5 minutes) if time.Until(activeProfile.Api.Token.ExpiresAt) > 5*time.Minute { return nil } 
provider, err := oidc.NewProvider(ctx, config.GetAuth0Domain()) if err != nil { return fmt.Errorf("failed to initialize OIDC provider: %w", err) } oauth2Config := oauth2.Config{ ClientID: config.GetAuth0ClientId(), ClientSecret: config.GetAuth0ClientSecret(), RedirectURL: fmt.Sprintf("http://localhost:%s/callback", config.GetAuth0CallbackPort()), Endpoint: provider.Endpoint(), Scopes: []string{oidc.ScopeOpenID, oidc.ScopeOfflineAccess, "profile"}, } token := &oauth2.Token{ RefreshToken: activeProfile.Api.Token.RefreshToken, } newToken, err := oauth2Config.TokenSource(ctx, token).Token() if err != nil { return fmt.Errorf("use 'daytona login' to reauthenticate: %w", err) } activeProfile.Api.Token = &config.Token{ AccessToken: newToken.AccessToken, RefreshToken: newToken.RefreshToken, ExpiresAt: newToken.Expiry, } return c.EditProfile(activeProfile) } ================================================ FILE: apps/cli/auth/auth_success.html ================================================ Daytona

Authentication Successful

You can now close this window and return to the CLI.

================================================ FILE: apps/cli/cmd/auth/login.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package auth import ( "context" "fmt" "github.com/coreos/go-oidc/v3/oidc" "github.com/daytonaio/daytona/cli/auth" "github.com/daytonaio/daytona/cli/cmd/common" "github.com/daytonaio/daytona/cli/config" "github.com/daytonaio/daytona/cli/internal" view_common "github.com/daytonaio/daytona/cli/views/common" "github.com/pkg/browser" "github.com/spf13/cobra" "golang.org/x/oauth2" ) var LoginCmd = &cobra.Command{ Use: "login", Short: "Log in to Daytona", Args: cobra.NoArgs, GroupID: internal.USER_GROUP, RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() if apiKeyFlag != "" { return updateProfileWithLogin(nil, &apiKeyFlag) } items := []view_common.SelectItem{ {Title: "Login with Browser", Desc: "Authenticate using OAuth in your browser"}, {Title: "Set Daytona API Key", Desc: "Authenticate using Daytona API key"}, } choice, err := view_common.Select("Select Authentication Method", items) if err != nil { return fmt.Errorf("error running selection prompt: %w", err) } if choice == "" { return nil } var tokenConfig *config.Token setApiKey := choice == "Set Daytona API Key" if setApiKey { // Prompt for API key apiKey, err := view_common.PromptForInput("", "Enter your Daytona API key", "You can find it in the Daytona dashboard - https://app.daytona.io/dashboard") if err != nil { return err } return updateProfileWithLogin(nil, &apiKey) } token, err := login(ctx) if err != nil { return err } tokenConfig = &config.Token{ AccessToken: token.AccessToken, RefreshToken: token.RefreshToken, ExpiresAt: token.Expiry, } return updateProfileWithLogin(tokenConfig, nil) }, } var ( apiKeyFlag string ) func init() { LoginCmd.Flags().StringVar(&apiKeyFlag, "api-key", "", "API key to use for authentication") } func updateProfileWithLogin(tokenConfig 
*config.Token, apiKey *string) error { c, err := config.GetConfig() if err != nil { return err } activeProfile, err := c.GetActiveProfile() if err != nil { if err == config.ErrNoProfilesFound { activeProfile, err = createInitialProfile(c) if err != nil { return err } } else { return err } } if apiKey != nil { activeProfile.Api.Token = nil activeProfile.Api.Key = apiKey view_common.RenderInfoMessageBold("Successfully set Daytona API key!") } if tokenConfig != nil { activeProfile.Api.Key = nil activeProfile.Api.Token = tokenConfig err = c.EditProfile(activeProfile) if err != nil { return err } if activeProfile.Api.Key == nil { personalOrganizationId, err := common.GetPersonalOrganizationId(activeProfile) if err != nil { return err } activeProfile.ActiveOrganizationId = &personalOrganizationId } } return c.EditProfile(activeProfile) } func createInitialProfile(c *config.Config) (config.Profile, error) { profile := config.Profile{ Id: "initial", Name: "initial", Api: config.ServerApi{ Url: config.GetDaytonaApiUrl(), }, } if internal.Version == "v0.0.0-dev" { profile.Api.Url = "http://localhost:3001/api" } return profile, c.AddProfile(profile) } func login(ctx context.Context) (*oauth2.Token, error) { provider, err := oidc.NewProvider(ctx, config.GetAuth0Domain()) if err != nil { return nil, fmt.Errorf("failed to initialize OIDC provider: %w", err) } verifier := provider.Verifier(&oidc.Config{ClientID: config.GetAuth0ClientId()}) oauth2Config := oauth2.Config{ ClientID: config.GetAuth0ClientId(), ClientSecret: config.GetAuth0ClientSecret(), RedirectURL: fmt.Sprintf("http://localhost:%s/callback", config.GetAuth0CallbackPort()), Endpoint: provider.Endpoint(), Scopes: []string{oidc.ScopeOpenID, oidc.ScopeOfflineAccess, "profile"}, } state, err := auth.GenerateRandomState() if err != nil { return nil, fmt.Errorf("failed to generate random state: %w", err) } authURL := oauth2Config.AuthCodeURL( state, oauth2.SetAuthURLParam("audience", config.GetAuth0Audience()), ) 
// LogoutCmd clears the stored credentials (OAuth token and API key) from the
// active profile. It does not revoke the token server-side; it only removes
// the local copies so subsequent commands require a fresh `daytona login`.
var LogoutCmd = &cobra.Command{
	Use:     "logout",
	Short:   "Logout from Daytona",
	Args:    cobra.NoArgs,
	GroupID: internal.USER_GROUP,
	RunE: func(cmd *cobra.Command, args []string) error {
		c, err := config.GetConfig()
		if err != nil {
			return err
		}

		activeProfile, err := c.GetActiveProfile()
		if err != nil {
			return err
		}

		// For now, this just clears the local auth token/api key entries
		activeProfile.Api.Token = nil
		activeProfile.Api.Key = nil

		err = c.EditProfile(activeProfile)
		if err != nil {
			return err
		}

		common.RenderInfoMessageBold("Successfully logged out")
		return nil
	},
}
// SPDX-License-Identifier: AGPL-3.0 package cmd import ( "errors" "fmt" "os" "path/filepath" "strings" "github.com/spf13/cobra" ) var supportedShells = []string{"bash", "zsh", "fish", "powershell"} var AutoCompleteCmd = &cobra.Command{ Use: fmt.Sprintf("autocomplete [%s]", strings.Join(supportedShells, "|")), Short: "Adds a completion script for your shell environment", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { shell := args[0] profilePath, err := SetupAutocompletionForShell(cmd.Root(), shell) if err != nil { return err } fmt.Println("Autocomplete script generated and injected successfully.") fmt.Printf("Please source your %s profile to apply the changes or restart your terminal.\n", shell) fmt.Printf("For manual sourcing, use: source %s\n", profilePath) if shell == "bash" { fmt.Println("Please make sure that you have bash-completion installed in order to get full autocompletion functionality.") fmt.Println("On how to install bash-completion, please refer to the following link: https://www.daytona.io/docs/tools/cli/#daytona-autocomplete") } return nil }, } func DetectShellAndSetupAutocompletion(rootCmd *cobra.Command) error { shell := os.Getenv("SHELL") if shell == "" { return fmt.Errorf("unable to detect the shell, please use a supported one: %s", strings.Join(supportedShells, ", ")) } for _, supportedShell := range supportedShells { if strings.Contains(shell, supportedShell) { shell = supportedShell break } } _, err := SetupAutocompletionForShell(rootCmd, shell) if err != nil { return err } return nil } func SetupAutocompletionForShell(rootCmd *cobra.Command, shell string) (string, error) { homeDir, err := os.UserHomeDir() if err != nil { return "", fmt.Errorf("error finding user home directory: %s", err) } var filePath, profilePath string switch shell { case "bash": filePath = filepath.Join(homeDir, ".daytona.completion_script.bash") profilePath = filepath.Join(homeDir, ".bashrc") case "zsh": filePath = 
filepath.Join(homeDir, ".daytona.completion_script.zsh") profilePath = filepath.Join(homeDir, ".zshrc") case "fish": filePath = filepath.Join(homeDir, ".config", "fish", "daytona.completion_script.fish") profilePath = filepath.Join(homeDir, ".config", "fish", "config.fish") case "powershell": filePath = filepath.Join(homeDir, "daytona.completion_script.ps1") profilePath = filepath.Join(homeDir, "Documents", "WindowsPowerShell", "Microsoft.PowerShell_profile.ps1") default: return "", errors.New("unsupported shell type. Please use bash, zsh, fish, or powershell") } file, err := os.Create(filePath) if err != nil { return "", fmt.Errorf("error creating completion script file: %s", err) } defer file.Close() switch shell { case "bash": err = rootCmd.GenBashCompletion(file) case "zsh": err = rootCmd.GenZshCompletion(file) case "fish": err = rootCmd.GenFishCompletion(file, true) case "powershell": err = rootCmd.GenPowerShellCompletionWithDesc(file) } if err != nil { return "", fmt.Errorf("error generating completion script: %s", err) } sourceCommand := fmt.Sprintf("\nsource %s\n", filePath) if shell == "powershell" { sourceCommand = fmt.Sprintf(". 
%s\n", filePath) } alreadyPresent := false // Read existing content from the file profile, err := os.ReadFile(profilePath) if err != nil && !os.IsNotExist(err) { return "", fmt.Errorf("error while reading profile (%s): %s", profilePath, err) } if strings.Contains(string(profile), strings.TrimSpace(sourceCommand)) { alreadyPresent = true } if !alreadyPresent { // Append the source command to the shell's profile file if not present profile, err := os.OpenFile(profilePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) if err != nil { return "", fmt.Errorf("error opening profile file (%s): %s", profilePath, err) } defer profile.Close() if _, err := profile.WriteString(sourceCommand); err != nil { return "", fmt.Errorf("error writing to profile file (%s): %s", profilePath, err) } } return profilePath, nil } ================================================ FILE: apps/cli/cmd/common/aliases.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package common var commandAliases = map[string][]string{ "create": {"add", "new"}, "delete": {"remove", "rm"}, "update": {"set"}, "install": {"i"}, "uninstall": {"u"}, "info": {"view", "inspect"}, "code": {"open"}, "logs": {"log"}, "forward": {"fwd"}, "list": {"ls"}, } func GetAliases(cmd string) []string { if aliases, exists := commandAliases[cmd]; exists { return aliases } return nil } ================================================ FILE: apps/cli/cmd/common/build.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package common import ( "context" "fmt" "net/url" "os" "path/filepath" "regexp" "strings" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/pkg/minio" apiclient "github.com/daytonaio/daytona/libs/api-client-go" ) // Create MinIO client from access parameters func CreateMinioClient(accessParams *apiclient.StorageAccessDto) (*minio.Client, error) { storageURL, err := url.Parse(accessParams.StorageUrl) if err != nil { return nil, fmt.Errorf("invalid storage URL: %w", err) } minioClient, err := minio.NewClient( storageURL.Host, accessParams.AccessKey, accessParams.Secret, accessParams.Bucket, storageURL.Scheme == "https", accessParams.SessionToken, ) if err != nil { return nil, fmt.Errorf("failed to create storage client: %w", err) } return minioClient, nil } // List existing objects in MinIO func ListExistingObjects(ctx context.Context, minioClient *minio.Client, orgID string) (map[string]bool, error) { objects, err := minioClient.ListObjects(ctx, orgID) if err != nil { return nil, fmt.Errorf("failed to list objects: %w", err) } existingObjects := make(map[string]bool) for _, obj := range objects { existingObjects[obj] = true } return existingObjects, nil } // getContextHashes processes context paths and returns their hashes func getContextHashes(ctx context.Context, apiClient *apiclient.APIClient, contextPaths []string) ([]string, error) { contextHashes := []string{} if len(contextPaths) == 0 { return contextHashes, nil } // Get storage access parameters accessParams, res, err := apiClient.ObjectStorageAPI.GetPushAccess(ctx).Execute() if err != nil { return nil, apiclient_cli.HandleErrorResponse(res, err) } // Create MinIO client minioClient, err := CreateMinioClient(accessParams) if err != nil { return nil, fmt.Errorf("failed to create storage client: %w", err) } // List existing objects to avoid re-uploading existingObjects, err := ListExistingObjects(ctx, minioClient, 
accessParams.OrganizationId) if err != nil { return nil, fmt.Errorf("failed to list existing objects: %w", err) } // Process each context path for _, contextPath := range contextPaths { absPath, err := filepath.Abs(contextPath) if err != nil { return nil, fmt.Errorf("invalid context path %s: %w", contextPath, err) } fileInfo, err := os.Stat(absPath) if err != nil { return nil, fmt.Errorf("failed to access context path %s: %w", contextPath, err) } if fileInfo.IsDir() { // Process directory dirHashes, err := minioClient.ProcessDirectory(ctx, absPath, accessParams.OrganizationId, existingObjects) if err != nil { return nil, fmt.Errorf("failed to process directory %s: %w", absPath, err) } contextHashes = append(contextHashes, dirHashes...) } else { // Process single file hash, err := minioClient.ProcessFile(ctx, absPath, accessParams.OrganizationId, existingObjects) if err != nil { return nil, fmt.Errorf("failed to process file %s: %w", absPath, err) } contextHashes = append(contextHashes, hash) } } return contextHashes, nil } func parseDockerfileForSources(dockerfileContent string, dockerfileDir string) ([]string, error) { var sources []string lines := strings.Split(dockerfileContent, "\n") copyRegex := regexp.MustCompile(`^\s*COPY\s+(.+)`) addRegex := regexp.MustCompile(`^\s*ADD\s+(.+)`) for _, line := range lines { line = strings.TrimSpace(line) // Skip empty lines and comments if line == "" || strings.HasPrefix(line, "#") { continue } var matches []string if copyRegex.MatchString(line) { // Skip COPY commands with --from= flag (multi-stage builds) if !strings.Contains(line, "--from=") { matches = copyRegex.FindStringSubmatch(line) } } else if addRegex.MatchString(line) { matches = addRegex.FindStringSubmatch(line) } if len(matches) > 1 { sourcePaths := parseCopyAddCommand(matches[1]) for _, srcPath := range sourcePaths { // Skip if it's a URL (ADD command can use URLs) if strings.HasPrefix(srcPath, "http://") || strings.HasPrefix(srcPath, "https://") { continue } 
// Convert relative paths to absolute paths relative to Dockerfile directory if !filepath.IsAbs(srcPath) { srcPath = filepath.Join(dockerfileDir, srcPath) } srcPath = filepath.Clean(srcPath) // Check if path exists and add to sources if _, err := os.Stat(srcPath); err == nil { sources = append(sources, srcPath) } else { // If exact path doesn't exist, try to match glob patterns matches, err := filepath.Glob(srcPath) if err == nil && len(matches) > 0 { sources = append(sources, matches...) } } } } } // Remove duplicates and optimize paths sourceMap := make(map[string]bool) var uniqueSources []string // Check if we have the current directory (.) in our sources hasCurrentDir := false currentDirPath := dockerfileDir for _, src := range sources { if src == currentDirPath { hasCurrentDir = true break } } // If we have the current directory, we only need that (it includes everything) if hasCurrentDir { return []string{currentDirPath}, nil } // Otherwise, remove duplicates normally for _, src := range sources { if !sourceMap[src] { sourceMap[src] = true uniqueSources = append(uniqueSources, src) } } return uniqueSources, nil } func parseCopyAddCommand(args string) []string { args = strings.TrimSpace(args) var sources []string // Handle JSON array format: ["src1", "src2", "dest"] if strings.HasPrefix(args, "[") && strings.HasSuffix(args, "]") { // Remove brackets and parse as space-separated values with quotes content := strings.Trim(args, "[]") parts := parseQuotedArguments(content) if len(parts) >= 2 { // All but the last argument are sources sources = parts[:len(parts)-1] } return sources } // Handle regular format with possible flags parts := parseQuotedArguments(args) // Skip flags like --chown, --chmod, --from sourcesStartIdx := 0 for i := 0; i < len(parts); i++ { part := parts[i] if strings.HasPrefix(part, "--") { // Skip the flag and its value if it has one if !strings.Contains(part, "=") && i+1 < len(parts) && !strings.HasPrefix(parts[i+1], "--") { sourcesStartIdx 
= i + 2 } else { sourcesStartIdx = i + 1 } } else { break } } // After skipping flags, we need at least one source and one destination if len(parts)-sourcesStartIdx >= 2 { sources = parts[sourcesStartIdx : len(parts)-1] } return sources } func parseQuotedArguments(input string) []string { var args []string var current strings.Builder inQuotes := false quoteChar := byte(0) input = strings.TrimSpace(input) for i := 0; i < len(input); i++ { char := input[i] if !inQuotes && (char == '"' || char == '\'') { inQuotes = true quoteChar = char } else if inQuotes && char == quoteChar { inQuotes = false quoteChar = 0 } else if !inQuotes && (char == ' ' || char == '\t') { if current.Len() > 0 { args = append(args, current.String()) current.Reset() } // Skip consecutive whitespace for i+1 < len(input) && (input[i+1] == ' ' || input[i+1] == '\t') { i++ } } else { current.WriteByte(char) } } if current.Len() > 0 { args = append(args, current.String()) } return args } func GetCreateBuildInfoDto(ctx context.Context, dockerfilePath string, contextPaths []string) (*apiclient.CreateBuildInfo, error) { dockerfileAbsPath, err := filepath.Abs(dockerfilePath) if err != nil { return nil, fmt.Errorf("invalid dockerfile path: %w", err) } if _, err := os.Stat(dockerfileAbsPath); os.IsNotExist(err) { return nil, fmt.Errorf("dockerfile does not exist: %s", dockerfileAbsPath) } dockerfileContent, err := os.ReadFile(dockerfileAbsPath) if err != nil { return nil, fmt.Errorf("failed to read dockerfile: %w", err) } dockerfileDir := filepath.Dir(dockerfileAbsPath) // If no context paths are provided, automatically parse the Dockerfile to find them if len(contextPaths) == 0 { autoContextPaths, err := parseDockerfileForSources(string(dockerfileContent), dockerfileDir) if err != nil { return nil, fmt.Errorf("failed to parse dockerfile for context: %w", err) } contextPaths = autoContextPaths } else { var resolvedContextPaths []string for _, contextPath := range contextPaths { var absPath string if 
filepath.IsAbs(contextPath) { absPath = contextPath } else { // When context paths are provided manually (via --context flag), // resolve them relative to the current working directory absPath, err = filepath.Abs(contextPath) if err != nil { return nil, fmt.Errorf("failed to resolve context path %s: %w", contextPath, err) } } resolvedContextPaths = append(resolvedContextPaths, absPath) } contextPaths = resolvedContextPaths } apiClient, err := apiclient_cli.GetApiClient(nil, nil) if err != nil { return nil, err } contextHashes, err := getContextHashes(ctx, apiClient, contextPaths) if err != nil { return nil, err } return &apiclient.CreateBuildInfo{ DockerfileContent: string(dockerfileContent), ContextHashes: contextHashes, }, nil } ================================================ FILE: apps/cli/cmd/common/format.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package common import ( "encoding/json" "fmt" "os" "github.com/daytonaio/daytona/cli/internal" "github.com/spf13/cobra" "gopkg.in/yaml.v2" ) const ( formatFlagDescription = `Output format. 
Must be one of (yaml, json)` formatFlagName = "format" formatFlagShortHand = "f" ) var ( FormatFlag string standardOut *os.File ) type outputFormatter struct { data interface{} formatter Formatter } func NewFormatter(data interface{}) *outputFormatter { var formatter Formatter switch FormatFlag { case "json": formatter = JSONFormatter{} case "yaml": formatter = YAMLFormatter{} case "": formatter = nil default: formatter = JSONFormatter{} // Default to JSON } return &outputFormatter{ data: data, formatter: formatter, } } type Formatter interface { Format(data interface{}) (string, error) } type JSONFormatter struct{} func (f JSONFormatter) Format(data interface{}) (string, error) { jsonData, err := json.MarshalIndent(data, "", " ") // Indent with two spaces if err != nil { return "", err } return string(jsonData), nil } type YAMLFormatter struct{} func (f YAMLFormatter) Format(data interface{}) (string, error) { yamlData, err := yaml.Marshal(data) if err != nil { return "", err } return string(yamlData), nil } func (f *outputFormatter) Print() { formattedOutput, err := f.formatter.Format(f.data) if err != nil { fmt.Printf("Error formatting output: %v\n", err) os.Exit(1) } UnblockStdOut() fmt.Println(formattedOutput) BlockStdOut() } func BlockStdOut() { if os.Stdout != nil { standardOut = os.Stdout os.Stdout = nil } } func UnblockStdOut() { if os.Stdout == nil { os.Stdout = standardOut standardOut = nil } } func RegisterFormatFlag(cmd *cobra.Command) { cmd.Flags().StringVarP(&FormatFlag, formatFlagName, formatFlagShortHand, FormatFlag, formatFlagDescription) cmd.PreRun = func(cmd *cobra.Command, args []string) { if FormatFlag != "" { BlockStdOut() // When a structured output format is requested, suppress // noisy warnings such as version mismatch so scripts // consuming json/yaml aren't broken. 
internal.SuppressVersionMismatchWarning = true } else { internal.SuppressVersionMismatchWarning = false } } } ================================================ FILE: apps/cli/cmd/common/logs.go ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ package common import ( "bufio" "context" "fmt" "io" "net/http" "time" "github.com/daytonaio/daytona/cli/config" log "github.com/sirupsen/logrus" ) type ReadLogParams struct { Id string ServerUrl string ServerApi config.ServerApi ActiveOrganizationId *string Follow *bool ResourceType ResourceType } type ResourceType string const ( ResourceTypeSandbox ResourceType = "sandbox" ResourceTypeSnapshot ResourceType = "snapshots" ) func ReadBuildLogs(ctx context.Context, params ReadLogParams) { url := fmt.Sprintf("%s/%s/%s/build-logs", params.ServerUrl, params.ResourceType, params.Id) if params.Follow != nil && *params.Follow { url = fmt.Sprintf("%s?follow=true", url) } req, err := http.NewRequestWithContext(ctx, "GET", url, nil) if err != nil { log.Errorf("Failed to create request: %v", err) return } if params.ServerApi.Key != nil { req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", *params.ServerApi.Key)) } else if params.ServerApi.Token != nil { req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", params.ServerApi.Token.AccessToken)) if params.ActiveOrganizationId != nil { req.Header.Add("X-Daytona-Organization-ID", *params.ActiveOrganizationId) } } req.Header.Add("Accept", "application/octet-stream") client := &http.Client{} resp, err := client.Do(req) if err != nil { log.Errorf("Failed to connect to server: %v", err) return } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { log.Errorf("Server returned a non-OK status while retrieving logs: %d", resp.StatusCode) return } reader := bufio.NewReader(resp.Body) buffer := make([]byte, 4096) for { select { case <-ctx.Done(): return default: n, err := reader.Read(buffer) if n > 0 { 
fmt.Print(string(buffer[:n])) } if err != nil { if err == io.EOF { if params.Follow != nil && *params.Follow { time.Sleep(500 * time.Millisecond) continue } return } // Don't log context.Canceled as it's an expected case when streaming is stopped if err != context.Canceled { log.Errorf("Error reading from stream: %v", err) } return } } } } ================================================ FILE: apps/cli/cmd/common/organization.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package common import ( "context" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/config" apiclient "github.com/daytonaio/daytona/libs/api-client-go" ) func GetPersonalOrganizationId(profile config.Profile) (string, error) { apiClient, err := apiclient_cli.GetApiClient(&profile, nil) if err != nil { return "", err } organizationList, res, err := apiClient.OrganizationsAPI.ListOrganizations(context.Background()).Execute() if err != nil { return "", apiclient_cli.HandleErrorResponse(res, err) } for _, organization := range organizationList { if organization.Personal { return organization.Id, nil } } return "", nil } func GetActiveOrganizationName(apiClient *apiclient.APIClient, ctx context.Context) (string, error) { activeOrganizationId, err := config.GetActiveOrganizationId() if err != nil { return "", err } if activeOrganizationId == "" { return "", config.ErrNoActiveOrganization } activeOrganization, res, err := apiClient.OrganizationsAPI.GetOrganization(ctx, activeOrganizationId).Execute() if err != nil { return "", apiclient_cli.HandleErrorResponse(res, err) } return activeOrganization.Name, nil } ================================================ FILE: apps/cli/cmd/common/sandbox.go ================================================ // Copyright Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package common import ( "fmt" apiclient "github.com/daytonaio/daytona/libs/api-client-go" ) func RequireStartedState(sandbox *apiclient.Sandbox) error { if sandbox.State == nil { return fmt.Errorf("sandbox state is unknown") } state := *sandbox.State if state == apiclient.SANDBOXSTATE_STARTED { return nil } sandboxRef := sandbox.Id if sandbox.Name != "" { sandboxRef = sandbox.Name } switch state { case apiclient.SANDBOXSTATE_STOPPED: return fmt.Errorf("sandbox is stopped. Start it with: daytona sandbox start %s", sandboxRef) case apiclient.SANDBOXSTATE_ARCHIVED: return fmt.Errorf("sandbox is archived. Start it with: daytona sandbox start %s", sandboxRef) case apiclient.SANDBOXSTATE_ARCHIVING: return fmt.Errorf("sandbox is archiving. Start it with: daytona sandbox start %s", sandboxRef) case apiclient.SANDBOXSTATE_STARTING: return fmt.Errorf("sandbox is starting. Please wait for it to be ready") case apiclient.SANDBOXSTATE_STOPPING: return fmt.Errorf("sandbox is stopping. Please wait for it to complete") case apiclient.SANDBOXSTATE_CREATING: return fmt.Errorf("sandbox is being created. Please wait for it to be ready") case apiclient.SANDBOXSTATE_DESTROYING: return fmt.Errorf("sandbox is being destroyed") case apiclient.SANDBOXSTATE_DESTROYED: return fmt.Errorf("sandbox has been destroyed") case apiclient.SANDBOXSTATE_ERROR: return fmt.Errorf("sandbox is in an error state") case apiclient.SANDBOXSTATE_BUILD_FAILED: return fmt.Errorf("sandbox build failed") default: return fmt.Errorf("sandbox is not running (state: %s)", state) } } ================================================ FILE: apps/cli/cmd/common/ssh.go ================================================ // Copyright Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package common

import (
	"fmt"
	"strings"
)

// ParseSSHCommand parses the SSH command string returned by the API
// Expected formats:
//   - "ssh token@host" (port 22)
//   - "ssh -p port token@host"
//
// It returns every whitespace-separated argument after the leading "ssh",
// suitable for passing to ExecuteSSH.
func ParseSSHCommand(sshCommand string) ([]string, error) {
	parts := strings.Fields(sshCommand)
	if len(parts) < 2 {
		return nil, fmt.Errorf("invalid SSH command format: %s", sshCommand)
	}

	// Skip the "ssh" part
	args := parts[1:]

	return args, nil
}

================================================
FILE: apps/cli/cmd/common/ssh_unix.go
================================================
// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

//go:build unix

package common

import (
	"fmt"
	"os"
	"os/exec"
	"os/signal"
	"syscall"
)

// ExecuteSSH runs the SSH command with proper terminal handling
// (stdin/stdout/stderr are inherited, and SIGINT/SIGTERM/SIGWINCH are
// forwarded to the ssh child). On a non-zero ssh exit, this process exits
// with the same code; it only returns an error for other failures.
func ExecuteSSH(sshArgs []string) error {
	sshPath, err := exec.LookPath("ssh")
	if err != nil {
		return fmt.Errorf("ssh not found in PATH: %w", err)
	}

	// Create the command
	sshCmd := exec.Command(sshPath, sshArgs...)
	sshCmd.Stdin = os.Stdin
	sshCmd.Stdout = os.Stdout
	sshCmd.Stderr = os.Stderr

	// Handle signals - forward them to the SSH process
	// (SIGWINCH keeps the remote terminal size in sync on resize).
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGWINCH)

	// Start the SSH process
	if err := sshCmd.Start(); err != nil {
		return fmt.Errorf("failed to start SSH: %w", err)
	}

	// Forward signals to the SSH process
	go func() {
		for sig := range sigChan {
			if sshCmd.Process != nil {
				_ = sshCmd.Process.Signal(sig)
			}
		}
	}()

	// Wait for SSH to complete
	err = sshCmd.Wait()

	// Stop signal handling; closing the channel ends the forwarding goroutine.
	signal.Stop(sigChan)
	close(sigChan)

	if err != nil {
		// Mirror ssh's own exit code so shell scripts behave as with plain ssh.
		if exitErr, ok := err.(*exec.ExitError); ok {
			os.Exit(exitErr.ExitCode())
		}
		return err
	}

	return nil
}

================================================
FILE: apps/cli/cmd/common/ssh_windows.go
================================================
// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

//go:build windows

package common

import (
	"fmt"
	"os"
	"os/exec"
	"os/signal"
	"syscall"
)

// ExecuteSSH runs the SSH command with proper terminal handling.
// Windows variant of the unix implementation: identical behavior except that
// SIGWINCH does not exist here, so only SIGINT/SIGTERM are forwarded.
// On a non-zero ssh exit, this process exits with the same code.
func ExecuteSSH(sshArgs []string) error {
	sshPath, err := exec.LookPath("ssh")
	if err != nil {
		return fmt.Errorf("ssh not found in PATH: %w", err)
	}

	// Create the command
	sshCmd := exec.Command(sshPath, sshArgs...)
	sshCmd.Stdin = os.Stdin
	sshCmd.Stdout = os.Stdout
	sshCmd.Stderr = os.Stderr

	// Handle signals - forward them to the SSH process
	// Note: SIGWINCH is not available on Windows
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)

	// Start the SSH process
	if err := sshCmd.Start(); err != nil {
		return fmt.Errorf("failed to start SSH: %w", err)
	}

	// Forward signals to the SSH process
	go func() {
		for sig := range sigChan {
			if sshCmd.Process != nil {
				_ = sshCmd.Process.Signal(sig)
			}
		}
	}()

	// Wait for SSH to complete
	err = sshCmd.Wait()

	// Stop signal handling; closing the channel ends the forwarding goroutine.
	signal.Stop(sigChan)
	close(sigChan)

	if err != nil {
		// Mirror ssh's own exit code so callers see the same status.
		if exitErr, ok := err.(*exec.ExitError); ok {
			os.Exit(exitErr.ExitCode())
		}
		return err
	}

	return nil
}

================================================
FILE: apps/cli/cmd/common/state.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package common import ( "context" "fmt" "time" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" apiclient "github.com/daytonaio/daytona/libs/api-client-go" ) func AwaitSnapshotState(ctx context.Context, apiClient *apiclient.APIClient, name string, state apiclient.SnapshotState) error { for { snapshot, res, err := apiClient.SnapshotsAPI.GetSnapshot(ctx, name).Execute() if err != nil { return apiclient_cli.HandleErrorResponse(res, err) } switch snapshot.State { case state: return nil case apiclient.SNAPSHOTSTATE_ERROR, apiclient.SNAPSHOTSTATE_BUILD_FAILED: if !snapshot.ErrorReason.IsSet() { return fmt.Errorf("snapshot processing failed") } return fmt.Errorf("snapshot processing failed: %s", *snapshot.ErrorReason.Get()) } time.Sleep(time.Second) } } func AwaitSandboxState(ctx context.Context, apiClient *apiclient.APIClient, targetSandbox string, state apiclient.SandboxState) error { for { sandbox, res, err := apiClient.SandboxAPI.GetSandbox(ctx, targetSandbox).Execute() if err != nil { return apiclient_cli.HandleErrorResponse(res, err) } if sandbox.State != nil && *sandbox.State == state { return nil } else if sandbox.State != nil && (*sandbox.State == apiclient.SANDBOXSTATE_ERROR || *sandbox.State == apiclient.SANDBOXSTATE_BUILD_FAILED) { if sandbox.ErrorReason == nil { return fmt.Errorf("sandbox processing failed") } return fmt.Errorf("sandbox processing failed: %s", *sandbox.ErrorReason) } time.Sleep(time.Second) } } ================================================ FILE: apps/cli/cmd/common/validate.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package common import ( "fmt" "strings" ) func ValidateImageName(imageName string) error { parts := strings.Split(imageName, ":") if len(parts) != 2 { return fmt.Errorf("invalid image format: must contain exactly one colon (e.g., 'ubuntu:22.04')") } if parts[1] == "latest" { return fmt.Errorf("tag 'latest' not allowed, please use a specific version tag") } return nil } ================================================ FILE: apps/cli/cmd/docs.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package cmd import ( "fmt" "github.com/daytonaio/daytona/cli/views/common" "github.com/pkg/browser" "github.com/spf13/cobra" ) var docsURL string = "https://www.daytona.io/docs/" var DocsCmd = &cobra.Command{ Use: "docs", Short: "Opens the Daytona documentation in your default browser.", Args: cobra.NoArgs, Aliases: []string{"documentation", "doc"}, RunE: func(cmd *cobra.Command, args []string) error { common.RenderInfoMessageBold(fmt.Sprintf("Opening the Daytona documentation in your default browser. If opening fails, you can go to %s manually.", common.LinkStyle.Render(docsURL))) return browser.OpenURL(docsURL) }, } ================================================ FILE: apps/cli/cmd/generatedocs.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package cmd

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

// yamlDirectory is the parent directory for the YAML docs tree.
var yamlDirectory = "hack"

// defaultDirectory is used when no --directory flag is provided.
var defaultDirectory = "docs"

// GenerateDocsCmd (hidden) generates Markdown docs for the whole CLI command
// tree into the target directory, and a parallel YAML tree under hack/.
var GenerateDocsCmd = &cobra.Command{
	Use:   "generate-docs",
	Short: "Generate documentation for the Daytona CLI",
	Args:  cobra.NoArgs,
	RunE: func(cmd *cobra.Command, args []string) error {
		directory, err := cmd.Flags().GetString("directory")
		if err != nil {
			return err
		}

		if directory == "" {
			directory = defaultDirectory
		}

		err = os.MkdirAll(directory, os.ModePerm)
		if err != nil {
			return err
		}

		err = os.MkdirAll(filepath.Join(yamlDirectory, directory), os.ModePerm)
		if err != nil {
			return err
		}

		// Markdown tree rooted at the CLI's root command.
		err = doc.GenMarkdownTree(cmd.Root(), directory)
		if err != nil {
			return err
		}

		// YAML mirror of the same command tree.
		err = doc.GenYamlTree(cmd.Root(), filepath.Join(yamlDirectory, directory))
		if err != nil {
			return err
		}

		fmt.Printf("Documentation generated at %s\n", directory)
		return nil
	},
	Hidden: true,
}

func init() {
	GenerateDocsCmd.Flags().String("directory", "", "Directory to generate documentation into")
}

================================================
FILE: apps/cli/cmd/mcp/agents/claude.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package agents import ( "errors" "os" "path/filepath" "runtime" ) func InitClaude(homeDir string) (string, string, error) { var agentConfigFilePath string var mcpLogFilePath string switch runtime.GOOS { case "darwin": agentConfigFilePath = filepath.Join(homeDir, "Library", "Application Support", "Claude", "claude_desktop_config.json") mcpLogFilePath = filepath.Join(homeDir, "Library", "Logs", "Claude", mcpLogFileName) case "windows": // Resolve %APPDATA% environment variable appData := os.Getenv("APPDATA") if appData == "" { return "", "", errors.New("could not resolve APPDATA environment variable") } agentConfigFilePath = filepath.Join(appData, "Claude", "claude_desktop_config.json") mcpLogFilePath = filepath.Join(appData, "Claude", "Logs", mcpLogFileName) case "linux": agentConfigFilePath = filepath.Join(homeDir, ".config", "Claude", "claude_desktop_config.json") mcpLogFilePath = filepath.Join("var", "log", "Claude", mcpLogFileName) default: return "", "", errors.New("operating system is not supported") } return agentConfigFilePath, mcpLogFilePath, nil } ================================================ FILE: apps/cli/cmd/mcp/agents/common.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package agents var mcpLogFileName string = "daytona-mcp-server.log" ================================================ FILE: apps/cli/cmd/mcp/agents/cursor.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package agents import ( "errors" "os" "path/filepath" "runtime" ) func InitCursor(homeDir string) (string, string, error) { var agentConfigFilePath string var mcpLogFilePath string switch runtime.GOOS { case "darwin": agentConfigFilePath = filepath.Join(homeDir, ".cursor", "mcp.json") mcpLogFilePath = filepath.Join(homeDir, "Library", "Logs", "Cursor", mcpLogFileName) case "windows": // Resolve %APPDATA% environment variable appData := os.Getenv("APPDATA") if appData == "" { return "", "", errors.New("could not resolve APPDATA environment variable") } agentConfigFilePath = filepath.Join(appData, ".cursor", "mcp.json") mcpLogFilePath = filepath.Join(appData, "Cursor", "Logs", mcpLogFileName) case "linux": agentConfigFilePath = filepath.Join(homeDir, ".cursor", "mcp.json") mcpLogFilePath = filepath.Join("var", "log", "Cursor", mcpLogFileName) default: return "", "", errors.New("operating system is not supported") } return agentConfigFilePath, mcpLogFilePath, nil } ================================================ FILE: apps/cli/cmd/mcp/agents/windsurf.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package agents import ( "errors" "os" "path/filepath" "runtime" ) func InitWindsurf(homeDir string) (string, string, error) { var agentConfigFilePath string var mcpLogFilePath string switch runtime.GOOS { case "darwin": agentConfigFilePath = filepath.Join(homeDir, ".codeium", "windsurf", "mcp_config.json") mcpLogFilePath = filepath.Join(homeDir, "Library", "Logs", "Windsurf", mcpLogFileName) case "windows": // Resolve %APPDATA% environment variable appData := os.Getenv("APPDATA") if appData == "" { return "", "", errors.New("could not resolve APPDATA environment variable") } agentConfigFilePath = filepath.Join(appData, ".codeium", "windsurf", "mcp_config.json") mcpLogFilePath = filepath.Join(appData, "Windsurf", "Logs", mcpLogFileName) case "linux": agentConfigFilePath = filepath.Join(homeDir, ".codeium", "windsurf", "mcp_config.json") mcpLogFilePath = filepath.Join("var", "log", "Windsurf", mcpLogFileName) default: return "", "", errors.New("operating system is not supported") } return agentConfigFilePath, mcpLogFilePath, nil } ================================================ FILE: apps/cli/cmd/mcp/config.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package mcp import ( "encoding/json" "fmt" "os" "runtime" "github.com/spf13/cobra" ) var ConfigCmd = &cobra.Command{ Use: "config [AGENT_NAME]", Short: "Outputs JSON configuration for Daytona MCP Server", Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { homeDir, err := os.UserHomeDir() if err != nil { return err } var mcpLogFilePath string switch runtime.GOOS { case "darwin": mcpLogFilePath = homeDir + "/.daytona/daytona-mcp.log" case "windows": mcpLogFilePath = os.Getenv("APPDATA") + "\\.daytona\\daytona-mcp.log" case "linux": mcpLogFilePath = homeDir + "/.daytona/daytona-mcp.log" default: return fmt.Errorf("unsupported OS: %s", runtime.GOOS) } daytonaMcpConfig, err := getDayonaMcpConfig(mcpLogFilePath) if err != nil { return err } mcpConfig := map[string]interface{}{ "daytona-mcp": daytonaMcpConfig, } jsonBytes, err := json.MarshalIndent(mcpConfig, "", " ") if err != nil { return err } fmt.Println(string(jsonBytes)) return nil }, } func getDayonaMcpConfig(mcpLogFilePath string) (map[string]interface{}, error) { homeDir, err := os.UserHomeDir() if err != nil { return nil, err } // Create daytona-mcp config daytonaMcpConfig := map[string]interface{}{ "command": "daytona", "args": []string{"mcp", "start"}, "env": map[string]string{ "PATH": homeDir + ":/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/homebrew/bin", "HOME": homeDir, }, "logFile": mcpLogFilePath, } if runtime.GOOS == "windows" { daytonaMcpConfig["env"].(map[string]string)["APPDATA"] = os.Getenv("APPDATA") } return daytonaMcpConfig, nil } ================================================ FILE: apps/cli/cmd/mcp/init.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package mcp import ( "encoding/json" "fmt" "os" "github.com/daytonaio/daytona/cli/cmd/mcp/agents" "github.com/spf13/cobra" ) var InitCmd = &cobra.Command{ Use: "init [AGENT_NAME]", Short: "Initialize Daytona MCP Server with an agent (currently supported: claude, windsurf, cursor)", Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { return fmt.Errorf("agent name is required") } homeDir, err := os.UserHomeDir() if err != nil { return err } var agentConfigFilePath, mcpLogFilePath string switch args[0] { case "claude": agentConfigFilePath, mcpLogFilePath, err = agents.InitClaude(homeDir) if err != nil { return err } case "cursor": agentConfigFilePath, mcpLogFilePath, err = agents.InitCursor(homeDir) if err != nil { return err } case "windsurf": agentConfigFilePath, mcpLogFilePath, err = agents.InitWindsurf(homeDir) if err != nil { return err } default: return fmt.Errorf("agent name %s is not supported", args[0]) } return injectConfig(agentConfigFilePath, mcpLogFilePath) }, } func injectConfig(agentConfigFilePath, mcpLogFilePath string) error { daytonaMcpConfig, err := getDayonaMcpConfig(mcpLogFilePath) if err != nil { return err } // Read existing model config or create new one var agentConfig map[string]interface{} if agentConfigData, err := os.ReadFile(agentConfigFilePath); err == nil { if err := json.Unmarshal(agentConfigData, &agentConfig); err != nil { return err } } else if !os.IsNotExist(err) { return err } else { agentConfig = make(map[string]interface{}) } // Initialize or update mcpServers field mcpServers, ok := agentConfig["mcpServers"].(map[string]interface{}) if !ok { mcpServers = make(map[string]interface{}) } // Add or update daytona-mcp configuration mcpServers["daytona-mcp"] = daytonaMcpConfig agentConfig["mcpServers"] = mcpServers // Write back the updated config with indentation updatedJSON, err := json.MarshalIndent(agentConfig, "", " ") if err != nil { return 
err } return os.WriteFile(agentConfigFilePath, updatedJSON, 0644) } ================================================ FILE: apps/cli/cmd/mcp/mcp.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package mcp import ( "github.com/spf13/cobra" ) var MCPCmd = &cobra.Command{ Use: "mcp", Short: "Manage Daytona MCP Server", Long: "Commands for managing Daytona MCP Server", } func init() { MCPCmd.AddCommand(InitCmd) MCPCmd.AddCommand(StartCmd) MCPCmd.AddCommand(ConfigCmd) } ================================================ FILE: apps/cli/cmd/mcp/start.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package mcp import ( "os" "os/signal" "github.com/daytonaio/daytona/cli/mcp" "github.com/spf13/cobra" ) var StartCmd = &cobra.Command{ Use: "start", Short: "Start Daytona MCP Server", Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { server := mcp.NewDaytonaMCPServer() interruptChan := make(chan os.Signal, 1) signal.Notify(interruptChan, os.Interrupt) errChan := make(chan error) go func() { errChan <- server.Start() }() select { case err := <-errChan: return err case <-interruptChan: return nil } }, } ================================================ FILE: apps/cli/cmd/organization/create.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package organization import ( "context" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/config" "github.com/daytonaio/daytona/cli/views/common" "github.com/daytonaio/daytona/cli/views/organization" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "github.com/spf13/cobra" ) var CreateCmd = &cobra.Command{ Use: "create [ORGANIZATION_NAME]", Short: "Create a new organization and set it as active", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient_cli.GetApiClient(nil, nil) if err != nil { return err } createOrganizationDto := apiclient.CreateOrganization{ Name: args[0], } org, res, err := apiClient.OrganizationsAPI.CreateOrganization(ctx).CreateOrganization(createOrganizationDto).Execute() if err != nil { return apiclient_cli.HandleErrorResponse(res, err) } c, err := config.GetConfig() if err != nil { return err } activeProfile, err := c.GetActiveProfile() if err != nil { return err } activeProfile.ActiveOrganizationId = &org.Id err = c.EditProfile(activeProfile) if err != nil { return err } organization.RenderInfo(org, false) common.RenderInfoMessageBold("Your organization has been created and its approval is pending\nOur team has been notified and will set up your resource quotas shortly") return nil }, } ================================================ FILE: apps/cli/cmd/organization/delete.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package organization import ( "context" "fmt" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/cmd/common" "github.com/daytonaio/daytona/cli/config" view_common "github.com/daytonaio/daytona/cli/views/common" "github.com/daytonaio/daytona/cli/views/organization" "github.com/daytonaio/daytona/cli/views/util" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "github.com/spf13/cobra" ) var DeleteCmd = &cobra.Command{ Use: "delete [ORGANIZATION]", Short: "Delete an organization", Args: cobra.MaximumNArgs(1), Aliases: common.GetAliases("delete"), RunE: func(cmd *cobra.Command, args []string) error { var chosenOrganization *apiclient.Organization ctx := context.Background() apiClient, err := apiclient_cli.GetApiClient(nil, nil) if err != nil { return err } orgList, res, err := apiClient.OrganizationsAPI.ListOrganizations(ctx).Execute() if err != nil { return apiclient_cli.HandleErrorResponse(res, err) } if len(orgList) == 0 { util.NotifyEmptyOrganizationList(true) return nil } if len(args) == 0 { chosenOrganization, err = organization.GetOrganizationIdFromPrompt(orgList) if err != nil { return err } } else { for _, org := range orgList { if org.Id == args[0] || org.Name == args[0] { chosenOrganization = &org break } } if chosenOrganization == nil { return fmt.Errorf("organization %s not found", args[0]) } } if chosenOrganization.Name == "Personal" { return fmt.Errorf("cannot delete personal organization") } res, err = apiClient.OrganizationsAPI.DeleteOrganization(ctx, chosenOrganization.Id).Execute() if err != nil { return apiclient_cli.HandleErrorResponse(res, err) } view_common.RenderInfoMessageBold(fmt.Sprintf("Organization %s has been deleted", chosenOrganization.Name)) c, err := config.GetConfig() if err != nil { return err } activeProfile, err := c.GetActiveProfile() if err != nil { return err } if activeProfile.ActiveOrganizationId == nil || 
*activeProfile.ActiveOrganizationId != chosenOrganization.Id { return nil } personalOrganizationId, err := common.GetPersonalOrganizationId(activeProfile) if err != nil { return err } activeProfile.ActiveOrganizationId = &personalOrganizationId return c.EditProfile(activeProfile) }, } ================================================ FILE: apps/cli/cmd/organization/list.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package organization import ( "context" "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/cmd/common" "github.com/daytonaio/daytona/cli/config" "github.com/daytonaio/daytona/cli/views/organization" "github.com/spf13/cobra" ) var ListCmd = &cobra.Command{ Use: "list", Short: "List all organizations", Args: cobra.NoArgs, Aliases: common.GetAliases("list"), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient.GetApiClient(nil, nil) if err != nil { return err } organizationList, res, err := apiClient.OrganizationsAPI.ListOrganizations(ctx).Execute() if err != nil { return apiclient.HandleErrorResponse(res, err) } if common.FormatFlag != "" { formattedData := common.NewFormatter(organizationList) formattedData.Print() return nil } activeOrganizationId, err := config.GetActiveOrganizationId() if err != nil { return err } organization.ListOrganizations(organizationList, &activeOrganizationId) return nil }, } func init() { common.RegisterFormatFlag(ListCmd) } ================================================ FILE: apps/cli/cmd/organization/organization.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package organization import ( "errors" "github.com/daytonaio/daytona/cli/config" "github.com/daytonaio/daytona/cli/internal" "github.com/spf13/cobra" ) var OrganizationCmd = &cobra.Command{ Use: "organization", Short: "Manage Daytona organizations", Long: "Commands for managing Daytona organizations", Aliases: []string{"organizations", "org", "orgs"}, GroupID: internal.USER_GROUP, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { if config.IsApiKeyAuth() { return errors.New("organization commands are not available when using API key authentication - run `daytona login` to reauthenticate with browser") } return nil }, } func init() { OrganizationCmd.AddCommand(ListCmd) OrganizationCmd.AddCommand(CreateCmd) OrganizationCmd.AddCommand(UseCmd) OrganizationCmd.AddCommand(DeleteCmd) } ================================================ FILE: apps/cli/cmd/organization/use.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package organization import ( "context" "fmt" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/config" "github.com/daytonaio/daytona/cli/views/common" "github.com/daytonaio/daytona/cli/views/organization" "github.com/daytonaio/daytona/cli/views/util" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "github.com/spf13/cobra" ) var UseCmd = &cobra.Command{ Use: "use [ORGANIZATION]", Short: "Set active organization", Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { var chosenOrganization *apiclient.Organization ctx := context.Background() apiClient, err := apiclient_cli.GetApiClient(nil, nil) if err != nil { return err } orgList, res, err := apiClient.OrganizationsAPI.ListOrganizations(ctx).Execute() if err != nil { return apiclient_cli.HandleErrorResponse(res, err) } if len(orgList) == 0 { util.NotifyEmptyOrganizationList(true) return nil } if len(args) == 0 { chosenOrganization, err = organization.GetOrganizationIdFromPrompt(orgList) if err != nil { return err } } else { for _, org := range orgList { if org.Id == args[0] || org.Name == args[0] { chosenOrganization = &org break } } if chosenOrganization == nil { return fmt.Errorf("organization %s not found", args[0]) } } c, err := config.GetConfig() if err != nil { return err } activeProfile, err := c.GetActiveProfile() if err != nil { return err } activeProfile.ActiveOrganizationId = &chosenOrganization.Id err = c.EditProfile(activeProfile) if err != nil { return err } common.RenderInfoMessageBold(fmt.Sprintf("Organization %s is now active", chosenOrganization.Name)) return nil }, } ================================================ FILE: apps/cli/cmd/sandbox/archive.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "context" "fmt" "github.com/daytonaio/daytona/cli/apiclient" view_common "github.com/daytonaio/daytona/cli/views/common" "github.com/spf13/cobra" ) var ArchiveCmd = &cobra.Command{ Use: "archive [SANDBOX_ID] | [SANDBOX_NAME]", Short: "Archive a sandbox", Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient.GetApiClient(nil, nil) if err != nil { return err } sandboxIdOrNameArg := args[0] _, res, err := apiClient.SandboxAPI.ArchiveSandbox(ctx, sandboxIdOrNameArg).Execute() if err != nil { return apiclient.HandleErrorResponse(res, err) } view_common.RenderInfoMessageBold(fmt.Sprintf("Sandbox %s marked for archival", sandboxIdOrNameArg)) return nil }, } func init() { } ================================================ FILE: apps/cli/cmd/sandbox/create.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "context" "fmt" "strings" "time" "github.com/charmbracelet/lipgloss" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/cmd/common" "github.com/daytonaio/daytona/cli/config" "github.com/daytonaio/daytona/cli/util" views_common "github.com/daytonaio/daytona/cli/views/common" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "github.com/spf13/cobra" ) const SANDBOX_TERMINAL_PORT = 22222 var CreateCmd = &cobra.Command{ Use: "create [flags]", Short: "Create a new sandbox", Args: cobra.NoArgs, Aliases: common.GetAliases("create"), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient_cli.GetApiClient(nil, nil) if err != nil { return err } createSandbox := apiclient.NewCreateSandbox() // Add non-zero values to the request if snapshotFlag != "" { createSandbox.SetSnapshot(snapshotFlag) } if nameFlag != "" { 
createSandbox.SetName(nameFlag) } if userFlag != "" { createSandbox.SetUser(userFlag) } if len(envFlag) > 0 { env := make(map[string]string) for _, e := range envFlag { parts := strings.SplitN(e, "=", 2) if len(parts) == 2 { env[parts[0]] = parts[1] } } createSandbox.SetEnv(env) } if len(labelsFlag) > 0 { labels := make(map[string]string) for _, l := range labelsFlag { parts := strings.SplitN(l, "=", 2) if len(parts) == 2 { labels[parts[0]] = parts[1] } } createSandbox.SetLabels(labels) } if publicFlag { createSandbox.SetPublic(true) } if classFlag != "" { createSandbox.SetClass(classFlag) } if targetFlag != "" { createSandbox.SetTarget(targetFlag) } if cpuFlag > 0 { createSandbox.SetCpu(cpuFlag) } if gpuFlag > 0 { createSandbox.SetGpu(gpuFlag) } if memoryFlag > 0 { createSandbox.SetMemory(memoryFlag) } if diskFlag > 0 { createSandbox.SetDisk(diskFlag) } if autoStopFlag >= 0 { createSandbox.SetAutoStopInterval(autoStopFlag) } if autoArchiveFlag >= 0 { createSandbox.SetAutoArchiveInterval(autoArchiveFlag) } createSandbox.SetAutoDeleteInterval(autoDeleteFlag) createSandbox.SetNetworkBlockAll(networkBlockAllFlag) if networkAllowListFlag != "" { createSandbox.SetNetworkAllowList(networkAllowListFlag) } if dockerfileFlag != "" { createBuildInfoDto, err := common.GetCreateBuildInfoDto(ctx, dockerfileFlag, contextFlag) if err != nil { return err } createSandbox.SetBuildInfo(*createBuildInfoDto) } if len(volumesFlag) > 0 { volumes := make([]apiclient.SandboxVolume, 0, len(volumesFlag)) for _, v := range volumesFlag { parts := strings.SplitN(v, ":", 2) if len(parts) == 2 { volumeId := parts[0] mountPath := parts[1] volume := apiclient.SandboxVolume{ VolumeId: volumeId, MountPath: mountPath, } volumes = append(volumes, volume) } } if len(volumes) > 0 { createSandbox.SetVolumes(volumes) } } var sandbox *apiclient.Sandbox sandbox, res, err := apiClient.SandboxAPI.CreateSandbox(ctx).CreateSandbox(*createSandbox).Execute() if err != nil { return 
apiclient_cli.HandleErrorResponse(res, err) } if sandbox.State != nil && *sandbox.State == apiclient.SANDBOXSTATE_PENDING_BUILD { c, err := config.GetConfig() if err != nil { return err } activeProfile, err := c.GetActiveProfile() if err != nil { return err } err = common.AwaitSandboxState(ctx, apiClient, sandbox.Id, apiclient.SANDBOXSTATE_BUILDING_SNAPSHOT) if err != nil { return err } logsContext, stopLogs := context.WithCancel(context.Background()) defer stopLogs() go common.ReadBuildLogs(logsContext, common.ReadLogParams{ Id: sandbox.Id, ServerUrl: activeProfile.Api.Url, ServerApi: activeProfile.Api, ActiveOrganizationId: activeProfile.ActiveOrganizationId, Follow: util.Pointer(true), ResourceType: common.ResourceTypeSandbox, }) err = common.AwaitSandboxState(ctx, apiClient, sandbox.Id, apiclient.SANDBOXSTATE_STARTED) if err != nil { return err } // Wait for the last logs to be read time.Sleep(250 * time.Millisecond) stopLogs() } previewUrl, res, err := apiClient.SandboxAPI.GetPortPreviewUrl(ctx, sandbox.Id, SANDBOX_TERMINAL_PORT).Execute() if err != nil { return apiclient_cli.HandleErrorResponse(res, err) } boldStyle := lipgloss.NewStyle().Bold(true) views_common.RenderInfoMessageBold(fmt.Sprintf("Sandbox '%s' created successfully", sandbox.Name)) views_common.RenderInfoMessage(fmt.Sprintf("Connect via SSH: %s", boldStyle.Render(fmt.Sprintf("daytona ssh %s", sandbox.Name)))) views_common.RenderInfoMessage(fmt.Sprintf("Open the Web Terminal: %s\n", views_common.LinkStyle.Render(previewUrl.Url))) return nil }, } var ( snapshotFlag string nameFlag string userFlag string envFlag []string labelsFlag []string publicFlag bool classFlag string targetFlag string cpuFlag int32 gpuFlag int32 memoryFlag int32 diskFlag int32 autoStopFlag int32 autoArchiveFlag int32 autoDeleteFlag int32 volumesFlag []string dockerfileFlag string contextFlag []string networkBlockAllFlag bool networkAllowListFlag string ) func init() { CreateCmd.Flags().StringVar(&snapshotFlag, "snapshot", 
"", "Snapshot to use for the sandbox") CreateCmd.Flags().StringVar(&nameFlag, "name", "", "Name of the sandbox") CreateCmd.Flags().StringVar(&userFlag, "user", "", "User associated with the sandbox") CreateCmd.Flags().StringArrayVarP(&envFlag, "env", "e", []string{}, "Environment variables (format: KEY=VALUE)") CreateCmd.Flags().StringArrayVarP(&labelsFlag, "label", "l", []string{}, "Labels (format: KEY=VALUE)") CreateCmd.Flags().BoolVar(&publicFlag, "public", false, "Make sandbox publicly accessible") CreateCmd.Flags().StringVar(&classFlag, "class", "", "Sandbox class type (small, medium, large)") CreateCmd.Flags().StringVar(&targetFlag, "target", "", "Target region (eu, us)") CreateCmd.Flags().Int32Var(&cpuFlag, "cpu", 0, "CPU cores allocated to the sandbox") CreateCmd.Flags().Int32Var(&gpuFlag, "gpu", 0, "GPU units allocated to the sandbox") CreateCmd.Flags().Int32Var(&memoryFlag, "memory", 0, "Memory allocated to the sandbox in MB") CreateCmd.Flags().Int32Var(&diskFlag, "disk", 0, "Disk space allocated to the sandbox in GB") CreateCmd.Flags().Int32Var(&autoStopFlag, "auto-stop", 15, "Auto-stop interval in minutes (0 means disabled)") CreateCmd.Flags().Int32Var(&autoArchiveFlag, "auto-archive", 10080, "Auto-archive interval in minutes (0 means the maximum interval will be used)") CreateCmd.Flags().Int32Var(&autoDeleteFlag, "auto-delete", -1, "Auto-delete interval in minutes (negative value means disabled, 0 means delete immediately upon stopping)") CreateCmd.Flags().StringArrayVarP(&volumesFlag, "volume", "v", []string{}, "Volumes to mount (format: VOLUME_NAME:MOUNT_PATH)") CreateCmd.Flags().StringVarP(&dockerfileFlag, "dockerfile", "f", "", "Path to Dockerfile for Sandbox snapshot") CreateCmd.Flags().StringArrayVarP(&contextFlag, "context", "c", []string{}, "Files or directories to include in the build context (can be specified multiple times)") CreateCmd.Flags().BoolVar(&networkBlockAllFlag, "network-block-all", false, "Whether to block all network access for 
the sandbox") CreateCmd.Flags().StringVar(&networkAllowListFlag, "network-allow-list", "", "Comma-separated list of allowed CIDR network addresses for the sandbox") CreateCmd.MarkFlagsMutuallyExclusive("snapshot", "dockerfile") CreateCmd.MarkFlagsMutuallyExclusive("snapshot", "context") } ================================================ FILE: apps/cli/cmd/sandbox/delete.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "context" "fmt" "sync" "sync/atomic" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/cmd/common" view_common "github.com/daytonaio/daytona/cli/views/common" views_util "github.com/daytonaio/daytona/cli/views/util" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "github.com/spf13/cobra" ) const spinnerThreshold = 10 var DeleteCmd = &cobra.Command{ Use: "delete [SANDBOX_ID] | [SANDBOX_NAME]", Short: "Delete a sandbox", Args: cobra.MaximumNArgs(1), Aliases: common.GetAliases("delete"), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient_cli.GetApiClient(nil, nil) if err != nil { return err } // Handle case when no sandbox ID is provided and allFlag is true if len(args) == 0 { if allFlag { page := float32(1.0) limit := float32(200.0) // 200 is the maximum limit for the API var allSandboxes []apiclient.Sandbox for { sandboxBatch, res, err := apiClient.SandboxAPI.ListSandboxesPaginated(ctx).Page(page).Limit(limit).Execute() if err != nil { return apiclient_cli.HandleErrorResponse(res, err) } allSandboxes = append(allSandboxes, sandboxBatch.Items...) 
if len(sandboxBatch.Items) < int(limit) || page >= float32(sandboxBatch.TotalPages) { break } page++ } if len(allSandboxes) == 0 { view_common.RenderInfoMessageBold("No sandboxes to delete") return nil } var deletedCount int64 deleteFn := func() error { var wg sync.WaitGroup sem := make(chan struct{}, 10) // limit to 10 concurrent deletes for _, sb := range allSandboxes { wg.Add(1) go func(sb apiclient.Sandbox) { defer wg.Done() sem <- struct{}{} defer func() { <-sem }() _, res, err := apiClient.SandboxAPI.DeleteSandbox(ctx, sb.Id).Execute() if err != nil { fmt.Printf("Failed to delete sandbox %s: %s\n", sb.Id, apiclient_cli.HandleErrorResponse(res, err)) } else { atomic.AddInt64(&deletedCount, 1) } }(sb) } wg.Wait() return nil } if len(allSandboxes) > spinnerThreshold { err = views_util.WithInlineSpinner("Deleting all sandboxes", deleteFn) } else { err = deleteFn() } if err != nil { return err } view_common.RenderInfoMessageBold(fmt.Sprintf("Deleted %d sandboxes", atomic.LoadInt64(&deletedCount))) return nil } return cmd.Help() } // Handle case when a sandbox ID is provided sandboxIdOrNameArg := args[0] _, res, err := apiClient.SandboxAPI.DeleteSandbox(ctx, sandboxIdOrNameArg).Execute() if err != nil { return apiclient_cli.HandleErrorResponse(res, err) } view_common.RenderInfoMessageBold(fmt.Sprintf("Sandbox %s deleted", sandboxIdOrNameArg)) return nil }, } var allFlag bool func init() { DeleteCmd.Flags().BoolVarP(&allFlag, "all", "a", false, "Delete all sandboxes") } ================================================ FILE: apps/cli/cmd/sandbox/exec.go ================================================ // Copyright Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "context" "fmt" "os" "strings" "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/cmd/common" "github.com/daytonaio/daytona/cli/toolbox" "github.com/spf13/cobra" ) var ExecCmd = &cobra.Command{ Use: "exec [SANDBOX_ID | SANDBOX_NAME] -- [COMMAND] [ARGS...]", Short: "Execute a command in a sandbox", Long: "Execute a command in a running sandbox", Args: cobra.MinimumNArgs(2), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient.GetApiClient(nil, nil) if err != nil { return err } sandboxIdOrName := args[0] // Find the command args after "--" commandArgs := args[1:] if len(commandArgs) == 0 { return fmt.Errorf("no command specified") } // First, get the sandbox to get its ID and region (in case name was provided) sandbox, res, err := apiClient.SandboxAPI.GetSandbox(ctx, sandboxIdOrName).Execute() if err != nil { return apiclient.HandleErrorResponse(res, err) } if err := common.RequireStartedState(sandbox); err != nil { return err } toolboxClient := toolbox.NewClient(apiClient) command := strings.Join(commandArgs, " ") executeRequest := toolbox.ExecuteRequest{ Command: command, } if execCwd != "" { executeRequest.Cwd = &execCwd } if execTimeout > 0 { timeout := float32(execTimeout) executeRequest.Timeout = &timeout } // Execute the command via toolbox response, err := toolboxClient.ExecuteCommand(ctx, sandbox, executeRequest) if err != nil { return err } // Print the output (stdout + stderr combined) if response.Result != "" { fmt.Print(response.Result) } // Exit with the command's exit code exitCode := int(response.ExitCode) if exitCode != 0 { if response.Result == "" { fmt.Fprintf(os.Stderr, "Command failed with exit code %d\n", exitCode) } os.Exit(exitCode) } return nil }, } var ( execCwd string execTimeout int ) func init() { ExecCmd.Flags().StringVar(&execCwd, "cwd", "", "Working directory for command execution") 
ExecCmd.Flags().IntVar(&execTimeout, "timeout", 0, "Command timeout in seconds (0 for no timeout)") } ================================================ FILE: apps/cli/cmd/sandbox/info.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "context" "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/cmd/common" "github.com/daytonaio/daytona/cli/views/sandbox" "github.com/spf13/cobra" ) var InfoCmd = &cobra.Command{ Use: "info [SANDBOX_ID] | [SANDBOX_NAME]", Short: "Get sandbox info", Args: cobra.ExactArgs(1), Aliases: common.GetAliases("info"), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient.GetApiClient(nil, nil) if err != nil { return err } sandboxIdOrNameArg := args[0] sb, res, err := apiClient.SandboxAPI.GetSandbox(ctx, sandboxIdOrNameArg).Execute() if err != nil { return apiclient.HandleErrorResponse(res, err) } if common.FormatFlag != "" { formattedData := common.NewFormatter(sb) formattedData.Print() return nil } sandbox.RenderInfo(sb, false) return nil }, } func init() { common.RegisterFormatFlag(InfoCmd) } ================================================ FILE: apps/cli/cmd/sandbox/list.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "context" "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/cmd/common" "github.com/daytonaio/daytona/cli/config" "github.com/daytonaio/daytona/cli/views/sandbox" "github.com/spf13/cobra" ) var ( pageFlag int limitFlag int ) var ListCmd = &cobra.Command{ Use: "list", Short: "List sandboxes", Args: cobra.NoArgs, Aliases: common.GetAliases("list"), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient.GetApiClient(nil, nil) if err != nil { return err } page := float32(1.0) limit := float32(100.0) if cmd.Flags().Changed("page") { page = float32(pageFlag) } if cmd.Flags().Changed("limit") { limit = float32(limitFlag) } sandboxList, res, err := apiClient.SandboxAPI.ListSandboxesPaginated(ctx).Page(page).Limit(limit).Execute() if err != nil { return apiclient.HandleErrorResponse(res, err) } sandbox.SortSandboxes(&sandboxList.Items) if common.FormatFlag != "" { formattedData := common.NewFormatter(sandboxList) formattedData.Print() return nil } var activeOrganizationName *string if !config.IsApiKeyAuth() { name, err := common.GetActiveOrganizationName(apiClient, ctx) if err != nil { return err } activeOrganizationName = &name } sandbox.ListSandboxes(sandboxList.Items, activeOrganizationName) return nil }, } func init() { ListCmd.Flags().IntVarP(&pageFlag, "page", "p", 1, "Page number for pagination (starting from 1)") ListCmd.Flags().IntVarP(&limitFlag, "limit", "l", 100, "Maximum number of items per page") common.RegisterFormatFlag(ListCmd) } ================================================ FILE: apps/cli/cmd/sandbox/preview_url.go ================================================ // Copyright Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "context" "fmt" "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/cmd/common" "github.com/spf13/cobra" ) var PreviewUrlCmd = &cobra.Command{ Use: "preview-url [SANDBOX_ID | SANDBOX_NAME]", Short: "Get signed preview URL for a sandbox port", Args: cobra.ExactArgs(1), Aliases: common.GetAliases("preview-url"), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient.GetApiClient(nil, nil) if err != nil { return err } sandboxIdOrName := args[0] if previewUrlPort == 0 { return fmt.Errorf("port flag is required") } req := apiClient.SandboxAPI.GetSignedPortPreviewUrl(ctx, sandboxIdOrName, previewUrlPort). ExpiresInSeconds(previewUrlExpires) previewUrl, res, err := req.Execute() if err != nil { return apiclient.HandleErrorResponse(res, err) } fmt.Println(previewUrl.Url) return nil }, } var ( previewUrlPort int32 previewUrlExpires int32 ) func init() { PreviewUrlCmd.Flags().Int32VarP(&previewUrlPort, "port", "p", 0, "Port number to get preview URL for (required)") PreviewUrlCmd.Flags().Int32Var(&previewUrlExpires, "expires", 3600, "URL expiration time in seconds") _ = PreviewUrlCmd.MarkFlagRequired("port") } ================================================ FILE: apps/cli/cmd/sandbox/sandbox.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "github.com/daytonaio/daytona/cli/internal" "github.com/spf13/cobra" ) var SandboxCmd = &cobra.Command{ Use: "sandbox", Short: "Manage Daytona sandboxes", Long: "Commands for managing Daytona sandboxes", Aliases: []string{"sandboxes"}, GroupID: internal.SANDBOX_GROUP, Hidden: true, // Deprecated: use top-level commands instead (e.g., "daytona start" instead of "daytona sandbox start") } func init() { SandboxCmd.AddCommand(ListCmd) SandboxCmd.AddCommand(CreateCmd) SandboxCmd.AddCommand(InfoCmd) SandboxCmd.AddCommand(DeleteCmd) SandboxCmd.AddCommand(StartCmd) SandboxCmd.AddCommand(StopCmd) SandboxCmd.AddCommand(ArchiveCmd) SandboxCmd.AddCommand(SSHCmd) SandboxCmd.AddCommand(ExecCmd) SandboxCmd.AddCommand(PreviewUrlCmd) } ================================================ FILE: apps/cli/cmd/sandbox/ssh.go ================================================ // Copyright Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "context" "fmt" "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/cmd/common" "github.com/spf13/cobra" ) var SSHCmd = &cobra.Command{ Use: "ssh [SANDBOX_ID] | [SANDBOX_NAME]", Short: "SSH into a sandbox", Long: "Establish an SSH connection to a running sandbox", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient.GetApiClient(nil, nil) if err != nil { return err } sandboxIdOrName := args[0] // Get sandbox to check state sandbox, res, err := apiClient.SandboxAPI.GetSandbox(ctx, sandboxIdOrName).Execute() if err != nil { return apiclient.HandleErrorResponse(res, err) } if err := common.RequireStartedState(sandbox); err != nil { return err } // Create SSH access token sshAccessRequest := apiClient.SandboxAPI.CreateSshAccess(ctx, sandbox.Id) if sshExpiresInMinutes > 0 { sshAccessRequest = sshAccessRequest.ExpiresInMinutes(float32(sshExpiresInMinutes)) } 
sshAccess, res, err := sshAccessRequest.Execute() if err != nil { return apiclient.HandleErrorResponse(res, err) } // Parse the SSH command from the response sshArgs, err := common.ParseSSHCommand(sshAccess.SshCommand) if err != nil { return fmt.Errorf("failed to parse SSH command: %w", err) } // Execute SSH return common.ExecuteSSH(sshArgs) }, } var sshExpiresInMinutes int func init() { SSHCmd.Flags().IntVar(&sshExpiresInMinutes, "expires", 1440, "SSH access token expiration time in minutes (defaults to 24 hours)") } ================================================ FILE: apps/cli/cmd/sandbox/start.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "context" "fmt" "github.com/daytonaio/daytona/cli/apiclient" view_common "github.com/daytonaio/daytona/cli/views/common" "github.com/spf13/cobra" ) var StartCmd = &cobra.Command{ Use: "start [SANDBOX_ID] | [SANDBOX_NAME]", Short: "Start a sandbox", Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient.GetApiClient(nil, nil) if err != nil { return err } sandboxIdOrNameArg := args[0] _, res, err := apiClient.SandboxAPI.StartSandbox(ctx, sandboxIdOrNameArg).Execute() if err != nil { return apiclient.HandleErrorResponse(res, err) } view_common.RenderInfoMessageBold(fmt.Sprintf("Sandbox %s started", sandboxIdOrNameArg)) return nil }, } func init() { } ================================================ FILE: apps/cli/cmd/sandbox/stop.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "context" "fmt" "github.com/daytonaio/daytona/cli/apiclient" view_common "github.com/daytonaio/daytona/cli/views/common" "github.com/spf13/cobra" ) var StopCmd = &cobra.Command{ Use: "stop [SANDBOX_ID] | [SANDBOX_NAME]", Short: "Stop a sandbox", Args: cobra.MaximumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient.GetApiClient(nil, nil) if err != nil { return err } sandboxIdOrNameArg := args[0] _, res, err := apiClient.SandboxAPI.StopSandbox(ctx, sandboxIdOrNameArg).Execute() if err != nil { return apiclient.HandleErrorResponse(res, err) } view_common.RenderInfoMessageBold(fmt.Sprintf("Sandbox %s stopped", sandboxIdOrNameArg)) return nil }, } func init() { } ================================================ FILE: apps/cli/cmd/snapshot/create.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package snapshot import ( "context" "fmt" "strings" "time" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/cmd/common" "github.com/daytonaio/daytona/cli/config" "github.com/daytonaio/daytona/cli/util" view_common "github.com/daytonaio/daytona/cli/views/common" views_util "github.com/daytonaio/daytona/cli/views/util" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "github.com/spf13/cobra" ) var CreateCmd = &cobra.Command{ Use: "create [SNAPSHOT]", Short: "Create a snapshot", Args: cobra.ExactArgs(1), Aliases: common.GetAliases("create"), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() snapshotName := args[0] usingDockerfile := dockerfilePathFlag != "" usingImage := imageNameFlag != "" if !usingDockerfile && !usingImage { return fmt.Errorf("must specify either --dockerfile or --image") } apiClient, err := apiclient_cli.GetApiClient(nil, nil) if err != nil { return err } createSnapshot 
:= apiclient.NewCreateSnapshot(snapshotName) if cpuFlag != 0 { createSnapshot.SetCpu(cpuFlag) } if memoryFlag != 0 { createSnapshot.SetMemory(memoryFlag) } if diskFlag != 0 { createSnapshot.SetDisk(diskFlag) } if regionIdFlag != "" { createSnapshot.SetRegionId(regionIdFlag) } if usingDockerfile { createBuildInfoDto, err := common.GetCreateBuildInfoDto(ctx, dockerfilePathFlag, contextFlag) if err != nil { return err } createSnapshot.SetBuildInfo(*createBuildInfoDto) } else if usingImage { err := common.ValidateImageName(imageNameFlag) if err != nil { return err } createSnapshot.SetImageName(imageNameFlag) if entrypointFlag != "" { createSnapshot.SetEntrypoint(strings.Split(entrypointFlag, " ")) } } else if entrypointFlag != "" { createSnapshot.SetEntrypoint(strings.Split(entrypointFlag, " ")) } // Send create request snapshot, res, err := apiClient.SnapshotsAPI.CreateSnapshot(ctx).CreateSnapshot(*createSnapshot).Execute() if err != nil { return apiclient_cli.HandleErrorResponse(res, err) } // If we're building from a Dockerfile, show build logs if usingDockerfile { c, err := config.GetConfig() if err != nil { return err } activeProfile, err := c.GetActiveProfile() if err != nil { return err } logsContext, stopLogs := context.WithCancel(context.Background()) defer stopLogs() go common.ReadBuildLogs(logsContext, common.ReadLogParams{ Id: snapshot.Id, ServerUrl: activeProfile.Api.Url, ServerApi: activeProfile.Api, ActiveOrganizationId: activeProfile.ActiveOrganizationId, Follow: util.Pointer(true), ResourceType: common.ResourceTypeSnapshot, }) err = common.AwaitSnapshotState(ctx, apiClient, snapshotName, apiclient.SNAPSHOTSTATE_PENDING) if err != nil { return err } // Wait for the last logs to be read time.Sleep(250 * time.Millisecond) stopLogs() } err = views_util.WithInlineSpinner("Waiting for the snapshot to be validated", func() error { return common.AwaitSnapshotState(ctx, apiClient, snapshotName, apiclient.SNAPSHOTSTATE_ACTIVE) }) if err != nil { return err } 
view_common.RenderInfoMessageBold(fmt.Sprintf("Snapshot %s successfully created", snapshotName)) view_common.RenderInfoMessage(fmt.Sprintf("%s Run 'daytona sandbox create --snapshot %s' to create a new sandbox using this snapshot", view_common.Checkmark, snapshotName)) return nil }, } var ( entrypointFlag string imageNameFlag string dockerfilePathFlag string contextFlag []string cpuFlag int32 memoryFlag int32 diskFlag int32 regionIdFlag string ) func init() { CreateCmd.Flags().StringVarP(&entrypointFlag, "entrypoint", "e", "", "The entrypoint command for the snapshot") CreateCmd.Flags().StringVarP(&imageNameFlag, "image", "i", "", "The image name for the snapshot") CreateCmd.Flags().StringVarP(&dockerfilePathFlag, "dockerfile", "f", "", "Path to Dockerfile to build") CreateCmd.Flags().StringArrayVarP(&contextFlag, "context", "c", []string{}, "Files or directories to include in the build context (can be specified multiple times). If not provided, context will be automatically determined from COPY/ADD commands in the Dockerfile") CreateCmd.Flags().Int32Var(&cpuFlag, "cpu", 0, "CPU cores that will be allocated to the underlying sandboxes (default: 1)") CreateCmd.Flags().Int32Var(&memoryFlag, "memory", 0, "Memory that will be allocated to the underlying sandboxes in GB (default: 1)") CreateCmd.Flags().Int32Var(&diskFlag, "disk", 0, "Disk space that will be allocated to the underlying sandboxes in GB (default: 3)") CreateCmd.Flags().StringVar(®ionIdFlag, "region", "", "ID of the region where the snapshot will be available (defaults to organization default region)") CreateCmd.MarkFlagsMutuallyExclusive("image", "dockerfile") CreateCmd.MarkFlagsMutuallyExclusive("image", "context") CreateCmd.MarkFlagsMutuallyExclusive("entrypoint", "dockerfile") CreateCmd.MarkFlagsMutuallyExclusive("entrypoint", "context") } ================================================ FILE: apps/cli/cmd/snapshot/delete.go ================================================ // Copyright 2025 Daytona 
Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package snapshot

import (
	"context"
	"fmt"

	apiclient_cli "github.com/daytonaio/daytona/cli/apiclient"
	"github.com/daytonaio/daytona/cli/cmd/common"
	view_common "github.com/daytonaio/daytona/cli/views/common"
	apiclient "github.com/daytonaio/daytona/libs/api-client-go"
	"github.com/spf13/cobra"
)

// DeleteCmd deletes a single snapshot by ID or name, or every snapshot
// when invoked with --all and no positional argument.
var DeleteCmd = &cobra.Command{
	Use:     "delete [SNAPSHOT_ID | SNAPSHOT_NAME]",
	Short:   "Delete a snapshot",
	Args:    cobra.MaximumNArgs(1),
	Aliases: common.GetAliases("delete"),
	RunE: func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()

		apiClient, err := apiclient_cli.GetApiClient(nil, nil)
		if err != nil {
			return err
		}

		// Handle case when no snapshot ID is provided and allFlag is true
		if len(args) == 0 {
			if allFlag {
				page := float32(1.0)
				limit := float32(200.0) // 200 is the maximum limit for the API

				// Collect every snapshot first, then delete — deleting while
				// paginating would shift pages under us.
				var allSnapshots []apiclient.SnapshotDto
				for {
					snapshotBatch, res, err := apiClient.SnapshotsAPI.GetAllSnapshots(ctx).Page(page).Limit(limit).Execute()
					if err != nil {
						return apiclient_cli.HandleErrorResponse(res, err)
					}
					allSnapshots = append(allSnapshots, snapshotBatch.Items...)
					// Stop on a short page or when the reported page count is reached.
					if len(snapshotBatch.Items) < int(limit) || page >= snapshotBatch.TotalPages {
						break
					}
					page++
				}

				if len(allSnapshots) == 0 {
					view_common.RenderInfoMessageBold("No snapshots to delete")
					return nil
				}

				// Best-effort bulk delete: individual failures are reported
				// but do not abort the remaining deletions.
				var deletedCount int
				for _, snapshot := range allSnapshots {
					res, err := apiClient.SnapshotsAPI.RemoveSnapshot(ctx, snapshot.Id).Execute()
					if err != nil {
						fmt.Printf("Failed to delete snapshot %s: %s\n", snapshot.Id, apiclient_cli.HandleErrorResponse(res, err))
					} else {
						deletedCount++
					}
				}

				view_common.RenderInfoMessageBold(fmt.Sprintf("Deleted %d snapshots", deletedCount))
				return nil
			}
			// No argument and no --all: show usage instead of guessing.
			return cmd.Help()
		}

		snapshotIdOrName := args[0]

		// Resolve the name/ID to a concrete snapshot before removal.
		snapshot, res, err := apiClient.SnapshotsAPI.GetSnapshot(ctx, snapshotIdOrName).Execute()
		if err != nil {
			return apiclient_cli.HandleErrorResponse(res, err)
		}

		res, err = apiClient.SnapshotsAPI.RemoveSnapshot(ctx, snapshot.Id).Execute()
		if err != nil {
			return apiclient_cli.HandleErrorResponse(res, err)
		}

		view_common.RenderInfoMessageBold(fmt.Sprintf("Snapshot %s deleted", snapshotIdOrName))
		return nil
	},
}

// allFlag backs --all (delete every snapshot when no argument is given).
var allFlag bool

func init() {
	DeleteCmd.Flags().BoolVarP(&allFlag, "all", "a", false, "Delete all snapshots")
}

================================================ FILE: apps/cli/cmd/snapshot/list.go ================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package snapshot import ( "context" "fmt" "github.com/daytonaio/daytona/cli/apiclient" "github.com/daytonaio/daytona/cli/cmd/common" "github.com/daytonaio/daytona/cli/config" "github.com/daytonaio/daytona/cli/views/snapshot" "github.com/spf13/cobra" ) var ( pageFlag int limitFlag int ) var ListCmd = &cobra.Command{ Use: "list", Short: "List all snapshots", Long: "List all available Daytona snapshots", Aliases: common.GetAliases("list"), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() apiClient, err := apiclient.GetApiClient(nil, nil) if err != nil { return err } page := float32(1.0) limit := float32(100.0) if cmd.Flags().Changed("page") { page = float32(pageFlag) } if cmd.Flags().Changed("limit") { limit = float32(limitFlag) } snapshots, res, err := apiClient.SnapshotsAPI.GetAllSnapshots(ctx).Page(page).Limit(limit).Execute() if err != nil { fmt.Printf("Error: %v\n", err) return apiclient.HandleErrorResponse(res, err) } if common.FormatFlag != "" { formattedData := common.NewFormatter(snapshots.Items) formattedData.Print() return nil } var activeOrganizationName *string if !config.IsApiKeyAuth() { name, err := common.GetActiveOrganizationName(apiClient, ctx) if err != nil { return err } activeOrganizationName = &name } snapshot.ListSnapshots(snapshots.Items, activeOrganizationName) return nil }, } func init() { common.RegisterFormatFlag(ListCmd) ListCmd.Flags().IntVarP(&pageFlag, "page", "p", 1, "Page number for pagination (starting from 1)") ListCmd.Flags().IntVarP(&limitFlag, "limit", "l", 100, "Maximum number of items per page") } ================================================ FILE: apps/cli/cmd/snapshot/push.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package snapshot

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
	"strings"
	"time"

	apiclient_cli "github.com/daytonaio/daytona/cli/apiclient"
	"github.com/daytonaio/daytona/cli/cmd/common"
	"github.com/daytonaio/daytona/cli/docker"
	views_common "github.com/daytonaio/daytona/cli/views/common"
	views_util "github.com/daytonaio/daytona/cli/views/util"
	apiclient "github.com/daytonaio/daytona/libs/api-client-go"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/spf13/cobra"
)

// PushCmd pushes a locally built Docker image to Daytona's transient
// registry and registers it as a snapshot. Flow: validate local image ->
// obtain push credentials -> tag & push -> create snapshot -> await validation.
var PushCmd = &cobra.Command{
	Use:   "push [SNAPSHOT]",
	Short: "Push local snapshot",
	Long:  "Push a local Docker image to Daytona. To securely build it on our infrastructure, use 'daytona snapshot build'",
	Args:  cobra.ExactArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()
		sourceImage := args[0]

		err := common.ValidateImageName(sourceImage)
		if err != nil {
			return err
		}

		// Talk to the local Docker daemon; version negotiation keeps this
		// working across daemon versions.
		dockerClient, err := client.NewClientWithOpts(
			client.FromEnv,
			client.WithAPIVersionNegotiation(),
		)
		if err != nil {
			return fmt.Errorf("failed to create Docker client: %w", err)
		}
		defer dockerClient.Close()

		// Check if the image exists locally when not building
		if exists, err := docker.ImageExistsLocally(ctx, dockerClient, sourceImage); err != nil {
			return err
		} else if !exists {
			return fmt.Errorf("image '%s' not found locally. Please ensure the image exists and try again", sourceImage)
		}

		// Validate image architecture
		isArchAmd, err := docker.CheckAmdArchitecture(ctx, dockerClient, sourceImage)
		if err != nil {
			return fmt.Errorf("failed to check image architecture: %w", err)
		}

		if !isArchAmd {
			return fmt.Errorf("image '%s' is not compatible with AMD architecture", sourceImage)
		}

		apiClient, err := apiclient_cli.GetApiClient(nil, nil)
		if err != nil {
			return err
		}

		// Short-lived credentials for the transient push registry.
		pushAccessRequest := apiClient.DockerRegistryAPI.GetTransientPushAccess(ctx)
		if regionIdFlag != "" {
			pushAccessRequest = pushAccessRequest.RegionId(regionIdFlag)
		}

		tokenResponse, res, err := pushAccessRequest.Execute()
		if err != nil {
			return apiclient_cli.HandleErrorResponse(res, err)
		}

		encodedAuthConfig, err := json.Marshal(registry.AuthConfig{
			Username:      tokenResponse.Username,
			Password:      tokenResponse.Secret,
			ServerAddress: tokenResponse.RegistryUrl,
		})
		if err != nil {
			return fmt.Errorf("failed to marshal auth config: %w", err)
		}

		// Extract image name without tag and create timestamp-based tag
		imageName := sourceImage
		if colonIndex := strings.LastIndex(sourceImage, ":"); colonIndex != -1 {
			imageName = sourceImage[:colonIndex]
		}

		// Generate timestamp-based tag to avoid inconsistencies
		// 20060102150405 is the format of the timestamp (year, month, day, hour, minute, second)
		timestamp := time.Now().Format("20060102150405")
		targetImage := fmt.Sprintf("%s/%s/%s:%s", tokenResponse.RegistryUrl, tokenResponse.Project, imageName, timestamp)

		err = dockerClient.ImageTag(ctx, sourceImage, targetImage)
		if err != nil {
			return fmt.Errorf("failed to tag image: %w", err)
		}

		// Push image to transient registry
		pushReader, err := dockerClient.ImagePush(ctx, targetImage, image.PushOptions{
			RegistryAuth: base64.URLEncoding.EncodeToString(encodedAuthConfig),
		})
		if err != nil {
			return fmt.Errorf("failed to push image: %w", err)
		}
		defer pushReader.Close()

		// Stream push progress to the terminal until the push completes.
		err = jsonmessage.DisplayJSONMessagesStream(pushReader, os.Stdout, 0, true, nil)
		if err != nil {
			return err
		}

		createSnapshot := apiclient.NewCreateSnapshot(nameFlag)
		createSnapshot.SetImageName(targetImage)

		if entrypointFlag != "" {
			createSnapshot.SetEntrypoint(strings.Split(entrypointFlag, " "))
		}

		// Poll until the image is really available on the registry
		// This is a workaround for harbor's delay in making newly created images available
		// NOTE(review): this loop has no retry cap or timeout — if the image
		// never becomes resolvable it spins forever; consider bounding it.
		for {
			_, err := dockerClient.DistributionInspect(ctx, targetImage, base64.URLEncoding.EncodeToString(encodedAuthConfig))
			if err == nil {
				break
			}
			time.Sleep(time.Second)
		}

		// Zero values mean "not set": fall back to server-side defaults.
		if cpuFlag != 0 {
			createSnapshot.SetCpu(cpuFlag)
		}
		if memoryFlag != 0 {
			createSnapshot.SetMemory(memoryFlag)
		}
		if diskFlag != 0 {
			createSnapshot.SetDisk(diskFlag)
		}
		if regionIdFlag != "" {
			createSnapshot.SetRegionId(regionIdFlag)
		}

		_, res, err = apiClient.SnapshotsAPI.CreateSnapshot(ctx).CreateSnapshot(*createSnapshot).Execute()
		if err != nil {
			return apiclient_cli.HandleErrorResponse(res, err)
		}

		views_common.RenderInfoMessageBold(fmt.Sprintf("Successfully pushed %s to Daytona", sourceImage))

		err = views_util.WithInlineSpinner("Waiting for the snapshot to be validated", func() error {
			return common.AwaitSnapshotState(ctx, apiClient, nameFlag, apiclient.SNAPSHOTSTATE_ACTIVE)
		})
		if err != nil {
			return err
		}

		views_common.RenderInfoMessage(fmt.Sprintf("%s Use '%s' to create a new sandbox using this snapshot", views_common.Checkmark, nameFlag))
		return nil
	},
}

// nameFlag backs --name (required): the snapshot name to register.
// The other flags used above (entrypointFlag, cpuFlag, ...) are shared
// package-level flags declared in create.go.
var (
	nameFlag string
)

func init() {
	PushCmd.Flags().StringVarP(&entrypointFlag, "entrypoint", "e", "", "The entrypoint command for the image")
	PushCmd.Flags().StringVarP(&nameFlag, "name", "n", "", "Specify the Snapshot name")
	PushCmd.Flags().Int32Var(&cpuFlag, "cpu", 0, "CPU cores that will be allocated to the underlying sandboxes (default: 1)")
	PushCmd.Flags().Int32Var(&memoryFlag, "memory", 0, "Memory that will be allocated to the underlying sandboxes in GB (default: 1)")
	PushCmd.Flags().Int32Var(&diskFlag, "disk", 0, "Disk space that will be allocated to the underlying sandboxes in GB (default: 3)")
	PushCmd.Flags().StringVar(&regionIdFlag, "region", "", "ID of the region where the snapshot will be available (defaults to organization default region)")
	_ = PushCmd.MarkFlagRequired("name")
}

================================================ FILE: apps/cli/cmd/snapshot/snapshot.go ================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package snapshot

import (
	"github.com/daytonaio/daytona/cli/internal"
	"github.com/spf13/cobra"
)

// SnapshotsCmd groups the snapshot subcommands under `daytona snapshot`.
var SnapshotsCmd = &cobra.Command{
	Use:     "snapshot",
	Short:   "Manage Daytona snapshots",
	Long:    "Commands for managing Daytona snapshots",
	Aliases: []string{"snapshots"},
	GroupID: internal.SANDBOX_GROUP,
}

func init() {
	SnapshotsCmd.AddCommand(ListCmd)
	SnapshotsCmd.AddCommand(CreateCmd)
	SnapshotsCmd.AddCommand(PushCmd)
	SnapshotsCmd.AddCommand(DeleteCmd)
}

================================================ FILE: apps/cli/cmd/version.go ================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package cmd

import (
	"fmt"

	"github.com/daytonaio/daytona/cli/internal"
	"github.com/spf13/cobra"
)

// VersionCmd prints the CLI version baked in at build time.
var VersionCmd = &cobra.Command{
	Use:   "version",
	Short: "Print the version number",
	RunE: func(cmd *cobra.Command, args []string) error {
		fmt.Println("Daytona CLI version", internal.Version)
		return nil
	},
}

================================================ FILE: apps/cli/cmd/volume/create.go ================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package volume

import (
	"context"
	"fmt"

	apiclient_cli "github.com/daytonaio/daytona/cli/apiclient"
	"github.com/daytonaio/daytona/cli/cmd/common"
	view_common "github.com/daytonaio/daytona/cli/views/common"
	apiclient "github.com/daytonaio/daytona/libs/api-client-go"
	"github.com/spf13/cobra"
)

// CreateCmd creates a new volume with the given name.
var CreateCmd = &cobra.Command{
	Use:     "create [NAME]",
	Short:   "Create a volume",
	Args:    cobra.ExactArgs(1),
	Aliases: common.GetAliases("create"),
	RunE: func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()

		apiClient, err := apiclient_cli.GetApiClient(nil, nil)
		if err != nil {
			return err
		}

		// NOTE(review): only Name is sent to the API — the --size flag
		// registered in init() is never forwarded here. Confirm whether the
		// CreateVolume DTO supports a size field and wire sizeFlag through,
		// or remove the flag.
		volume, res, err := apiClient.VolumesAPI.CreateVolume(ctx).CreateVolume(apiclient.CreateVolume{
			Name: args[0],
		}).Execute()
		if err != nil {
			return apiclient_cli.HandleErrorResponse(res, err)
		}

		view_common.RenderInfoMessageBold(fmt.Sprintf("Volume %s successfully created", volume.Name))
		return nil
	},
}

// sizeFlag backs --size; see NOTE in RunE — currently not sent with the request.
var sizeFlag int32

func init() {
	CreateCmd.Flags().Int32VarP(&sizeFlag, "size", "s", 10, "Size of the volume in GB")
}

================================================ FILE: apps/cli/cmd/volume/delete.go ================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package volume

import (
	"context"
	"fmt"

	"github.com/daytonaio/daytona/cli/apiclient"
	"github.com/daytonaio/daytona/cli/cmd/common"
	view_common "github.com/daytonaio/daytona/cli/views/common"
	"github.com/spf13/cobra"
)

// DeleteCmd deletes a volume by ID.
var DeleteCmd = &cobra.Command{
	Use:     "delete [VOLUME_ID]",
	Short:   "Delete a volume",
	Args:    cobra.ExactArgs(1),
	Aliases: common.GetAliases("delete"),
	RunE: func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()

		apiClient, err := apiclient.GetApiClient(nil, nil)
		if err != nil {
			return err
		}

		res, err := apiClient.VolumesAPI.DeleteVolume(ctx, args[0]).Execute()
		if err != nil {
			return apiclient.HandleErrorResponse(res, err)
		}

		view_common.RenderInfoMessageBold(fmt.Sprintf("Volume %s deleted", args[0]))
		return nil
	},
}

================================================ FILE: apps/cli/cmd/volume/get.go ================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package volume

import (
	"context"

	"github.com/daytonaio/daytona/cli/apiclient"
	"github.com/daytonaio/daytona/cli/cmd/common"
	"github.com/daytonaio/daytona/cli/views/volume"
	"github.com/spf13/cobra"
)

// GetCmd prints the details of a single volume, either formatted
// (--format) or via the volume info view.
var GetCmd = &cobra.Command{
	Use:     "get [VOLUME_ID]",
	Short:   "Get volume details",
	Args:    cobra.ExactArgs(1),
	Aliases: common.GetAliases("get"),
	RunE: func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()

		apiClient, err := apiclient.GetApiClient(nil, nil)
		if err != nil {
			return err
		}

		vol, res, err := apiClient.VolumesAPI.GetVolume(ctx, args[0]).Execute()
		if err != nil {
			return apiclient.HandleErrorResponse(res, err)
		}

		if common.FormatFlag != "" {
			formattedData := common.NewFormatter(vol)
			formattedData.Print()
			return nil
		}

		volume.RenderInfo(vol, false)
		return nil
	},
}

func init() {
	common.RegisterFormatFlag(GetCmd)
}

================================================ FILE: apps/cli/cmd/volume/list.go ================================================
// Copyright 2025
Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package volume

import (
	"context"

	"github.com/daytonaio/daytona/cli/apiclient"
	"github.com/daytonaio/daytona/cli/cmd/common"
	"github.com/daytonaio/daytona/cli/config"
	"github.com/daytonaio/daytona/cli/views/volume"
	"github.com/spf13/cobra"
)

// ListCmd lists all volumes, either formatted (--format) or via the
// volume table view.
var ListCmd = &cobra.Command{
	Use:     "list",
	Short:   "List all volumes",
	Args:    cobra.NoArgs,
	Aliases: common.GetAliases("list"),
	RunE: func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()

		apiClient, err := apiclient.GetApiClient(nil, nil)
		if err != nil {
			return err
		}

		volumes, res, err := apiClient.VolumesAPI.ListVolumes(ctx).Execute()
		if err != nil {
			return apiclient.HandleErrorResponse(res, err)
		}

		if common.FormatFlag != "" {
			formattedData := common.NewFormatter(volumes)
			formattedData.Print()
			return nil
		}

		// The organization name is only resolved for non-API-key sessions.
		var activeOrganizationName *string

		if !config.IsApiKeyAuth() {
			name, err := common.GetActiveOrganizationName(apiClient, ctx)
			if err != nil {
				return err
			}
			activeOrganizationName = &name
		}

		volume.ListVolumes(volumes, activeOrganizationName)
		return nil
	},
}

func init() {
	common.RegisterFormatFlag(ListCmd)
}

================================================ FILE: apps/cli/cmd/volume/volume.go ================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package volume

import (
	"github.com/daytonaio/daytona/cli/internal"
	"github.com/spf13/cobra"
)

// VolumeCmd groups the volume subcommands under `daytona volume`.
var VolumeCmd = &cobra.Command{
	Use:     "volume",
	Short:   "Manage Daytona volumes",
	Long:    "Commands for managing Daytona volumes",
	Aliases: []string{"volumes"},
	GroupID: internal.SANDBOX_GROUP,
}

func init() {
	VolumeCmd.AddCommand(ListCmd)
	VolumeCmd.AddCommand(CreateCmd)
	VolumeCmd.AddCommand(GetCmd)
	VolumeCmd.AddCommand(DeleteCmd)
}

================================================ FILE: apps/cli/config/config.go ================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package config import ( "encoding/json" "errors" "fmt" "os" "path/filepath" "time" "github.com/daytonaio/daytona/cli/cmd" "github.com/daytonaio/daytona/cli/internal" ) const DAYTONA_API_URL_ENV_VAR = "DAYTONA_API_URL" const DAYTONA_API_KEY_ENV_VAR = "DAYTONA_API_KEY" type Config struct { ActiveProfileId string `json:"activeProfile"` Profiles []Profile `json:"profiles"` } type Profile struct { Id string `json:"id"` Name string `json:"name"` Api ServerApi `json:"api"` ActiveOrganizationId *string `json:"activeOrganizationId"` ToolboxProxyUrls map[string]string `json:"toolboxProxyUrls,omitempty"` // Cache proxy URLs by region } type ServerApi struct { Url string `json:"url"` Key *string `json:"key"` Token *Token `json:"token"` } type Token struct { AccessToken string `json:"accessToken"` RefreshToken string `json:"refreshToken"` ExpiresAt time.Time `json:"expiresAt"` } func GetConfig() (*Config, error) { configFilePath, err := getConfigPath() if err != nil { return nil, err } _, err = os.Stat(configFilePath) if os.IsNotExist(err) { // Setup autocompletion when adding initial config _ = cmd.DetectShellAndSetupAutocompletion(cmd.AutoCompleteCmd.Root()) config := &Config{} return config, config.Save() } if err != nil { return nil, err } var c Config configContent, err := os.ReadFile(configFilePath) if err != nil { return nil, err } err = json.Unmarshal(configContent, &c) if err != nil { return nil, err } return &c, nil } var ErrNoProfilesFound = errors.New("no profiles found. 
Run `daytona login` to authenticate") func (c *Config) GetActiveProfile() (Profile, error) { apiUrl := os.Getenv(DAYTONA_API_URL_ENV_VAR) apiKey := os.Getenv(DAYTONA_API_KEY_ENV_VAR) if apiUrl != "" && apiKey != "" { return Profile{ Id: "env", Api: ServerApi{ Url: apiUrl, Key: &apiKey, }, }, nil } if len(c.Profiles) == 0 { return Profile{}, ErrNoProfilesFound } for _, profile := range c.Profiles { if profile.Id == c.ActiveProfileId { return profile, nil } } return Profile{}, ErrNoActiveProfile } var ErrNoActiveProfile = errors.New("no active profile found. Run `daytona login` to authenticate") var ErrNoActiveOrganization = errors.New("no active organization found. Run `daytona organization use` to select an organization") func (c *Config) Save() error { configFilePath, err := getConfigPath() if err != nil { return err } err = os.MkdirAll(filepath.Dir(configFilePath), 0755) if err != nil { return err } configContent, err := json.MarshalIndent(c, "", " ") if err != nil { return err } return os.WriteFile(configFilePath, configContent, 0644) } func (c *Config) AddProfile(profile Profile) error { c.Profiles = append(c.Profiles, profile) c.ActiveProfileId = profile.Id return c.Save() } func (c *Config) EditProfile(profile Profile) error { for i, p := range c.Profiles { if p.Id == profile.Id { c.Profiles[i] = profile return c.Save() } } return fmt.Errorf("profile with id %s not found", profile.Id) } func (c *Config) RemoveProfile(profileId string) error { if c.ActiveProfileId == profileId { return errors.New("cannot remove active profile") } var profiles []Profile for _, profile := range c.Profiles { if profile.Id != profileId { profiles = append(profiles, profile) } } c.Profiles = profiles return c.Save() } func (c *Config) GetProfile(profileId string) (Profile, error) { for _, profile := range c.Profiles { if profile.Id == profileId { return profile, nil } } return Profile{}, errors.New("profile not found") } func getConfigPath() (string, error) { configDir, err := 
GetConfigDir() if err != nil { return "", err } return filepath.Join(configDir, "config.json"), nil } func GetConfigDir() (string, error) { daytonaConfigDir := os.Getenv("DAYTONA_CONFIG_DIR") if daytonaConfigDir != "" { return daytonaConfigDir, nil } userConfigDir, err := os.UserConfigDir() if err != nil { return "", err } return filepath.Join(userConfigDir, "daytona"), nil } func DeleteConfigDir() error { configDir, err := GetConfigDir() if err != nil { return err } return os.RemoveAll(configDir) } func GetActiveOrganizationId() (string, error) { c, err := GetConfig() if err != nil { return "", err } activeProfile, err := c.GetActiveProfile() if err != nil { return "", err } if activeProfile.ActiveOrganizationId == nil { return "", ErrNoActiveOrganization } return *activeProfile.ActiveOrganizationId, nil } func IsApiKeyAuth() bool { c, err := GetConfig() if err != nil { return false } activeProfile, err := c.GetActiveProfile() if err != nil { return false } return activeProfile.Api.Key != nil && activeProfile.Api.Token == nil } func GetAuth0Domain() string { auth0Domain := os.Getenv("DAYTONA_AUTH0_DOMAIN") if auth0Domain == "" { auth0Domain = internal.Auth0Domain } return auth0Domain } func GetAuth0ClientId() string { auth0ClientId := os.Getenv("DAYTONA_AUTH0_CLIENT_ID") if auth0ClientId == "" { auth0ClientId = internal.Auth0ClientId } return auth0ClientId } func GetAuth0ClientSecret() string { auth0ClientSecret := os.Getenv("DAYTONA_AUTH0_CLIENT_SECRET") if auth0ClientSecret == "" { auth0ClientSecret = internal.Auth0ClientSecret } return auth0ClientSecret } func GetAuth0CallbackPort() string { auth0CallbackPort := os.Getenv("DAYTONA_AUTH0_CALLBACK_PORT") if auth0CallbackPort == "" { auth0CallbackPort = internal.Auth0CallbackPort } return auth0CallbackPort } func GetAuth0Audience() string { auth0Audience := os.Getenv("DAYTONA_AUTH0_AUDIENCE") if auth0Audience == "" { auth0Audience = internal.Auth0Audience } return auth0Audience } func GetDaytonaApiUrl() string { 
daytonaApiUrl := os.Getenv("DAYTONA_API_URL") if daytonaApiUrl == "" { daytonaApiUrl = internal.DaytonaApiUrl } return daytonaApiUrl } func GetToolboxProxyUrl(region string) (string, error) { c, err := GetConfig() if err != nil { return "", err } activeProfile, err := c.GetActiveProfile() if err != nil { return "", err } if activeProfile.ToolboxProxyUrls == nil { return "", nil } return activeProfile.ToolboxProxyUrls[region], nil } func SetToolboxProxyUrl(region, url string) error { c, err := GetConfig() if err != nil { return err } // Find and update the active profile for i, profile := range c.Profiles { if profile.Id == c.ActiveProfileId { if c.Profiles[i].ToolboxProxyUrls == nil { c.Profiles[i].ToolboxProxyUrls = make(map[string]string) } c.Profiles[i].ToolboxProxyUrls[region] = url return c.Save() } } return ErrNoActiveProfile } ================================================ FILE: apps/cli/docker/build.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package docker

import (
	"context"
	"fmt"
	"slices"

	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/client"
)

// ImageExistsLocally reports whether an image whose repo tag exactly matches
// imageName is present in the local Docker daemon's image store.
func ImageExistsLocally(ctx context.Context, dockerClient *client.Client, imageName string) (bool, error) {
	images, err := dockerClient.ImageList(ctx, image.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("failed to list images: %w", err)
	}
	// Scan every tag of every local image; only an exact string match counts
	// (no registry/namespace/"latest" normalization is performed here).
	for _, image := range images {
		for _, tag := range image.RepoTags {
			if tag == imageName {
				return true, nil
			}
		}
	}
	return false, nil
}

// CheckAmdArchitecture reports whether the named local image was built for an
// x86-64 architecture (the daemon may report either "amd64" or "x86_64").
func CheckAmdArchitecture(ctx context.Context, dockerClient *client.Client, imageName string) (bool, error) {
	inspect, err := dockerClient.ImageInspect(ctx, imageName)
	if err != nil {
		return false, fmt.Errorf("failed to inspect image: %w", err)
	}
	x64Architectures := []string{"amd64", "x86_64"}
	if slices.Contains(x64Architectures, inspect.Architecture) {
		return true, nil
	}
	return false, nil
}

================================================ FILE: apps/cli/docs/daytona.md ================================================ ## daytona Daytona CLI ### Synopsis Command line interface for Daytona Sandboxes ``` daytona [flags] ``` ### Options ``` --help help for daytona -v, --version Display the version of Daytona ``` ### SEE ALSO * [daytona archive](daytona_archive.md) - Archive a sandbox * [daytona autocomplete](daytona_autocomplete.md) - Adds a completion script for your shell environment * [daytona create](daytona_create.md) - Create a new sandbox * [daytona delete](daytona_delete.md) - Delete a sandbox * [daytona docs](daytona_docs.md) - Opens the Daytona documentation in your default browser.
* [daytona exec](daytona_exec.md) - Execute a command in a sandbox * [daytona info](daytona_info.md) - Get sandbox info * [daytona list](daytona_list.md) - List sandboxes * [daytona login](daytona_login.md) - Log in to Daytona * [daytona logout](daytona_logout.md) - Logout from Daytona * [daytona mcp](daytona_mcp.md) - Manage Daytona MCP Server * [daytona organization](daytona_organization.md) - Manage Daytona organizations * [daytona preview-url](daytona_preview-url.md) - Get signed preview URL for a sandbox port * [daytona snapshot](daytona_snapshot.md) - Manage Daytona snapshots * [daytona ssh](daytona_ssh.md) - SSH into a sandbox * [daytona start](daytona_start.md) - Start a sandbox * [daytona stop](daytona_stop.md) - Stop a sandbox * [daytona version](daytona_version.md) - Print the version number * [daytona volume](daytona_volume.md) - Manage Daytona volumes ================================================ FILE: apps/cli/docs/daytona_archive.md ================================================ ## daytona archive Archive a sandbox ``` daytona archive [SANDBOX_ID] | [SANDBOX_NAME] [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_autocomplete.md ================================================ ## daytona autocomplete Adds a completion script for your shell environment ``` daytona autocomplete [bash|zsh|fish|powershell] [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_create.md ================================================ ## daytona create Create a new sandbox ``` daytona create [flags] ``` ### Options ``` --auto-archive int32 Auto-archive interval in minutes (0 means the maximum interval will be used) (default 10080) --auto-delete 
int32 Auto-delete interval in minutes (negative value means disabled, 0 means delete immediately upon stopping) (default -1) --auto-stop int32 Auto-stop interval in minutes (0 means disabled) (default 15) --class string Sandbox class type (small, medium, large) -c, --context stringArray Files or directories to include in the build context (can be specified multiple times) --cpu int32 CPU cores allocated to the sandbox --disk int32 Disk space allocated to the sandbox in GB -f, --dockerfile string Path to Dockerfile for Sandbox snapshot -e, --env stringArray Environment variables (format: KEY=VALUE) --gpu int32 GPU units allocated to the sandbox -l, --label stringArray Labels (format: KEY=VALUE) --memory int32 Memory allocated to the sandbox in MB --name string Name of the sandbox --network-allow-list string Comma-separated list of allowed CIDR network addresses for the sandbox --network-block-all Whether to block all network access for the sandbox --public Make sandbox publicly accessible --snapshot string Snapshot to use for the sandbox --target string Target region (eu, us) --user string User associated with the sandbox -v, --volume stringArray Volumes to mount (format: VOLUME_NAME:MOUNT_PATH) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_delete.md ================================================ ## daytona delete Delete a sandbox ``` daytona delete [SANDBOX_ID] | [SANDBOX_NAME] [flags] ``` ### Options ``` -a, --all Delete all sandboxes ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_docs.md ================================================ ## daytona docs Opens the Daytona documentation in your default browser. 
``` daytona docs [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_exec.md ================================================ ## daytona exec Execute a command in a sandbox ### Synopsis Execute a command in a running sandbox ``` daytona exec [SANDBOX_ID | SANDBOX_NAME] -- [COMMAND] [ARGS...] [flags] ``` ### Options ``` --cwd string Working directory for command execution --timeout int Command timeout in seconds (0 for no timeout) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_info.md ================================================ ## daytona info Get sandbox info ``` daytona info [SANDBOX_ID] | [SANDBOX_NAME] [flags] ``` ### Options ``` -f, --format string Output format. Must be one of (yaml, json) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_list.md ================================================ ## daytona list List sandboxes ``` daytona list [flags] ``` ### Options ``` -f, --format string Output format. 
Must be one of (yaml, json) -l, --limit int Maximum number of items per page (default 100) -p, --page int Page number for pagination (starting from 1) (default 1) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_login.md ================================================ ## daytona login Log in to Daytona ``` daytona login [flags] ``` ### Options ``` --api-key string API key to use for authentication ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_logout.md ================================================ ## daytona logout Logout from Daytona ``` daytona logout [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_mcp.md ================================================ ## daytona mcp Manage Daytona MCP Server ### Synopsis Commands for managing Daytona MCP Server ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI * [daytona mcp config](daytona_mcp_config.md) - Outputs JSON configuration for Daytona MCP Server * [daytona mcp init](daytona_mcp_init.md) - Initialize Daytona MCP Server with an agent (currently supported: claude, windsurf, cursor) * [daytona mcp start](daytona_mcp_start.md) - Start Daytona MCP Server ================================================ FILE: apps/cli/docs/daytona_mcp_config.md ================================================ ## daytona mcp config Outputs JSON configuration for Daytona MCP Server ``` daytona mcp config [AGENT_NAME] [flags] ``` ### Options inherited from parent commands ``` --help help 
for daytona ``` ### SEE ALSO * [daytona mcp](daytona_mcp.md) - Manage Daytona MCP Server ================================================ FILE: apps/cli/docs/daytona_mcp_init.md ================================================ ## daytona mcp init Initialize Daytona MCP Server with an agent (currently supported: claude, windsurf, cursor) ``` daytona mcp init [AGENT_NAME] [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona mcp](daytona_mcp.md) - Manage Daytona MCP Server ================================================ FILE: apps/cli/docs/daytona_mcp_start.md ================================================ ## daytona mcp start Start Daytona MCP Server ``` daytona mcp start [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona mcp](daytona_mcp.md) - Manage Daytona MCP Server ================================================ FILE: apps/cli/docs/daytona_organization.md ================================================ ## daytona organization Manage Daytona organizations ### Synopsis Commands for managing Daytona organizations ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI * [daytona organization create](daytona_organization_create.md) - Create a new organization and set it as active * [daytona organization delete](daytona_organization_delete.md) - Delete an organization * [daytona organization list](daytona_organization_list.md) - List all organizations * [daytona organization use](daytona_organization_use.md) - Set active organization ================================================ FILE: apps/cli/docs/daytona_organization_create.md ================================================ ## daytona organization create Create a new organization and set it as active ``` daytona organization create [ORGANIZATION_NAME] [flags] ``` ### Options inherited from parent commands ``` --help help 
for daytona ``` ### SEE ALSO * [daytona organization](daytona_organization.md) - Manage Daytona organizations ================================================ FILE: apps/cli/docs/daytona_organization_delete.md ================================================ ## daytona organization delete Delete an organization ``` daytona organization delete [ORGANIZATION] [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona organization](daytona_organization.md) - Manage Daytona organizations ================================================ FILE: apps/cli/docs/daytona_organization_list.md ================================================ ## daytona organization list List all organizations ``` daytona organization list [flags] ``` ### Options ``` -f, --format string Output format. Must be one of (yaml, json) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona organization](daytona_organization.md) - Manage Daytona organizations ================================================ FILE: apps/cli/docs/daytona_organization_use.md ================================================ ## daytona organization use Set active organization ``` daytona organization use [ORGANIZATION] [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona organization](daytona_organization.md) - Manage Daytona organizations ================================================ FILE: apps/cli/docs/daytona_preview-url.md ================================================ ## daytona preview-url Get signed preview URL for a sandbox port ``` daytona preview-url [SANDBOX_ID | SANDBOX_NAME] [flags] ``` ### Options ``` --expires int32 URL expiration time in seconds (default 3600) -p, --port int32 Port number to get preview URL for (required) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI 
================================================ FILE: apps/cli/docs/daytona_snapshot.md ================================================ ## daytona snapshot Manage Daytona snapshots ### Synopsis Commands for managing Daytona snapshots ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI * [daytona snapshot create](daytona_snapshot_create.md) - Create a snapshot * [daytona snapshot delete](daytona_snapshot_delete.md) - Delete a snapshot * [daytona snapshot list](daytona_snapshot_list.md) - List all snapshots * [daytona snapshot push](daytona_snapshot_push.md) - Push local snapshot ================================================ FILE: apps/cli/docs/daytona_snapshot_create.md ================================================ ## daytona snapshot create Create a snapshot ``` daytona snapshot create [SNAPSHOT] [flags] ``` ### Options ``` -c, --context stringArray Files or directories to include in the build context (can be specified multiple times). 
If not provided, context will be automatically determined from COPY/ADD commands in the Dockerfile --cpu int32 CPU cores that will be allocated to the underlying sandboxes (default: 1) --disk int32 Disk space that will be allocated to the underlying sandboxes in GB (default: 3) -f, --dockerfile string Path to Dockerfile to build -e, --entrypoint string The entrypoint command for the snapshot -i, --image string The image name for the snapshot --memory int32 Memory that will be allocated to the underlying sandboxes in GB (default: 1) --region string ID of the region where the snapshot will be available (defaults to organization default region) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona snapshot](daytona_snapshot.md) - Manage Daytona snapshots ================================================ FILE: apps/cli/docs/daytona_snapshot_delete.md ================================================ ## daytona snapshot delete Delete a snapshot ``` daytona snapshot delete [SNAPSHOT_ID] [flags] ``` ### Options ``` -a, --all Delete all snapshots ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona snapshot](daytona_snapshot.md) - Manage Daytona snapshots ================================================ FILE: apps/cli/docs/daytona_snapshot_list.md ================================================ ## daytona snapshot list List all snapshots ### Synopsis List all available Daytona snapshots ``` daytona snapshot list [flags] ``` ### Options ``` -f, --format string Output format. 
Must be one of (yaml, json) -l, --limit int Maximum number of items per page (default 100) -p, --page int Page number for pagination (starting from 1) (default 1) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona snapshot](daytona_snapshot.md) - Manage Daytona snapshots ================================================ FILE: apps/cli/docs/daytona_snapshot_push.md ================================================ ## daytona snapshot push Push local snapshot ### Synopsis Push a local Docker image to Daytona. To securely build it on our infrastructure, use 'daytona snapshot build' ``` daytona snapshot push [SNAPSHOT] [flags] ``` ### Options ``` --cpu int32 CPU cores that will be allocated to the underlying sandboxes (default: 1) --disk int32 Disk space that will be allocated to the underlying sandboxes in GB (default: 3) -e, --entrypoint string The entrypoint command for the image --memory int32 Memory that will be allocated to the underlying sandboxes in GB (default: 1) -n, --name string Specify the Snapshot name --region string ID of the region where the snapshot will be available (defaults to organization default region) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona snapshot](daytona_snapshot.md) - Manage Daytona snapshots ================================================ FILE: apps/cli/docs/daytona_ssh.md ================================================ ## daytona ssh SSH into a sandbox ### Synopsis Establish an SSH connection to a running sandbox ``` daytona ssh [SANDBOX_ID] | [SANDBOX_NAME] [flags] ``` ### Options ``` --expires int SSH access token expiration time in minutes (defaults to 24 hours) (default 1440) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_start.md 
================================================ ## daytona start Start a sandbox ``` daytona start [SANDBOX_ID] | [SANDBOX_NAME] [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_stop.md ================================================ ## daytona stop Stop a sandbox ``` daytona stop [SANDBOX_ID] | [SANDBOX_NAME] [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_version.md ================================================ ## daytona version Print the version number ``` daytona version [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI ================================================ FILE: apps/cli/docs/daytona_volume.md ================================================ ## daytona volume Manage Daytona volumes ### Synopsis Commands for managing Daytona volumes ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona](daytona.md) - Daytona CLI * [daytona volume create](daytona_volume_create.md) - Create a volume * [daytona volume delete](daytona_volume_delete.md) - Delete a volume * [daytona volume get](daytona_volume_get.md) - Get volume details * [daytona volume list](daytona_volume_list.md) - List all volumes ================================================ FILE: apps/cli/docs/daytona_volume_create.md ================================================ ## daytona volume create Create a volume ``` daytona volume create [NAME] [flags] ``` ### Options ``` -s, --size int32 Size of the volume in GB (default 10) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona 
volume](daytona_volume.md) - Manage Daytona volumes ================================================ FILE: apps/cli/docs/daytona_volume_delete.md ================================================ ## daytona volume delete Delete a volume ``` daytona volume delete [VOLUME_ID] [flags] ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona volume](daytona_volume.md) - Manage Daytona volumes ================================================ FILE: apps/cli/docs/daytona_volume_get.md ================================================ ## daytona volume get Get volume details ``` daytona volume get [VOLUME_ID] [flags] ``` ### Options ``` -f, --format string Output format. Must be one of (yaml, json) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona volume](daytona_volume.md) - Manage Daytona volumes ================================================ FILE: apps/cli/docs/daytona_volume_list.md ================================================ ## daytona volume list List all volumes ``` daytona volume list [flags] ``` ### Options ``` -f, --format string Output format. 
Must be one of (yaml, json) ``` ### Options inherited from parent commands ``` --help help for daytona ``` ### SEE ALSO * [daytona volume](daytona_volume.md) - Manage Daytona volumes ================================================ FILE: apps/cli/go.mod ================================================ module github.com/daytonaio/daytona/cli go 1.25.4 require ( github.com/charmbracelet/bubbletea v1.1.0 github.com/daytonaio/daytona/libs/api-client-go v0.153.0 github.com/docker/docker v28.5.2+incompatible github.com/mark3labs/mcp-go v0.32.0 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.10.1 golang.org/x/oauth2 v0.34.0 ) require ( github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/atotto/clipboard v0.1.4 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/catppuccin/go v0.2.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charmbracelet/x/ansi v0.4.2 // indirect github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 // indirect github.com/charmbracelet/x/term v0.2.0 // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/creack/pty v1.1.23 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/logr v1.4.3 // indirect 
github.com/go-logr/stdr v1.2.2 // indirect github.com/goccy/go-json v0.10.5 // indirect github.com/google/uuid v1.6.0 // indirect github.com/klauspost/compress v1.18.1 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-localereader v0.0.1 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/minio/crc64nvme v1.0.1 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/atomicwriter v0.1.0 // indirect github.com/moby/term v0.5.2 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/termenv v0.15.3-0.20240618155329-98d742f6907a // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rs/xid v1.6.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sahilm/fuzzy v0.1.1 // indirect github.com/spf13/cast v1.7.1 // indirect github.com/yosida95/uritemplate/v3 v3.0.2 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect go.opentelemetry.io/otel/sdk v1.40.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect go.opentelemetry.io/otel/trace v1.40.0 // indirect golang.org/x/crypto v0.47.0 // indirect golang.org/x/net v0.49.0 
// indirect golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.40.0 // indirect golang.org/x/text v0.33.0 // indirect golang.org/x/time v0.10.0 // indirect google.golang.org/grpc v1.79.3 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.1 // indirect ) require ( github.com/charmbracelet/bubbles v0.20.0 github.com/charmbracelet/huh v0.6.0 github.com/charmbracelet/lipgloss v1.0.0 github.com/coreos/go-oidc/v3 v3.12.0 github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/joho/godotenv v1.5.1 github.com/minio/minio-go/v7 v7.0.91 github.com/spf13/pflag v1.0.9 // indirect golang.org/x/term v0.39.0 gopkg.in/yaml.v2 v2.4.0 ) ================================================ FILE: apps/cli/go.sum ================================================ github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8= github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA= github.com/catppuccin/go v0.2.0 h1:ktBeIrIP42b/8FGiScP9sgrWOss3lw0Z5SktRoithGA= 
github.com/catppuccin/go v0.2.0/go.mod h1:8IHJuMGaUUjQM82qBrGNBv7LFq6JI3NnQCF6MOlZjpc= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE= github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU= github.com/charmbracelet/bubbletea v1.1.0 h1:FjAl9eAL3HBCHenhz/ZPjkKdScmaS5SK69JAK2YJK9c= github.com/charmbracelet/bubbletea v1.1.0/go.mod h1:9Ogk0HrdbHolIKHdjfFpyXJmiCzGwy+FesYkZr7hYU4= github.com/charmbracelet/huh v0.6.0 h1:mZM8VvZGuE0hoDXq6XLxRtgfWyTI3b2jZNKh0xWmax8= github.com/charmbracelet/huh v0.6.0/go.mod h1:GGNKeWCeNzKpEOh/OJD8WBwTQjV3prFAtQPpLv+AVwU= github.com/charmbracelet/lipgloss v1.0.0 h1:O7VkGDvqEdGi93X+DeqsQ7PKHDgtQfF8j8/O2qFMQNg= github.com/charmbracelet/lipgloss v1.0.0/go.mod h1:U5fy9Z+C38obMs+T+tJqst9VGzlOYGj4ri9reL3qUlo= github.com/charmbracelet/x/ansi v0.4.2 h1:0JM6Aj/g/KC154/gOP4vfxun0ff6itogDYk41kof+qk= github.com/charmbracelet/x/ansi v0.4.2/go.mod h1:dk73KoMTT5AX5BsX0KrqhsTqAnhZZoCBjs7dGWp4Ktw= github.com/charmbracelet/x/exp/golden v0.0.0-20240815200342-61de596daa2b h1:MnAMdlwSltxJyULnrYbkZpp4k58Co7Tah3ciKhSNo0Q= github.com/charmbracelet/x/exp/golden v0.0.0-20240815200342-61de596daa2b/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 h1:qko3AQ4gK1MTS/de7F5hPGx6/k1u0w4TeYmBFwzYVP4= github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0/go.mod h1:pBhA0ybfXv6hDjQUZ7hk1lVxBiUbupdw5R31yPUViVQ= github.com/charmbracelet/x/term v0.2.0 h1:cNB9Ot9q8I711MyZ7myUR5HFWL/lc3OpU8jZ4hwm0x0= github.com/charmbracelet/x/term v0.2.0/go.mod h1:GVxgxAbjUrmpvIINHIQnJJKpMlHiZ4cktEQCN6GWyF0= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs/pkg v0.3.0 
h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo= github.com/coreos/go-oidc/v3 v3.12.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/daytonaio/daytona/libs/api-client-go v0.153.0 h1:OGCzMcAR9RsrPToFuKJFUdKIcynnYtQCfvGn67Z0A2s= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= 
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak= github.com/inconshreveable/mousetrap v1.1.0 
h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/mark3labs/mcp-go v0.32.0 h1:fgwmbfL2gbd67obg57OfV2Dnrhs1HtSdlY/i5fn7MU8= github.com/mark3labs/mcp-go v0.32.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/minio/crc64nvme v1.0.1 
h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY= github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.91 h1:tWLZnEfo3OZl5PoXQwcwTAPNNrjyWwOh6cbZitW5JQc= github.com/minio/minio-go/v7 v7.0.91/go.mod h1:uvMUcGrpgeSAAI6+sD3818508nUyMULw94j2Nxku/Go= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= github.com/muesli/termenv v0.15.3-0.20240618155329-98d742f6907a h1:2MaM6YC3mGu54x+RKAA6JiFFHlHDY1UbkxqppT7wYOg= github.com/muesli/termenv 
v0.15.3-0.20240618155329-98d742f6907a/go.mod h1:hxSnBBYLK21Vtq/PHd0S2FYCxBXzBua8ov5s1RobyRQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA= github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= golang.org/x/oauth2 v0.34.0 
h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
================================================
FILE: apps/cli/hack/build.sh
================================================
#!/bin/bash
# Copyright 2025 Daytona Platforms Inc.
# SPDX-License-Identifier: AGPL-3.0

# Builds the Daytona CLI binary for a target GOOS/GOARCH, embedding the
# version and Auth0/API configuration into the binary via -ldflags -X.

# Exit on error
set -e

# Get absolute path of script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# NOTE(review): "../.." from hack/ resolves to apps/, not the repository
# root (DIST_DIR, three levels up, is the repo root) -- confirm that
# PROJECT_ROOT is intentionally the apps/ directory.
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
DIST_DIR="$(cd "${SCRIPT_DIR}/../../.." && pwd)"

# Environment file precedence:
# 1. DAYTONA_ENV_FILE environment variable if set
# 2. .env.local, then .env, in the CLI directory
# 3. .env.local, then .env, in PROJECT_ROOT
# 4. Default values
#
# Sources the given file into the current shell; returns 0 on success,
# 1 when the file does not exist.
load_env_file() {
  local env_file="$1"
  if [ -f "$env_file" ]; then
    source "$env_file"
    return 0
  fi
  return 1
}

# If --skip-env-file is passed, skip loading env files
for arg in "$@"; do
  if [ "$arg" == "--skip-env-file" ]; then
    echo "Skipping loading of environment files"
    SKIP_ENV_FILE=true
    break
  fi
done

if [ "$SKIP_ENV_FILE" != "true" ]; then
  echo "Loading environment files"
  # Try loading environment files in order of precedence
  if [ -n "$DAYTONA_ENV_FILE" ]; then
    if ! load_env_file "$DAYTONA_ENV_FILE"; then
      echo "Warning: Environment file specified by DAYTONA_ENV_FILE ($DAYTONA_ENV_FILE) not found"
    fi
  elif load_env_file "${SCRIPT_DIR}/../.env.local"; then
    : # loaded CLI-directory .env.local
  elif load_env_file "${SCRIPT_DIR}/../.env"; then
    : # loaded CLI-directory .env
  elif load_env_file "${PROJECT_ROOT}/.env.local"; then
    : # loaded PROJECT_ROOT .env.local
  elif load_env_file "${PROJECT_ROOT}/.env"; then
    : # loaded PROJECT_ROOT .env
  else
    echo "Note: No .env file found, using default values"
  fi
fi

# Set default values
# NOTE(review): GOOS/GOARCH/CGO_ENABLED are assigned but not exported
# here; the defaulted values only reach 'go build' if the caller already
# exported these names -- verify against how this script is invoked.
DAYTONA_VERSION=${VERSION:-v0.0.0-dev}
GOOS=${GOOS:-linux}
GOARCH=${GOARCH:-amd64}
CGO_ENABLED=${CGO_ENABLED:-0}

# Validate required variables
REQUIRED_VARS=( "DAYTONA_API_URL" "DAYTONA_AUTH0_DOMAIN" "DAYTONA_AUTH0_CLIENT_ID" "DAYTONA_AUTH0_CALLBACK_PORT" "DAYTONA_AUTH0_AUDIENCE" )
MISSING_VARS=()
# ${!var} is indirect expansion: the value of the variable whose name is
# stored in $var.
for var in "${REQUIRED_VARS[@]}"; do
  if [ -z "${!var}" ]; then
    MISSING_VARS+=("$var")
  fi
done

if [ ${#MISSING_VARS[@]} -ne 0 ]; then
  echo "Error: Missing required environment variables:"
  printf '%s\n' "${MISSING_VARS[@]}"
  exit 1
fi

# Create build directory if it doesn't exist
mkdir -p "${DIST_DIR}/dist/apps/cli"

# Set output filename with .exe extension for Windows
OUTPUT_FILE="daytona-${GOOS}-${GOARCH}"
if [ "$GOOS" == "windows" ]; then
  OUTPUT_FILE="${OUTPUT_FILE}.exe"
fi

# Build the binary
# NOTE(review): DAYTONA_AUTH0_CLIENT_SECRET is baked into the shipped
# binary via -ldflags -X -- confirm this is a public/native-app
# credential and not a confidential secret.
echo "Building Daytona CLI with version: $DAYTONA_VERSION"
go build \
  -ldflags "-X 'github.com/daytonaio/daytona/cli/internal.Version=${DAYTONA_VERSION}' \
  -X 'github.com/daytonaio/daytona/cli/internal.DaytonaApiUrl=${DAYTONA_API_URL}' \
  -X 'github.com/daytonaio/daytona/cli/internal.Auth0Domain=${DAYTONA_AUTH0_DOMAIN}' \
  -X 'github.com/daytonaio/daytona/cli/internal.Auth0ClientId=${DAYTONA_AUTH0_CLIENT_ID}' \
  -X 'github.com/daytonaio/daytona/cli/internal.Auth0ClientSecret=${DAYTONA_AUTH0_CLIENT_SECRET}' \
  -X 'github.com/daytonaio/daytona/cli/internal.Auth0CallbackPort=${DAYTONA_AUTH0_CALLBACK_PORT}' \
  -X 'github.com/daytonaio/daytona/cli/internal.Auth0Audience=${DAYTONA_AUTH0_AUDIENCE}'" \
  -o "${DIST_DIR}/dist/apps/cli/${OUTPUT_FILE}" main.go

echo "Build complete: ${DIST_DIR}/dist/apps/cli/${OUTPUT_FILE}"
================================================ FILE: apps/cli/hack/docs/daytona.yaml ================================================ name: daytona synopsis: Daytona CLI description: Command line interface for Daytona Sandboxes usage: daytona [flags] options: - name: help default_value: 'false' usage: help for daytona - name: version shorthand: v default_value: 'false' usage: Display the version of Daytona see_also: - daytona archive - Archive a sandbox - daytona autocomplete - Adds a completion script for your shell environment - daytona create - Create a new sandbox - daytona delete - Delete a sandbox - daytona docs - Opens the Daytona documentation in your default browser. - daytona exec - Execute a command in a sandbox - daytona info - Get sandbox info - daytona list - List sandboxes - daytona login - Log in to Daytona - daytona logout - Logout from Daytona - daytona mcp - Manage Daytona MCP Server - daytona organization - Manage Daytona organizations - daytona preview-url - Get signed preview URL for a sandbox port - daytona snapshot - Manage Daytona snapshots - daytona ssh - SSH into a sandbox - daytona start - Start a sandbox - daytona stop - Stop a sandbox - daytona version - Print the version number - daytona volume - Manage Daytona volumes ================================================ FILE: apps/cli/hack/docs/daytona_archive.yaml ================================================ name: daytona archive synopsis: Archive a sandbox usage: daytona archive [SANDBOX_ID] | [SANDBOX_NAME] [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_autocomplete.yaml ================================================ name:
daytona autocomplete synopsis: Adds a completion script for your shell environment usage: daytona autocomplete [bash|zsh|fish|powershell] [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_create.yaml ================================================ name: daytona create synopsis: Create a new sandbox usage: daytona create [flags] options: - name: auto-archive default_value: '10080' usage: | Auto-archive interval in minutes (0 means the maximum interval will be used) - name: auto-delete default_value: '-1' usage: | Auto-delete interval in minutes (negative value means disabled, 0 means delete immediately upon stopping) - name: auto-stop default_value: '15' usage: Auto-stop interval in minutes (0 means disabled) - name: class usage: Sandbox class type (small, medium, large) - name: context shorthand: c default_value: '[]' usage: | Files or directories to include in the build context (can be specified multiple times) - name: cpu default_value: '0' usage: CPU cores allocated to the sandbox - name: disk default_value: '0' usage: Disk space allocated to the sandbox in GB - name: dockerfile shorthand: f usage: Path to Dockerfile for Sandbox snapshot - name: env shorthand: e default_value: '[]' usage: 'Environment variables (format: KEY=VALUE)' - name: gpu default_value: '0' usage: GPU units allocated to the sandbox - name: label shorthand: l default_value: '[]' usage: 'Labels (format: KEY=VALUE)' - name: memory default_value: '0' usage: Memory allocated to the sandbox in MB - name: name usage: Name of the sandbox - name: network-allow-list usage: | Comma-separated list of allowed CIDR network addresses for the sandbox - name: network-block-all default_value: 'false' usage: Whether to block all network access for the sandbox - name: public default_value: 'false' usage: Make sandbox publicly accessible - name: snapshot 
usage: Snapshot to use for the sandbox - name: target usage: Target region (eu, us) - name: user usage: User associated with the sandbox - name: volume shorthand: v default_value: '[]' usage: 'Volumes to mount (format: VOLUME_NAME:MOUNT_PATH)' inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_delete.yaml ================================================ name: daytona delete synopsis: Delete a sandbox usage: daytona delete [SANDBOX_ID] | [SANDBOX_NAME] [flags] options: - name: all shorthand: a default_value: 'false' usage: Delete all sandboxes inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_docs.yaml ================================================ name: daytona docs synopsis: Opens the Daytona documentation in your default browser. usage: daytona docs [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_exec.yaml ================================================ name: daytona exec synopsis: Execute a command in a sandbox description: Execute a command in a running sandbox usage: daytona exec [SANDBOX_ID | SANDBOX_NAME] -- [COMMAND] [ARGS...] 
[flags] options: - name: cwd usage: Working directory for command execution - name: timeout default_value: '0' usage: Command timeout in seconds (0 for no timeout) inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_info.yaml ================================================ name: daytona info synopsis: Get sandbox info usage: daytona info [SANDBOX_ID] | [SANDBOX_NAME] [flags] options: - name: format shorthand: f usage: Output format. Must be one of (yaml, json) inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_list.yaml ================================================ name: daytona list synopsis: List sandboxes usage: daytona list [flags] options: - name: format shorthand: f usage: Output format. Must be one of (yaml, json) - name: limit shorthand: l default_value: '100' usage: Maximum number of items per page - name: page shorthand: p default_value: '1' usage: Page number for pagination (starting from 1) inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_login.yaml ================================================ name: daytona login synopsis: Log in to Daytona usage: daytona login [flags] options: - name: api-key usage: API key to use for authentication inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_logout.yaml ================================================ name: daytona logout synopsis: Logout from Daytona usage: daytona logout [flags] inherited_options: - name: help default_value: 'false' 
usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_mcp.yaml ================================================ name: daytona mcp synopsis: Manage Daytona MCP Server description: Commands for managing Daytona MCP Server inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI - daytona mcp config - Outputs JSON configuration for Daytona MCP Server - 'daytona mcp init - Initialize Daytona MCP Server with an agent (currently supported: claude, windsurf, cursor)' - daytona mcp start - Start Daytona MCP Server ================================================ FILE: apps/cli/hack/docs/daytona_mcp_config.yaml ================================================ name: daytona mcp config synopsis: Outputs JSON configuration for Daytona MCP Server usage: daytona mcp config [AGENT_NAME] [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona mcp - Manage Daytona MCP Server ================================================ FILE: apps/cli/hack/docs/daytona_mcp_init.yaml ================================================ name: daytona mcp init synopsis: | Initialize Daytona MCP Server with an agent (currently supported: claude, windsurf, cursor) usage: daytona mcp init [AGENT_NAME] [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona mcp - Manage Daytona MCP Server ================================================ FILE: apps/cli/hack/docs/daytona_mcp_start.yaml ================================================ name: daytona mcp start synopsis: Start Daytona MCP Server usage: daytona mcp start [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona mcp - Manage Daytona MCP Server ================================================ FILE: apps/cli/hack/docs/daytona_organization.yaml 
================================================ name: daytona organization synopsis: Manage Daytona organizations description: Commands for managing Daytona organizations inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI - daytona organization create - Create a new organization and set it as active - daytona organization delete - Delete an organization - daytona organization list - List all organizations - daytona organization use - Set active organization ================================================ FILE: apps/cli/hack/docs/daytona_organization_create.yaml ================================================ name: daytona organization create synopsis: Create a new organization and set it as active usage: daytona organization create [ORGANIZATION_NAME] [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona organization - Manage Daytona organizations ================================================ FILE: apps/cli/hack/docs/daytona_organization_delete.yaml ================================================ name: daytona organization delete synopsis: Delete an organization usage: daytona organization delete [ORGANIZATION] [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona organization - Manage Daytona organizations ================================================ FILE: apps/cli/hack/docs/daytona_organization_list.yaml ================================================ name: daytona organization list synopsis: List all organizations usage: daytona organization list [flags] options: - name: format shorthand: f usage: Output format. 
Must be one of (yaml, json) inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona organization - Manage Daytona organizations ================================================ FILE: apps/cli/hack/docs/daytona_organization_use.yaml ================================================ name: daytona organization use synopsis: Set active organization usage: daytona organization use [ORGANIZATION] [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona organization - Manage Daytona organizations ================================================ FILE: apps/cli/hack/docs/daytona_preview-url.yaml ================================================ name: daytona preview-url synopsis: Get signed preview URL for a sandbox port usage: daytona preview-url [SANDBOX_ID | SANDBOX_NAME] [flags] options: - name: expires default_value: '3600' usage: URL expiration time in seconds - name: port shorthand: p default_value: '0' usage: Port number to get preview URL for (required) inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_snapshot.yaml ================================================ name: daytona snapshot synopsis: Manage Daytona snapshots description: Commands for managing Daytona snapshots inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI - daytona snapshot create - Create a snapshot - daytona snapshot delete - Delete a snapshot - daytona snapshot list - List all snapshots - daytona snapshot push - Push local snapshot ================================================ FILE: apps/cli/hack/docs/daytona_snapshot_create.yaml ================================================ name: daytona snapshot create synopsis: Create a snapshot usage: daytona snapshot create [SNAPSHOT] [flags] options: - 
name: context shorthand: c default_value: '[]' usage: | Files or directories to include in the build context (can be specified multiple times). If not provided, context will be automatically determined from COPY/ADD commands in the Dockerfile - name: cpu default_value: '0' usage: | CPU cores that will be allocated to the underlying sandboxes (default: 1) - name: disk default_value: '0' usage: | Disk space that will be allocated to the underlying sandboxes in GB (default: 3) - name: dockerfile shorthand: f usage: Path to Dockerfile to build - name: entrypoint shorthand: e usage: The entrypoint command for the snapshot - name: image shorthand: i usage: The image name for the snapshot - name: memory default_value: '0' usage: | Memory that will be allocated to the underlying sandboxes in GB (default: 1) - name: region usage: | ID of the region where the snapshot will be available (defaults to organization default region) inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona snapshot - Manage Daytona snapshots ================================================ FILE: apps/cli/hack/docs/daytona_snapshot_delete.yaml ================================================ name: daytona snapshot delete synopsis: Delete a snapshot usage: daytona snapshot delete [SNAPSHOT_ID] [flags] options: - name: all shorthand: a default_value: 'false' usage: Delete all snapshots inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona snapshot - Manage Daytona snapshots ================================================ FILE: apps/cli/hack/docs/daytona_snapshot_list.yaml ================================================ name: daytona snapshot list synopsis: List all snapshots description: List all available Daytona snapshots usage: daytona snapshot list [flags] options: - name: format shorthand: f usage: Output format. 
Must be one of (yaml, json) - name: limit shorthand: l default_value: '100' usage: Maximum number of items per page - name: page shorthand: p default_value: '1' usage: Page number for pagination (starting from 1) inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona snapshot - Manage Daytona snapshots ================================================ FILE: apps/cli/hack/docs/daytona_snapshot_push.yaml ================================================ name: daytona snapshot push synopsis: Push local snapshot description: | Push a local Docker image to Daytona. To securely build it on our infrastructure, use 'daytona snapshot build' usage: daytona snapshot push [SNAPSHOT] [flags] options: - name: cpu default_value: '0' usage: | CPU cores that will be allocated to the underlying sandboxes (default: 1) - name: disk default_value: '0' usage: | Disk space that will be allocated to the underlying sandboxes in GB (default: 3) - name: entrypoint shorthand: e usage: The entrypoint command for the image - name: memory default_value: '0' usage: | Memory that will be allocated to the underlying sandboxes in GB (default: 1) - name: name shorthand: 'n' usage: Specify the Snapshot name - name: region usage: | ID of the region where the snapshot will be available (defaults to organization default region) inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona snapshot - Manage Daytona snapshots ================================================ FILE: apps/cli/hack/docs/daytona_ssh.yaml ================================================ name: daytona ssh synopsis: SSH into a sandbox description: Establish an SSH connection to a running sandbox usage: daytona ssh [SANDBOX_ID] | [SANDBOX_NAME] [flags] options: - name: expires default_value: '1440' usage: | SSH access token expiration time in minutes (defaults to 24 hours) inherited_options: - name: help default_value: 'false' usage: help for daytona 
see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_start.yaml ================================================ name: daytona start synopsis: Start a sandbox usage: daytona start [SANDBOX_ID] | [SANDBOX_NAME] [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_stop.yaml ================================================ name: daytona stop synopsis: Stop a sandbox usage: daytona stop [SANDBOX_ID] | [SANDBOX_NAME] [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_version.yaml ================================================ name: daytona version synopsis: Print the version number usage: daytona version [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI ================================================ FILE: apps/cli/hack/docs/daytona_volume.yaml ================================================ name: daytona volume synopsis: Manage Daytona volumes description: Commands for managing Daytona volumes inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona - Daytona CLI - daytona volume create - Create a volume - daytona volume delete - Delete a volume - daytona volume get - Get volume details - daytona volume list - List all volumes ================================================ FILE: apps/cli/hack/docs/daytona_volume_create.yaml ================================================ name: daytona volume create synopsis: Create a volume usage: daytona volume create [NAME] [flags] options: - name: size shorthand: s default_value: '10' usage: Size of the volume in GB inherited_options: - name: help 
default_value: 'false' usage: help for daytona see_also: - daytona volume - Manage Daytona volumes ================================================ FILE: apps/cli/hack/docs/daytona_volume_delete.yaml ================================================ name: daytona volume delete synopsis: Delete a volume usage: daytona volume delete [VOLUME_ID] [flags] inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona volume - Manage Daytona volumes ================================================ FILE: apps/cli/hack/docs/daytona_volume_get.yaml ================================================ name: daytona volume get synopsis: Get volume details usage: daytona volume get [VOLUME_ID] [flags] options: - name: format shorthand: f usage: Output format. Must be one of (yaml, json) inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona volume - Manage Daytona volumes ================================================ FILE: apps/cli/hack/docs/daytona_volume_list.yaml ================================================ name: daytona volume list synopsis: List all volumes usage: daytona volume list [flags] options: - name: format shorthand: f usage: Output format. Must be one of (yaml, json) inherited_options: - name: help default_value: 'false' usage: help for daytona see_also: - daytona volume - Manage Daytona volumes ================================================ FILE: apps/cli/hack/generate-cli-docs.sh ================================================ #!/bin/bash # Copyright 2025 Daytona Platforms Inc. # SPDX-License-Identifier: AGPL-3.0 # Clean up existing documentation files rm -rf docs hack/docs # Generate default CLI documentation files in folder "docs" go run main.go generate-docs ================================================ FILE: apps/cli/internal/buildinfo.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package internal var ( Version = "v0.0.0-dev" DaytonaApiUrl = "" Auth0Domain = "" Auth0ClientId = "" Auth0ClientSecret = "" Auth0CallbackPort = "" Auth0Audience = "" ) ================================================ FILE: apps/cli/internal/cmd.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package internal const ( USER_GROUP = "user" SANDBOX_GROUP = "sandbox" ) var ( SuppressVersionMismatchWarning = false ) ================================================ FILE: apps/cli/main.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package main import ( "os" log "github.com/sirupsen/logrus" "github.com/daytonaio/daytona/cli/cmd" "github.com/daytonaio/daytona/cli/cmd/auth" "github.com/daytonaio/daytona/cli/cmd/mcp" "github.com/daytonaio/daytona/cli/cmd/organization" "github.com/daytonaio/daytona/cli/cmd/sandbox" "github.com/daytonaio/daytona/cli/cmd/snapshot" "github.com/daytonaio/daytona/cli/cmd/volume" "github.com/daytonaio/daytona/cli/internal" "github.com/joho/godotenv" "github.com/spf13/cobra" ) var rootCmd = &cobra.Command{ Use: "daytona", Short: "Daytona CLI", Long: "Command line interface for Daytona Sandboxes", DisableAutoGenTag: true, SilenceUsage: true, SilenceErrors: true, RunE: func(cmd *cobra.Command, args []string) error { return cmd.Help() }, } func init() { rootCmd.AddGroup(&cobra.Group{ID: internal.USER_GROUP, Title: "User"}) rootCmd.AddGroup(&cobra.Group{ID: internal.SANDBOX_GROUP, Title: "Sandbox"}) rootCmd.AddCommand(auth.LoginCmd) rootCmd.AddCommand(auth.LogoutCmd) rootCmd.AddCommand(sandbox.SandboxCmd) rootCmd.AddCommand(snapshot.SnapshotsCmd) rootCmd.AddCommand(volume.VolumeCmd) rootCmd.AddCommand(organization.OrganizationCmd) rootCmd.AddCommand(mcp.MCPCmd) rootCmd.AddCommand(cmd.DocsCmd) rootCmd.AddCommand(cmd.AutoCompleteCmd) 
rootCmd.AddCommand(cmd.GenerateDocsCmd) rootCmd.AddCommand(cmd.VersionCmd) // Add sandbox subcommands as top-level shortcuts rootCmd.AddCommand(createSandboxShortcut(sandbox.CreateCmd)) rootCmd.AddCommand(createSandboxShortcut(sandbox.DeleteCmd)) rootCmd.AddCommand(createSandboxShortcut(sandbox.InfoCmd)) rootCmd.AddCommand(createSandboxShortcut(sandbox.ListCmd)) rootCmd.AddCommand(createSandboxShortcut(sandbox.StartCmd)) rootCmd.AddCommand(createSandboxShortcut(sandbox.StopCmd)) rootCmd.AddCommand(createSandboxShortcut(sandbox.ArchiveCmd)) rootCmd.AddCommand(createSandboxShortcut(sandbox.SSHCmd)) rootCmd.AddCommand(createSandboxShortcut(sandbox.ExecCmd)) rootCmd.AddCommand(createSandboxShortcut(sandbox.PreviewUrlCmd)) rootCmd.CompletionOptions.HiddenDefaultCmd = true rootCmd.PersistentFlags().BoolP("help", "", false, "help for daytona") rootCmd.Flags().BoolP("version", "v", false, "Display the version of Daytona") rootCmd.PreRun = func(command *cobra.Command, args []string) { versionFlag, _ := command.Flags().GetBool("version") if versionFlag { err := cmd.VersionCmd.RunE(command, []string{}) if err != nil { log.Fatal(err) } os.Exit(0) } } } // createSandboxShortcut creates a top-level shortcut for a sandbox subcommand func createSandboxShortcut(original *cobra.Command) *cobra.Command { shortcut := &cobra.Command{ Use: original.Use, Short: original.Short, Long: original.Long, Args: original.Args, Aliases: original.Aliases, GroupID: internal.SANDBOX_GROUP, RunE: original.RunE, } shortcut.Flags().AddFlagSet(original.Flags()) return shortcut } func main() { _ = godotenv.Load() err := rootCmd.Execute() if err != nil { log.Fatal(err) } } ================================================ FILE: apps/cli/mcp/README.md ================================================ # Daytona MCP (Model Context Protocol) Server Daytona MCP Server allows AI agents to utilize: - Daytona Sandbox Management (Create, Destroy) - Execute commands in Daytona Sandboxes - File Operations in Daytona 
sandboxes - Generate preview links for web applications running in Daytona Sandboxes ## Prerequisites - Daytona account - Daytona CLI installed - A compatible AI agent (Claude Desktop App, Claude Code, Cursor, Windsurf) ## Steps to Integrate Daytona MCP Server with an AI Agent 1. **Install the Daytona CLI:** **Mac/Linux** ```bash brew install daytonaio/cli/daytona ``` **Windows** ```bash powershell -Command "irm https://get.daytona.io/windows | iex" ``` 2. **Log in to your Daytona account:** ```bash daytona login ``` 3. **Initialize the Daytona MCP server with Claude Desktop/Claude Code/Cursor/Windsurf:** ```bash daytona mcp init [claude/cursor/windsurf] ``` 4. **Open Agent App** ## Integrating with Other AI Agents Apps **Run the following command to get a JSON Daytona MCP configuration which you can c/p to your agent configuration:** ```bash daytona mcp config ``` **Command outputs the following:** ```json { "mcpServers": { "daytona-mcp": { "command": "daytona", "args": ["mcp", "start"], "env": { "HOME": "${HOME}", "PATH": "${HOME}:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/homebrew/bin" }, "logFile": "${HOME}/Library/Logs/daytona/daytona-mcp-server.log" } } } ``` Note: if you are running Daytona MCP Server on Windows OS, add the following to the env field of the configuration: ```json "APPDATA": "${APPDATA}" ``` **Finally, open or restart your AI agent** ## Available Tools ### Sandbox Management - `create_sandbox`: Create a new sandbox with Daytona - Parameters: - `id` (optional): Sandbox ID - if provided, an existing sandbox will be used, new one will be created otherwise - `target` (optional): Target region of the sandbox (if not provided, default region of the organization is used) - `image`: Image of the sandbox (optional) - `auto_stop_interval` (default: "15"): Auto-stop interval in minutes (0 means disabled) - `auto_archive_interval` (default: "10080"): Auto-archive interval in minutes (0 means the maximum interval will be used) - 
`auto_delete_interval` (default: "-1"): Auto-delete interval in minutes (negative value means disabled, 0 means delete immediately upon stopping) - `destroy_sandbox`: Destroy a sandbox with Daytona ### File Operations - `upload_file`: Upload a file to the Daytona sandbox - Files can be text or base64-encoded binary content - Creates necessary parent directories automatically - Files persist during the session and have appropriate permissions - Supports overwrite controls and maintains original files formats - Parameters: - `id` (optional): Sandbox ID - `file_path`: Path to the file to upload - `content`: Content of the file to upload - `encoding`: Encoding of the file to upload - `overwrite`: Overwrite the file if it already exists - `download_file`: Download a file from the Daytona sandbox - Returns file content as text or base64 encoded image - Handles special cases like matplotlib plots stored as JSON - Parameters: - `id` (optional): Sandbox ID - `file_path`: Path to the file to download - `create_folder`: Create a new folder in the Daytona sandbox - Parameters: - `id` (optional): Sandbox ID - `folder_path`: Path to the folder to create - `mode`: Mode of the folder to create (defaults to 0755) - `get_file_info`: Get information about a file in the Daytona sandbox - Parameters: - `id` (optional): Sandbox ID - `file_path`: Path to the file to get information about - `list_files`: List files in a directory in the Daytona sandbox - Parameters: - `id` (optional): Sandbox ID - `path`: Path to the directory to list files from (defaults to current directory) - `move_file`: Move or rename a file in the Daytona sandbox - Parameters: - `id` (optional): Sandbox ID - `source_path`: Source path of the file to move - `dest_path`: Destination path where to move the file - `delete_file`: Delete a file or directory in the Daytona sandbox - Parameters: - `id` (optional): Sandbox ID - `file_path`: Path to the file or directory to delete ### Git Operations - `git_clone`: Clone a Git 
repository into the Daytona sandbox - Parameters: - `id` (optional): Sandbox ID - `url`: URL of the Git repository to clone - `path`: Directory to clone the repository into (defaults to current directory) - `branch`: Branch to clone - `commit_id`: Commit ID to clone - `username`: Username to clone the repository with - `password`: Password to clone the repository with ### Command Execution - `execute_command`: Execute shell commands in the ephemeral Daytona Linux environment - Returns full stdout and stderr output with exit codes - Commands have sandbox user permissions - Parameters: - `id` (optional): Sandbox ID - `command`: Command to execute ### Preview - `preview_link`: Generate accessible preview URLs for web applications running in the Daytona sandbox - Creates a secure tunnel to expose local ports externally without configuration - Validates if a server is actually running on the specified port - Provides diagnostic information for troubleshooting - Supports custom descriptions and metadata for better organization of multiple services - Parameters: - `id` (optional): Sandbox ID - `port`: Port to expose - `description`: Description of the service - `check_server`: Check if a server is running ## Troubleshooting - **Authentication issues:** Run `daytona login` to refresh your credentials - **Connection errors:** Ensure that the Daytona MCP Server is properly configured - **Sandbox errors:** Check sandbox status with `daytona sandbox list` ## Support For more information, visit [daytona.io](https://daytona.io) or contact support at support@daytona.io. ================================================ FILE: apps/cli/mcp/server.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package mcp import ( "github.com/daytonaio/daytona/cli/mcp/tools" "github.com/mark3labs/mcp-go/mcp" "github.com/mark3labs/mcp-go/server" ) type DaytonaMCPServer struct { server.MCPServer } func NewDaytonaMCPServer() *DaytonaMCPServer { s := &DaytonaMCPServer{} s.MCPServer = *server.NewMCPServer( "Daytona MCP Server", "0.0.0-dev", server.WithRecovery(), server.WithPromptCapabilities(false), server.WithResourceCapabilities(false, false), server.WithToolCapabilities(true), server.WithLogging(), ) s.addTools() return s } func (s *DaytonaMCPServer) Start() error { return server.ServeStdio(&s.MCPServer) } func (s *DaytonaMCPServer) addTools() { s.AddTool(tools.GetCreateSandboxTool(), mcp.NewTypedToolHandler(tools.CreateSandbox)) s.AddTool(tools.GetDestroySandboxTool(), mcp.NewTypedToolHandler(tools.DestroySandbox)) s.AddTool(tools.GetFileUploadTool(), mcp.NewTypedToolHandler(tools.FileUpload)) s.AddTool(tools.GetFileDownloadTool(), mcp.NewTypedToolHandler(tools.FileDownload)) s.AddTool(tools.GetFileInfoTool(), mcp.NewTypedToolHandler(tools.FileInfo)) s.AddTool(tools.GetListFilesTool(), mcp.NewTypedToolHandler(tools.ListFiles)) s.AddTool(tools.GetMoveFileTool(), mcp.NewTypedToolHandler(tools.MoveFile)) s.AddTool(tools.GetDeleteFileTool(), mcp.NewTypedToolHandler(tools.DeleteFile)) s.AddTool(tools.GetCreateFolderTool(), mcp.NewTypedToolHandler(tools.CreateFolder)) s.AddTool(tools.GetExecuteCommandTool(), mcp.NewTypedToolHandler(tools.ExecuteCommand)) s.AddTool(tools.GetPreviewLinkTool(), mcp.NewTypedToolHandler(tools.PreviewLink)) s.AddTool(tools.GetGitCloneTool(), mcp.NewTypedToolHandler(tools.GitClone)) } ================================================ FILE: apps/cli/mcp/tools/common.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package tools import "github.com/daytonaio/daytona/cli/apiclient" var daytonaMCPHeaders map[string]string = map[string]string{ apiclient.DaytonaSourceHeader: "daytona-mcp", } ================================================ FILE: apps/cli/mcp/tools/create_folder.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package tools import ( "context" "fmt" "github.com/daytonaio/daytona/cli/apiclient" "github.com/mark3labs/mcp-go/mcp" log "github.com/sirupsen/logrus" ) type CreateFolderArgs struct { Id *string `json:"id,omitempty"` FolderPath *string `json:"folderPath,omitempty"` Mode *string `json:"mode,omitempty"` } func GetCreateFolderTool() mcp.Tool { return mcp.NewTool("create_folder", mcp.WithDescription("Create a new folder in the Daytona sandbox."), mcp.WithString("folderPath", mcp.Required(), mcp.Description("Path to the folder to create.")), mcp.WithString("mode", mcp.Description("Mode of the folder to create (defaults to 0755).")), mcp.WithString("id", mcp.Required(), mcp.Description("ID of the sandbox to create the folder in.")), ) } func CreateFolder(ctx context.Context, request mcp.CallToolRequest, args CreateFolderArgs) (*mcp.CallToolResult, error) { apiClient, err := apiclient.GetApiClient(nil, daytonaMCPHeaders) if err != nil { return &mcp.CallToolResult{IsError: true}, err } if args.Id == nil || *args.Id == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("sandbox ID is required") } if args.FolderPath == nil || *args.FolderPath == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("folderPath parameter is required") } mode := "0755" // default mode if args.Mode == nil || *args.Mode == "" { args.Mode = &mode } // Create the folder _, err = apiClient.ToolboxAPI.CreateFolderDeprecated(ctx, *args.Id).Path(*args.FolderPath).Mode(*args.Mode).Execute() if err != nil { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error 
creating folder: %v", err) } log.Infof("Created folder: %s", *args.FolderPath) return mcp.NewToolResultText(fmt.Sprintf("Created folder: %s", *args.FolderPath)), nil } ================================================ FILE: apps/cli/mcp/tools/create_sandbox.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package tools import ( "context" "fmt" "strings" "time" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "github.com/mark3labs/mcp-go/mcp" log "github.com/sirupsen/logrus" ) type CreateSandboxArgs struct { Id *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` Target *string `json:"target,omitempty"` Snapshot *string `json:"snapshot,omitempty"` User *string `json:"user,omitempty"` Env *map[string]string `json:"env,omitempty"` Labels *map[string]string `json:"labels,omitempty"` Public *bool `json:"public,omitempty"` Cpu *int32 `json:"cpu,omitempty"` Gpu *int32 `json:"gpu,omitempty"` Memory *int32 `json:"memory,omitempty"` Disk *int32 `json:"disk,omitempty"` AutoStopInterval *int32 `json:"autoStopInterval,omitempty"` AutoArchiveInterval *int32 `json:"autoArchiveInterval,omitempty"` AutoDeleteInterval *int32 `json:"autoDeleteInterval,omitempty"` Volumes *[]apiclient.SandboxVolume `json:"volumes,omitempty"` BuildInfo *apiclient.CreateBuildInfo `json:"buildInfo,omitempty"` NetworkBlockAll *bool `json:"networkBlockAll,omitempty"` NetworkAllowList *string `json:"networkAllowList,omitempty"` } func GetCreateSandboxTool() mcp.Tool { return mcp.NewTool("create_sandbox", mcp.WithDescription("Create a new sandbox with Daytona"), mcp.WithString("id", mcp.Description("If a sandbox ID is provided it is first checked if it exists and is running, if so, the existing sandbox will be used. 
However, a model is not able to provide custom sandbox ID but only the ones Daytona commands return and should always leave ID field empty if the intention is to create a new sandbox.")), mcp.WithString("name", mcp.Description("Name of the sandbox. If not provided, the sandbox ID will be used as the name.")), mcp.WithString("target", mcp.DefaultString("us"), mcp.Description("Target region of the sandbox.")), mcp.WithString("snapshot", mcp.Description("Snapshot of the sandbox (don't specify any if not explicitly instructed from user). Cannot be specified when using a build info entry.")), mcp.WithString("user", mcp.Description("User associated with the sandbox.")), mcp.WithObject("env", mcp.Description("Environment variables for the sandbox. Format: {\"key\": \"value\", \"key2\": \"value2\"}"), mcp.AdditionalProperties(map[string]any{"type": "string"})), mcp.WithObject("labels", mcp.Description("Labels for the sandbox. Format: {\"key\": \"value\", \"key2\": \"value2\"}"), mcp.AdditionalProperties(map[string]any{"type": "string"})), mcp.WithBoolean("public", mcp.Description("Whether the sandbox http preview is publicly accessible.")), mcp.WithNumber("cpu", mcp.Description("CPU cores allocated to the sandbox. Cannot specify sandbox resources when using a snapshot."), mcp.Max(4)), mcp.WithNumber("gpu", mcp.Description("GPU units allocated to the sandbox. Cannot specify sandbox resources when using a snapshot."), mcp.Max(1)), mcp.WithNumber("memory", mcp.Description("Memory allocated to the sandbox in GB. Cannot specify sandbox resources when using a snapshot."), mcp.Max(8)), mcp.WithNumber("disk", mcp.Description("Disk space allocated to the sandbox in GB. 
Cannot specify sandbox resources when using a snapshot."), mcp.Max(10)), mcp.WithNumber("autoStopInterval", mcp.DefaultNumber(15), mcp.Min(0), mcp.Description("Auto-stop interval in minutes (0 means disabled) for the sandbox.")), mcp.WithNumber("autoArchiveInterval", mcp.DefaultNumber(10080), mcp.Min(0), mcp.Description("Auto-archive interval in minutes (0 means the maximum interval will be used) for the sandbox.")), mcp.WithNumber("autoDeleteInterval", mcp.DefaultNumber(-1), mcp.Description("Auto-delete interval in minutes (negative value means disabled, 0 means delete immediately upon stopping) for the sandbox.")), mcp.WithArray("volumes", mcp.Description("Volumes to attach to the sandbox."), mcp.Items(map[string]any{"type": "object", "properties": map[string]any{"volumeId": map[string]any{"type": "string"}, "mountPath": map[string]any{"type": "string"}}})), mcp.WithObject("buildInfo", mcp.Description("Build information for the sandbox."), mcp.Properties(map[string]any{"dockerfileContent": map[string]any{"type": "string"}, "contextHashes": map[string]any{"type": "array", "items": map[string]any{"type": "string"}}})), mcp.WithBoolean("networkBlockAll", mcp.Description("Whether to block all network access to the sandbox.")), mcp.WithString("networkAllowList", mcp.Description("Comma-separated list of domains to allow network access to the sandbox.")), ) } func CreateSandbox(ctx context.Context, request mcp.CallToolRequest, args CreateSandboxArgs) (*mcp.CallToolResult, error) { apiClient, err := apiclient_cli.GetApiClient(nil, daytonaMCPHeaders) if err != nil { return &mcp.CallToolResult{IsError: true}, err } sandboxId := "" if args.Id != nil && *args.Id != "" { sandboxId = *args.Id } if sandboxId != "" { sandbox, _, err := apiClient.SandboxAPI.GetSandbox(ctx, sandboxId).Execute() if err == nil && sandbox.State != nil && *sandbox.State == apiclient.SANDBOXSTATE_STARTED { return mcp.NewToolResultText(fmt.Sprintf("Reusing existing sandbox %s", sandboxId)), nil } return 
&mcp.CallToolResult{IsError: true}, fmt.Errorf("sandbox %s not found or not running", sandboxId) } createSandboxReq, err := createSandboxRequest(args) if err != nil { return &mcp.CallToolResult{IsError: true}, err } // Create new sandbox with retries maxRetries := 3 retryDelay := time.Second * 2 for retry := range maxRetries { sandbox, _, err := apiClient.SandboxAPI.CreateSandbox(ctx).CreateSandbox(*createSandboxReq).Execute() if err != nil { if strings.Contains(err.Error(), "Total CPU quota exceeded") { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("CPU quota exceeded. Please delete unused sandboxes or upgrade your plan") } if retry == maxRetries-1 { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("failed to create sandbox after %d retries: %v", maxRetries, err) } log.Infof("Sandbox creation failed, retrying: %v", err) time.Sleep(retryDelay) retryDelay = retryDelay * 3 / 2 // Exponential backoff continue } log.Infof("Created new sandbox: %s", sandbox.Id) return mcp.NewToolResultText(fmt.Sprintf("Created new sandbox %s", sandbox.Id)), nil } return &mcp.CallToolResult{IsError: true}, fmt.Errorf("failed to create sandbox after %d retries", maxRetries) } func createSandboxRequest(args CreateSandboxArgs) (*apiclient.CreateSandbox, error) { createSandbox := apiclient.NewCreateSandbox() if args.Name != nil && *args.Name != "" { createSandbox.SetName(*args.Name) } if args.BuildInfo != nil { if args.Snapshot != nil && *args.Snapshot != "" { return nil, fmt.Errorf("cannot specify a snapshot when using a build info entry") } } else { if args.Cpu != nil || args.Gpu != nil || args.Memory != nil || args.Disk != nil { return nil, fmt.Errorf("cannot specify sandbox resources when using a snapshot") } } if args.Snapshot != nil && *args.Snapshot != "" { createSandbox.SetSnapshot(*args.Snapshot) } if args.Target != nil && *args.Target != "" { createSandbox.SetTarget(*args.Target) } if args.AutoStopInterval != nil { 
createSandbox.SetAutoStopInterval(*args.AutoStopInterval) } if args.AutoArchiveInterval != nil { createSandbox.SetAutoArchiveInterval(*args.AutoArchiveInterval) } if args.AutoDeleteInterval != nil { createSandbox.SetAutoDeleteInterval(*args.AutoDeleteInterval) } if args.User != nil && *args.User != "" { createSandbox.SetUser(*args.User) } if args.Env != nil { createSandbox.SetEnv(*args.Env) } if args.Labels != nil { createSandbox.SetLabels(*args.Labels) } if args.Public != nil { createSandbox.SetPublic(*args.Public) } if args.Cpu != nil { createSandbox.SetCpu(*args.Cpu) } if args.Memory != nil { createSandbox.SetMemory(*args.Memory) } if args.Disk != nil { createSandbox.SetDisk(*args.Disk) } if args.Volumes != nil { createSandbox.SetVolumes(*args.Volumes) } if args.BuildInfo != nil { createSandbox.SetBuildInfo(*args.BuildInfo) } if args.NetworkBlockAll != nil { createSandbox.SetNetworkBlockAll(*args.NetworkBlockAll) } if args.NetworkAllowList != nil { createSandbox.SetNetworkAllowList(*args.NetworkAllowList) } return createSandbox, nil } ================================================ FILE: apps/cli/mcp/tools/delete_file.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package tools import ( "context" "fmt" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "github.com/mark3labs/mcp-go/mcp" log "github.com/sirupsen/logrus" ) type DeleteFileArgs struct { Id *string `json:"id,omitempty"` FilePath *string `json:"filePath,omitempty"` } func GetDeleteFileTool() mcp.Tool { return mcp.NewTool("delete_file", mcp.WithDescription("Delete a file or directory in the Daytona sandbox."), mcp.WithString("filePath", mcp.Required(), mcp.Description("Path to the file or directory to delete.")), mcp.WithString("id", mcp.Required(), mcp.Description("ID of the sandbox to delete the file in.")), ) } func DeleteFile(ctx context.Context, request mcp.CallToolRequest, args DeleteFileArgs) (*mcp.CallToolResult, error) { apiClient, err := apiclient_cli.GetApiClient(nil, daytonaMCPHeaders) if err != nil { return &mcp.CallToolResult{IsError: true}, err } if args.Id == nil || *args.Id == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("sandbox ID is required") } if args.FilePath == nil || *args.FilePath == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("filePath parameter is required") } // Execute delete command execResponse, _, err := apiClient.ToolboxAPI.ExecuteCommandDeprecated(ctx, *args.Id). ExecuteRequest(*apiclient.NewExecuteRequest(fmt.Sprintf("rm -rf %s", *args.FilePath))). Execute() if err != nil { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error deleting file: %v", err) } log.Infof("Deleted file: %s", *args.FilePath) return mcp.NewToolResultText(fmt.Sprintf("Deleted file: %s\nOutput: %s", *args.FilePath, execResponse.Result)), nil } ================================================ FILE: apps/cli/mcp/tools/destroy_sandbox.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package tools import ( "context" "fmt" "time" "github.com/daytonaio/daytona/cli/apiclient" "github.com/mark3labs/mcp-go/mcp" log "github.com/sirupsen/logrus" ) type DestroySandboxArgs struct { Id *string `json:"id,omitempty"` } func GetDestroySandboxTool() mcp.Tool { return mcp.NewTool("destroy_sandbox", mcp.WithDescription("Destroy a sandbox with Daytona"), mcp.WithString("id", mcp.Required(), mcp.Description("ID of the sandbox to destroy.")), ) } func DestroySandbox(ctx context.Context, request mcp.CallToolRequest, args DestroySandboxArgs) (*mcp.CallToolResult, error) { apiClient, err := apiclient.GetApiClient(nil, daytonaMCPHeaders) if err != nil { return &mcp.CallToolResult{IsError: true}, err } if args.Id == nil || *args.Id == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("sandbox ID is required") } // Destroy sandbox with retries maxRetries := 3 retryDelay := time.Second * 2 for retry := range maxRetries { _, _, err := apiClient.SandboxAPI.DeleteSandbox(ctx, *args.Id).Execute() if err != nil { if retry == maxRetries-1 { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("failed to destroy sandbox after %d retries: %v", maxRetries, err) } log.Infof("Sandbox creation failed, retrying: %v", err) time.Sleep(retryDelay) retryDelay = retryDelay * 3 / 2 // Exponential backoff continue } log.Infof("Destroyed sandbox with ID: %s", *args.Id) return mcp.NewToolResultText(fmt.Sprintf("Destroyed sandbox with ID %s", *args.Id)), nil } return &mcp.CallToolResult{IsError: true}, fmt.Errorf("failed to destroy sandbox after %d retries", maxRetries) } ================================================ FILE: apps/cli/mcp/tools/download_file.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package tools import ( "context" "encoding/json" "fmt" "io" "path/filepath" "github.com/daytonaio/daytona/cli/apiclient" "github.com/mark3labs/mcp-go/mcp" ) type FileDownloadArgs struct { Id *string `json:"id,omitempty"` FilePath *string `json:"filePath,omitempty"` } type Content struct { Type string `json:"type"` Text string `json:"text,omitempty"` Data string `json:"data,omitempty"` } func GetFileDownloadTool() mcp.Tool { return mcp.NewTool("file_download", mcp.WithDescription("Download a file from the Daytona sandbox. Returns the file content either as text or as a base64 encoded image. Handles special cases like matplotlib plots stored as JSON with embedded base64 images."), mcp.WithString("filePath", mcp.Required(), mcp.Description("Path to the file to download.")), mcp.WithString("id", mcp.Required(), mcp.Description("ID of the sandbox to download the file from.")), ) } func FileDownload(ctx context.Context, request mcp.CallToolRequest, args FileDownloadArgs) (*mcp.CallToolResult, error) { apiClient, err := apiclient.GetApiClient(nil, daytonaMCPHeaders) if err != nil { return &mcp.CallToolResult{IsError: true}, err } if args.Id == nil || *args.Id == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("sandbox ID is required") } if args.FilePath == nil || *args.FilePath == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("filePath parameter is required") } // Download the file file, _, err := apiClient.ToolboxAPI.DownloadFileDeprecated(ctx, *args.Id).Path(*args.FilePath).Execute() if err != nil { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error downloading file: %v", err) } defer file.Close() // Read file content content, err := io.ReadAll(file) if err != nil { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error reading file content: %v", err) } // Process file content based on file type ext := filepath.Ext(*args.FilePath) var result []Content switch ext { case ".png", ".jpg", ".jpeg", 
".gif": // For image files, return as base64 encoded data result = []Content{{ Type: "image", Data: string(content), }} case ".json": // For JSON files, try to parse and handle special cases like matplotlib plots var jsonData map[string]interface{} if err := json.Unmarshal(content, &jsonData); err != nil { // If not valid JSON, return as text result = []Content{{ Type: "text", Text: string(content), }} } else { // Check if it's a matplotlib plot if _, ok := jsonData["data"]; ok { result = []Content{{ Type: "image", Data: jsonData["data"].(string), }} } else { result = []Content{{ Type: "text", Text: string(content), }} } } default: // For all other files, return as text result = []Content{{ Type: "text", Text: string(content), }} } // Convert result to JSON resultJSON, err := json.Marshal(result) if err != nil { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error marshaling result: %v", err) } return mcp.NewToolResultText(string(resultJSON)), nil } ================================================ FILE: apps/cli/mcp/tools/execute_command.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package tools import ( "context" "encoding/json" "fmt" "strings" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "github.com/mark3labs/mcp-go/mcp" log "github.com/sirupsen/logrus" ) type ExecuteCommandArgs struct { Id *string `json:"id,omitempty"` Command *string `json:"command,omitempty"` } type CommandResult struct { Stdout string `json:"stdout"` Stderr string `json:"stderr"` ExitCode int `json:"exitCode"` ErrorType string `json:"errorType,omitempty"` } func GetExecuteCommandTool() mcp.Tool { return mcp.NewTool("execute_command", mcp.WithDescription("Execute shell commands in the ephemeral Daytona Linux environment. Returns full stdout and stderr output with exit codes. 
Commands have sandbox user permissions and can install packages, modify files, and interact with running services. Always use /tmp directory. Use verbose flags where available for better output."), mcp.WithString("command", mcp.Required(), mcp.Description("Command to execute.")), mcp.WithString("id", mcp.Required(), mcp.Description("ID of the sandbox to execute the command in.")), ) } func ExecuteCommand(ctx context.Context, request mcp.CallToolRequest, args ExecuteCommandArgs) (*mcp.CallToolResult, error) { apiClient, err := apiclient_cli.GetApiClient(nil, daytonaMCPHeaders) if err != nil { return &mcp.CallToolResult{IsError: true}, err } if args.Id == nil || *args.Id == "" { return returnCommandError("Sandbox ID is required", "SandboxError") } if args.Command == nil || *args.Command == "" { return returnCommandError("Command must be a non-empty string", "ValueError") } // Process the command command := strings.TrimSpace(*args.Command) if strings.Contains(command, "&&") || strings.HasPrefix(command, "cd ") { // Wrap complex commands in /bin/sh -c command = fmt.Sprintf("/bin/sh -c %s", shellQuote(command)) } log.Infof("Executing command: %s", command) // Execute the command result, _, err := apiClient.ToolboxAPI.ExecuteCommandDeprecated(ctx, *args.Id). ExecuteRequest(*apiclient.NewExecuteRequest(command)). Execute() if err != nil { // Classify error types errStr := err.Error() switch { case strings.Contains(errStr, "Connection") || strings.Contains(errStr, "Timeout"): return returnCommandError(fmt.Sprintf("Network error during command execution: %s", errStr), "NetworkError") case strings.Contains(errStr, "Unauthorized") || strings.Contains(errStr, "401"): return returnCommandError("Authentication failed during command execution. 
Please check your API key", "NetworkError") default: return returnCommandError(fmt.Sprintf("Command execution failed: %s", errStr), "CommandExecutionError") } } // Process command output cmdResult := CommandResult{ Stdout: strings.TrimSpace(result.Result), ExitCode: int(result.ExitCode), } // Log truncated output outputLen := len(cmdResult.Stdout) logOutput := cmdResult.Stdout if outputLen > 500 { logOutput = cmdResult.Stdout[:500] + "..." } log.Infof("Command completed - exit code: %d, output length: %d", cmdResult.ExitCode, outputLen) log.Debugf("Command output (truncated): %s", logOutput) // Check for non-zero exit code if cmdResult.ExitCode > 0 { log.Infof("Command exited with non-zero status - exit code: %d", cmdResult.ExitCode) } // Convert result to JSON resultJSON, err := json.MarshalIndent(cmdResult, "", " ") if err != nil { return returnCommandError(fmt.Sprintf("Error marshaling result: %v", err), "CommandExecutionError") } return mcp.NewToolResultText(string(resultJSON)), nil } // Helper function to return command errors in a consistent format func returnCommandError(message, errorType string) (*mcp.CallToolResult, error) { return &mcp.CallToolResult{ IsError: true, Result: mcp.Result{ Meta: map[string]interface{}{ "Stdout": "", "Stderr": message, "ExitCode": -1, "ErrorType": errorType, }, }, }, nil } // Helper function to quote shell commands func shellQuote(s string) string { // Simple shell quoting - wrap in single quotes and escape existing single quotes return "'" + strings.ReplaceAll(s, "'", "'\"'\"'") + "'" } ================================================ FILE: apps/cli/mcp/tools/file_info.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package tools import ( "context" "encoding/json" "fmt" "github.com/daytonaio/daytona/cli/apiclient" "github.com/mark3labs/mcp-go/mcp" log "github.com/sirupsen/logrus" ) type FileInfoArgs struct { Id *string `json:"id,omitempty"` FilePath *string `json:"filePath,omitempty"` } func GetFileInfoTool() mcp.Tool { return mcp.NewTool("get_file_info", mcp.WithDescription("Get information about a file in the Daytona sandbox."), mcp.WithString("filePath", mcp.Required(), mcp.Description("Path to the file to get information about.")), mcp.WithString("id", mcp.Required(), mcp.Description("ID of the sandbox to get the file information from.")), ) } func FileInfo(ctx context.Context, request mcp.CallToolRequest, args FileInfoArgs) (*mcp.CallToolResult, error) { apiClient, err := apiclient.GetApiClient(nil, daytonaMCPHeaders) if err != nil { return &mcp.CallToolResult{IsError: true}, err } if args.Id == nil || *args.Id == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("sandbox ID is required") } if args.FilePath == nil || *args.FilePath == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("filePath parameter is required") } // Get file info fileInfo, _, err := apiClient.ToolboxAPI.GetFileInfoDeprecated(ctx, *args.Id).Path(*args.FilePath).Execute() if err != nil { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error getting file info: %v", err) } // Convert file info to JSON fileInfoJSON, err := json.MarshalIndent(fileInfo, "", " ") if err != nil { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error marshaling file info: %v", err) } log.Infof("Retrieved file info for: %s", *args.FilePath) return mcp.NewToolResultText(string(fileInfoJSON)), nil } ================================================ FILE: apps/cli/mcp/tools/git_clone.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package tools import ( "context" "fmt" apiclient_cli "github.com/daytonaio/daytona/cli/apiclient" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "github.com/mark3labs/mcp-go/mcp" log "github.com/sirupsen/logrus" ) type GitCloneArgs struct { Id *string `json:"id,omitempty"` Url *string `json:"url,omitempty"` Path *string `json:"path,omitempty"` Branch *string `json:"branch,omitempty"` CommitId *string `json:"commitId,omitempty"` Username *string `json:"username,omitempty"` Password *string `json:"password,omitempty"` } func GetGitCloneTool() mcp.Tool { return mcp.NewTool("git_clone", mcp.WithDescription("Clone a Git repository into the Daytona sandbox."), mcp.WithString("url", mcp.Required(), mcp.Description("URL of the Git repository to clone.")), mcp.WithString("path", mcp.Description("Directory to clone the repository into (defaults to current directory).")), mcp.WithString("branch", mcp.Description("Branch to clone.")), mcp.WithString("commitId", mcp.Description("Commit ID to clone.")), mcp.WithString("username", mcp.Description("Username to clone the repository with.")), mcp.WithString("password", mcp.Description("Password to clone the repository with.")), mcp.WithString("id", mcp.Required(), mcp.Description("ID of the sandbox to clone the repository in.")), ) } func GitClone(ctx context.Context, request mcp.CallToolRequest, args GitCloneArgs) (*mcp.CallToolResult, error) { apiClient, err := apiclient_cli.GetApiClient(nil, daytonaMCPHeaders) if err != nil { return &mcp.CallToolResult{IsError: true}, err } if args.Id == nil || *args.Id == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("sandbox ID is required") } gitCloneRequest, err := getGitCloneRequest(args) if err != nil { return &mcp.CallToolResult{IsError: true}, err } _, err = apiClient.ToolboxAPI.GitCloneRepositoryDeprecated(ctx, *args.Id).GitCloneRequest(*gitCloneRequest).Execute() if err != nil { return &mcp.CallToolResult{IsError: true}, 
fmt.Errorf("error cloning repository: %v", err) } log.Infof("Cloned repository: %s to %s", gitCloneRequest.Url, gitCloneRequest.Path) return mcp.NewToolResultText(fmt.Sprintf("Cloned repository: %s to %s", gitCloneRequest.Url, gitCloneRequest.Path)), nil } func getGitCloneRequest(args GitCloneArgs) (*apiclient.GitCloneRequest, error) { gitCloneRequest := apiclient.GitCloneRequest{} if args.Url == nil || *args.Url == "" { return nil, fmt.Errorf("url parameter is required") } gitCloneRequest.Url = *args.Url gitCloneRequest.Path = "." if args.Path != nil && *args.Path != "" { gitCloneRequest.Path = *args.Path } if args.Branch != nil && *args.Branch != "" { gitCloneRequest.Branch = args.Branch } if args.CommitId != nil && *args.CommitId != "" { gitCloneRequest.CommitId = args.CommitId } if args.Username != nil && *args.Username != "" { gitCloneRequest.Username = args.Username } if args.Password != nil && *args.Password != "" { gitCloneRequest.Password = args.Password } return &gitCloneRequest, nil } ================================================ FILE: apps/cli/mcp/tools/list_files.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package tools import ( "context" "encoding/json" "fmt" "github.com/daytonaio/daytona/cli/apiclient" "github.com/mark3labs/mcp-go/mcp" log "github.com/sirupsen/logrus" ) type ListFilesArgs struct { Id *string `json:"id,omitempty"` Path *string `json:"path,omitempty"` } func GetListFilesTool() mcp.Tool { return mcp.NewTool("list_files", mcp.WithDescription("List files in a directory in the Daytona sandbox."), mcp.WithString("path", mcp.Description("Path to the directory to list files from (defaults to current directory).")), mcp.WithString("id", mcp.Required(), mcp.Description("ID of the sandbox to list the files from.")), ) } func ListFiles(ctx context.Context, request mcp.CallToolRequest, args ListFilesArgs) (*mcp.CallToolResult, error) { apiClient, err := apiclient.GetApiClient(nil, daytonaMCPHeaders) if err != nil { return &mcp.CallToolResult{IsError: true}, err } if args.Id == nil || *args.Id == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("sandbox ID is required") } // Get directory path from request arguments (optional) dirPath := "." if args.Path != nil && *args.Path != "" { dirPath = *args.Path } // List files files, _, err := apiClient.ToolboxAPI.ListFilesDeprecated(ctx, *args.Id).Path(dirPath).Execute() if err != nil { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error listing files: %v", err) } // Convert files to JSON filesJSON, err := json.MarshalIndent(files, "", " ") if err != nil { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error marshaling files: %v", err) } log.Infof("Listed files in directory: %s", dirPath) return mcp.NewToolResultText(string(filesJSON)), nil } ================================================ FILE: apps/cli/mcp/tools/move_file.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package tools import ( "context" "fmt" "github.com/daytonaio/daytona/cli/apiclient" "github.com/mark3labs/mcp-go/mcp" log "github.com/sirupsen/logrus" ) type MoveFileArgs struct { Id *string `json:"id,omitempty"` SourcePath *string `json:"sourcePath,omitempty"` DestPath *string `json:"destPath,omitempty"` } func GetMoveFileTool() mcp.Tool { return mcp.NewTool("move_file", mcp.WithDescription("Move or rename a file in the Daytona sandbox."), mcp.WithString("sourcePath", mcp.Required(), mcp.Description("Source path of the file to move.")), mcp.WithString("destPath", mcp.Required(), mcp.Description("Destination path where to move the file.")), mcp.WithString("id", mcp.Required(), mcp.Description("ID of the sandbox to move the file in.")), ) } func MoveFile(ctx context.Context, request mcp.CallToolRequest, args MoveFileArgs) (*mcp.CallToolResult, error) { apiClient, err := apiclient.GetApiClient(nil, daytonaMCPHeaders) if err != nil { return &mcp.CallToolResult{IsError: true}, err } if args.Id == nil || *args.Id == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("sandbox ID is required") } // Get source and destination paths from request arguments if args.SourcePath == nil || *args.SourcePath == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("sourcePath parameter is required") } if args.DestPath == nil || *args.DestPath == "" { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("destPath parameter is required") } _, err = apiClient.ToolboxAPI.MoveFileDeprecated(ctx, *args.Id).Source(*args.SourcePath).Destination(*args.DestPath).Execute() if err != nil { return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error moving file: %v", err) } log.Infof("Moved file from %s to %s", *args.SourcePath, *args.DestPath) return mcp.NewToolResultText(fmt.Sprintf("Moved file from %s to %s", *args.SourcePath, *args.DestPath)), nil } ================================================ FILE: apps/cli/mcp/tools/preview_link.go 
================================================

// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package tools

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	apiclient_cli "github.com/daytonaio/daytona/cli/apiclient"
	apiclient "github.com/daytonaio/daytona/libs/api-client-go"
	"github.com/mark3labs/mcp-go/mcp"
	log "github.com/sirupsen/logrus"
)

// PreviewLinkArgs are the arguments accepted by the preview_link tool.
type PreviewLinkArgs struct {
	Id          *string `json:"id,omitempty"`          // target sandbox ID
	Port        *int32  `json:"port,omitempty"`        // port to expose
	CheckServer *bool   `json:"checkServer,omitempty"` // probe the port before exposing
	Description *string `json:"description,omitempty"` // human-readable service label
}

// GetPreviewLinkTool describes the preview_link MCP tool.
func GetPreviewLinkTool() mcp.Tool {
	return mcp.NewTool("preview_link",
		mcp.WithDescription("Generate accessible preview URLs for web applications running in the Daytona sandbox. Creates a secure tunnel to expose local ports externally without configuration. Validates if a server is actually running on the specified port and provides diagnostic information for troubleshooting. Supports custom descriptions and metadata for better organization of multiple services."),
		mcp.WithNumber("port", mcp.Required(), mcp.Description("Port to expose.")),
		mcp.WithString("description", mcp.Required(), mcp.Description("Description of the service.")),
		mcp.WithBoolean("checkServer", mcp.Required(), mcp.Description("Check if a server is running on the specified port.")),
		mcp.WithString("id", mcp.Required(), mcp.Description("ID of the sandbox to generate the preview link for.")),
	)
}

// PreviewLink resolves the externally reachable preview URL for a sandbox
// port. When checkServer is true it first curls localhost:<port> inside the
// sandbox and, if nothing responds, returns process diagnostics instead of a
// link; after fetching the URL it optionally probes the URL itself and logs
// reachability and HTTP status.
func PreviewLink(ctx context.Context, request mcp.CallToolRequest, args PreviewLinkArgs) (*mcp.CallToolResult, error) {
	apiClient, err := apiclient_cli.GetApiClient(nil, daytonaMCPHeaders)
	if err != nil {
		return nil, err
	}

	if args.Id == nil || *args.Id == "" {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("sandbox ID is required")
	}

	if args.Port == nil {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("port parameter is required")
	}

	// NOTE(review): equivalent to dereferencing args.CheckServer with a nil
	// guard — the assignment only happens when the flag is present and true.
	checkServer := false
	if args.CheckServer != nil && *args.CheckServer {
		checkServer = *args.CheckServer
	}

	log.Infof("Generating preview link - port: %d", *args.Port)

	// Get the sandbox using sandbox ID
	sandbox, _, err := apiClient.SandboxAPI.GetSandbox(ctx, *args.Id).Execute()
	if err != nil {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("failed to get sandbox: %v", err)
	}

	if sandbox == nil {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("no sandbox available")
	}

	// Check if server is running on specified port
	if checkServer {
		log.Infof("Checking if server is running - port: %d", *args.Port)

		// curl prints the HTTP status code, or "error" when the connection fails.
		checkCmd := fmt.Sprintf("curl -s -o /dev/null -w '%%{http_code}' http://localhost:%d --max-time 2 || echo 'error'", *args.Port)

		result, _, err := apiClient.ToolboxAPI.ExecuteCommandDeprecated(ctx, *args.Id).ExecuteRequest(*apiclient.NewExecuteRequest(checkCmd)).Execute()
		if err != nil {
			return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error checking server: %v", err)
		}

		response := strings.TrimSpace(result.Result)
		// "000" (prefix "0") means curl could not connect.
		if response == "error" || strings.HasPrefix(response, "0") {
			log.Infof("No server detected - port: %d", *args.Port)

			// Check what might be using the port
			psCmd := fmt.Sprintf("ps aux | grep ':%d' | grep -v grep || echo 'No process found'", *args.Port)

			psResult, _, err := apiClient.ToolboxAPI.ExecuteCommandDeprecated(ctx, *args.Id).ExecuteRequest(*apiclient.NewExecuteRequest(psCmd)).Execute()
			if err != nil {
				return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error checking processes: %v", err)
			}

			return &mcp.CallToolResult{IsError: true}, fmt.Errorf("no server detected on port %d. Process info: %s", *args.Port, strings.TrimSpace(psResult.Result))
		}
	}

	// Fetch preview URL
	previewURL, _, err := apiClient.SandboxAPI.GetPortPreviewUrl(ctx, *args.Id, float32(*args.Port)).Execute()
	if err != nil {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("failed to get preview URL: %v", err)
	}

	// Test URL accessibility if requested; failures here are only logged,
	// the link is still returned.
	var accessible bool
	var statusCode string

	if checkServer {
		checkCmd := fmt.Sprintf("curl -s -o /dev/null -w '%%{http_code}' %s --max-time 3 || echo 'error'", previewURL.Url)

		result, _, err := apiClient.ToolboxAPI.ExecuteCommandDeprecated(ctx, *args.Id).ExecuteRequest(*apiclient.NewExecuteRequest(checkCmd)).Execute()
		if err != nil {
			log.Errorf("Error checking preview URL: %v", err)
		} else {
			response := strings.TrimSpace(result.Result)
			accessible = response != "error" && !strings.HasPrefix(response, "0")
			// Only keep the status when curl produced a numeric HTTP code.
			if _, err := strconv.Atoi(response); err == nil {
				statusCode = response
			}
		}
	}

	log.Infof("Preview link generated: %s", previewURL.Url)
	log.Infof("Accessible: %t", accessible)
	log.Infof("Status code: %s", statusCode)

	return mcp.NewToolResultText(fmt.Sprintf("Preview link generated: %s", previewURL.Url)), nil
}

================================================
FILE: apps/cli/mcp/tools/upload_file.go
================================================

// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package tools

import (
	"context"
	"encoding/base64"
	"fmt"
	"os"
	"path/filepath"

	"github.com/daytonaio/daytona/cli/apiclient"
	"github.com/mark3labs/mcp-go/mcp"
	log "github.com/sirupsen/logrus"
)

// FileUploadArgs are the arguments accepted by the file_upload tool.
type FileUploadArgs struct {
	Id       *string `json:"id,omitempty"`       // target sandbox ID
	FilePath *string `json:"filePath,omitempty"` // destination path in the sandbox
	Content  *string `json:"content,omitempty"`  // file content (text or base64)
	Encoding *string `json:"encoding,omitempty"` // "base64" or anything else for text
	Overwrite *bool  `json:"overwrite,omitempty"` // replace an existing file
}

// GetFileUploadTool describes the file_upload MCP tool.
func GetFileUploadTool() mcp.Tool {
	return mcp.NewTool("file_upload",
		mcp.WithDescription("Upload files to the Daytona sandbox from text or base64-encoded binary content. Creates necessary parent directories automatically and verifies successful writes. Files persist during the session and have appropriate permissions for further tool operations. Supports overwrite controls and maintains original file formats."),
		mcp.WithString("filePath", mcp.Required(), mcp.Description("Path to the file to upload. Files should always be uploaded to the /tmp directory if user doesn't specify otherwise.")),
		mcp.WithString("content", mcp.Required(), mcp.Description("Content of the file to upload.")),
		mcp.WithString("encoding", mcp.Required(), mcp.Description("Encoding of the file to upload.")),
		mcp.WithBoolean("overwrite", mcp.Required(), mcp.Description("Overwrite the file if it already exists.")),
		mcp.WithString("id", mcp.Required(), mcp.Description("ID of the sandbox to upload the file to.")),
	)
}

// FileUpload writes the provided content to a file inside the sandbox.
// Content is decoded when encoding is "base64", staged into a local temp
// file, then uploaded via the toolbox API; parent directories are created
// best-effort beforehand, and the final file size is read back for logging.
//
// NOTE(review): empty content is rejected by validation, so a zero-byte file
// cannot be uploaded through this tool — confirm this is intended.
func FileUpload(ctx context.Context, request mcp.CallToolRequest, args FileUploadArgs) (*mcp.CallToolResult, error) {
	apiClient, err := apiclient.GetApiClient(nil, daytonaMCPHeaders)
	if err != nil {
		return nil, err
	}

	if args.Id == nil || *args.Id == "" {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("sandbox ID is required")
	}

	if args.FilePath == nil || *args.FilePath == "" {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("filePath parameter is required")
	}

	if args.Content == nil || *args.Content == "" {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("content parameter is required")
	}

	if args.Encoding == nil || *args.Encoding == "" {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("encoding parameter is required")
	}

	overwrite := false
	if args.Overwrite != nil && *args.Overwrite {
		overwrite = *args.Overwrite
	}

	// Get the sandbox using sandbox ID
	sandbox, _, err := apiClient.SandboxAPI.GetSandbox(ctx, *args.Id).Execute()
	if err != nil {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("failed to get sandbox: %v", err)
	}

	if sandbox == nil {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("no sandbox available")
	}

	// Check if file exists and handle overwrite
	if !overwrite {
		fileInfo, _, err := apiClient.ToolboxAPI.GetFileInfoDeprecated(ctx, *args.Id).Path(*args.FilePath).Execute()
		if err == nil && fileInfo != nil {
			return &mcp.CallToolResult{IsError: true}, fmt.Errorf("file '%s' already exists and overwrite=false", *args.FilePath)
		}
	}

	// Prepare content based on encoding
	var binaryContent []byte
	if *args.Encoding == "base64" {
		var err error

		binaryContent, err = base64.StdEncoding.DecodeString(*args.Content)
		if err != nil {
			return &mcp.CallToolResult{IsError: true}, fmt.Errorf("invalid base64 encoding: %v", err)
		}
	} else {
		// Default is text encoding
		binaryContent = []byte(*args.Content)
	}

	// Create parent directories if they don't exist
	// NOTE(review): filepath.Dir never returns "", it returns "." — this guard
	// always passes; verify whether "." should be skipped.
	parentDir := filepath.Dir(*args.FilePath)
	if parentDir != "" {
		_, err := apiClient.ToolboxAPI.CreateFolderDeprecated(ctx, *args.Id).Path(parentDir).Mode("0755").Execute()
		if err != nil {
			log.Errorf("Error creating parent directory: %v", err)
			// Continue anyway as upload might handle this
		}
	}

	// Upload the file: stage the bytes in a local temp file because the API
	// client takes an *os.File.
	tempFile, err := os.CreateTemp("", "upload-*")
	if err != nil {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error creating temp file: %v", err)
	}

	defer os.Remove(tempFile.Name()) // Clean up temp file when done
	defer tempFile.Close()

	// Write content to temp file
	if _, err := tempFile.Write(binaryContent); err != nil {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error writing to temp file: %v", err)
	}

	// Reset file pointer to beginning
	if _, err := tempFile.Seek(0, 0); err != nil {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error seeking temp file: %v", err)
	}

	// Upload the file
	_, err = apiClient.ToolboxAPI.UploadFileDeprecated(ctx, *args.Id).Path(*args.FilePath).File(tempFile).Execute()
	if err != nil {
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error uploading file: %v", err)
	}

	// Get file info for size
	fileInfo, _, err := apiClient.ToolboxAPI.GetFileInfoDeprecated(ctx, *args.Id).Path(*args.FilePath).Execute()
	if err != nil {
		log.Errorf("Error getting file info after upload: %v", err)
		return &mcp.CallToolResult{IsError: true}, fmt.Errorf("error getting file info after upload: %v", err)
	}

	fileSizeKB := float64(fileInfo.Size) / 1024

	log.Infof("File uploaded successfully: %s, size: %.2fKB", *args.FilePath, fileSizeKB)

	return mcp.NewToolResultText(fmt.Sprintf("File uploaded successfully: %s", *args.FilePath)), nil
}

================================================
FILE: apps/cli/pkg/minio/minio.go
================================================

// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package minio

import (
	"archive/tar"
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

// CONTEXT_TAR_FILE_NAME is the local tarball used to stage a build context.
const CONTEXT_TAR_FILE_NAME = "context.tar"

// Client wraps a MinIO client bound to a single bucket.
type Client struct {
	minioClient *minio.Client // underlying MinIO SDK client
	bucket      string        // bucket all operations target
}

// NewClient builds a bucket-scoped MinIO client using static V4 credentials.
func NewClient(endpoint, accessKey, secretKey, bucket string, useSSL bool, sessionToken string) (*Client, error) {
	minioClient, err := minio.New(endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(accessKey, secretKey, sessionToken),
		Secure: useSSL,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to create MinIO client: %w", err)
	}

	return &Client{
		minioClient: minioClient,
		bucket:      bucket,
	}, nil
}

// UploadFile stores data under objectName, creating the bucket on first use.
func (c *Client) UploadFile(ctx context.Context, objectName string, data []byte) error {
	exists, err := c.minioClient.BucketExists(ctx, c.bucket)
	if err != nil {
		return fmt.Errorf("error checking bucket existence: %w", err)
	}

	if !exists {
		err = c.minioClient.MakeBucket(ctx, c.bucket, minio.MakeBucketOptions{})
		if err != nil {
			return fmt.Errorf("error creating bucket: %w", err)
		}
	}

	reader := bytes.NewReader(data)
	objectSize := int64(len(data))

	_, err = c.minioClient.PutObject(ctx, c.bucket, objectName, reader, objectSize, minio.PutObjectOptions{
		ContentType: "application/octet-stream",
	})
	if err != nil {
		return fmt.Errorf("error uploading file: %w", err)
	}

	return nil
}

// readIgnoreFile loads an ignore file (e.g. .dockerignore) from rootPath and
// returns its patterns, skipping blanks and "#" comments. Returns nil when
// the file cannot be read.
func readIgnoreFile(rootPath, filename string) []string {
	ignoreFile := filepath.Join(rootPath, filename)

	content, err := os.ReadFile(ignoreFile)
	if err != nil {
		return nil
	}

	var patterns []string

	lines := strings.Split(string(content), "\n")
	for _, line := range lines {
		line = strings.TrimSpace(line)
		// Skip empty lines and comments
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}

		patterns = append(patterns, line)
	}

	return patterns
}

// matchPattern reports whether filePath matches any of the ignore patterns.
// This is an approximation of .dockerignore semantics: trailing-slash
// patterns match directory prefixes, "**" is handled by prefix/suffix
// splitting, "*" falls back to filepath.Match on the base name and the full
// path, and everything else is an exact or prefix match.
// NOTE(review): negation patterns ("!pattern") are not supported here.
func matchPattern(filePath string, patterns []string) bool {
	filePath = filepath.ToSlash(filePath)

	for _, pattern := range patterns {
		pattern = filepath.ToSlash(pattern)

		if strings.HasSuffix(pattern, "/") {
			pattern = strings.TrimSuffix(pattern, "/")
			if strings.HasPrefix(filePath, pattern+"/") || filePath == pattern {
				return true
			}

			continue
		}

		// Handle double star patterns (**) - matches any number of directories
		if strings.Contains(pattern, "**") {
			// Convert ** to a simpler pattern for basic matching
			parts := strings.Split(pattern, "**")
			if len(parts) == 2 {
				prefix := parts[0]
				suffix := parts[1]

				// Remove trailing/leading slashes from prefix/suffix
				prefix = strings.TrimSuffix(prefix, "/")
				suffix = strings.TrimPrefix(suffix, "/")

				if prefix == "" && suffix != "" {
					// Pattern like **/node_modules
					if strings.Contains(filePath, "/"+suffix) || strings.HasSuffix(filePath, suffix) || filePath == suffix {
						return true
					}
				} else if prefix != "" && suffix == "" {
					// Pattern like .git/**
					if strings.HasPrefix(filePath, prefix+"/") || filePath == prefix {
						return true
					}
				} else if prefix != "" && suffix != "" {
					// Pattern like src/**/test
					if strings.HasPrefix(filePath, prefix+"/") && (strings.Contains(filePath, "/"+suffix) || strings.HasSuffix(filePath, suffix)) {
						return true
					}
				}
			}

			continue
		}

		if strings.Contains(pattern, "*") {
			matched, err := filepath.Match(pattern, filepath.Base(filePath))
			if err == nil && matched {
				return true
			}

			// Also check full path for patterns like */node_modules
			matched, err = filepath.Match(pattern, filePath)
			if err == nil && matched {
				return true
			}

			continue
		}

		// Handle exact matches and prefix matches
		if filePath == pattern || strings.HasPrefix(filePath, pattern+"/") || filepath.Base(filePath) == pattern {
			return true
		}
	}

	return false
}

// shouldExcludeFile reports whether filePath (under rootPath) is excluded by
// the root's .dockerignore. Note the ignore file is re-read on every call.
func shouldExcludeFile(filePath, rootPath string) bool {
	relPath, err := filepath.Rel(rootPath, filePath)
	if err != nil {
		return false
	}

	dockerignorePatterns := readIgnoreFile(rootPath, ".dockerignore")
	if len(dockerignorePatterns) == 0 {
		return false
	}

	return matchPattern(relPath, dockerignorePatterns)
}

// ListObjects returns the keys of all objects in the bucket with the given
// prefix.
func (c *Client) ListObjects(ctx context.Context, prefix string) ([]string, error) {
	var objects []string

	objectCh := c.minioClient.ListObjects(ctx, c.bucket, minio.ListObjectsOptions{
		Prefix: prefix,
	})

	for object := range objectCh {
		if object.Err != nil {
			return nil, object.Err
		}

		objects = append(objects, object.Key)
	}

	return objects, nil
}

// ProcessDirectory packs dirPath into a local context tarball, honoring
// .dockerignore exclusions. (Definition continues past this excerpt.)
func (c *Client) ProcessDirectory(ctx context.Context, dirPath, orgID string, existingObjects map[string]bool) ([]string, error) {
	// Check if .dockerignore exists and provide helpful message if context is large
	dockerignoreExists := false
	if _, err := os.Stat(filepath.Join(dirPath, ".dockerignore")); err == nil {
		dockerignoreExists = true
	}

	tarFile, err := os.Create(CONTEXT_TAR_FILE_NAME)
	if err != nil {
		return nil, fmt.Errorf("failed to create tar file: %w", err)
	}
	defer tarFile.Close()

	tw := tar.NewWriter(tarFile)
	defer tw.Close()

	fileCount := 0
	totalSize := int64(0)
	warned := false

	err = filepath.Walk(dirPath, func(file string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Never pack the tarball we are currently writing into itself.
		if fi.Name() == CONTEXT_TAR_FILE_NAME {
			return nil
		}

		if shouldExcludeFile(file, dirPath) {
			relPath, _ := filepath.Rel(dirPath, file)
			if fi.IsDir() {
				fmt.Printf("Excluding directory: %s\n", relPath)
				return filepath.SkipDir
			}

			fmt.Printf("Excluding file: %s\n", relPath)

			return nil
		}

		header, err := tar.FileInfoHeader(fi, fi.Name())
		if err != nil {
return err } relPath, err := filepath.Rel(dirPath, file) if err != nil { return err } header.Name = relPath if err := tw.WriteHeader(header); err != nil { return err } // Write file contents if regular file if fi.Mode().IsRegular() { fileCount++ totalSize += fi.Size() if fileCount%1000 == 0 { fmt.Printf("Processing... %d files, %.2f MB total\n", fileCount, float64(totalSize)/(1024*1024)) } // Warn if context is getting very large (only warn once) if totalSize > 100*1024*1024 && !dockerignoreExists && !warned { fmt.Printf("Warning: Context size exceeds 100MB. Consider adding a .dockerignore file to exclude unnecessary files.\n") warned = true } f, err := os.Open(file) if err != nil { return err } defer f.Close() _, err = io.Copy(tw, f) return err } return nil }) if err != nil { if strings.Contains(err.Error(), "write too long") { return nil, fmt.Errorf("context directory is too large for tar archive. Please create a .dockerignore file to exclude large directories like .git, node_modules, dist, etc. 
Original error: %w", err) } return nil, fmt.Errorf("failed to process directory: %w", err) } fmt.Printf("Context processing complete: %d files, %.2f MB total\n", fileCount, float64(totalSize)/(1024*1024)) // Seek to start of tar file before calculating hash if _, err := tarFile.Seek(0, 0); err != nil { return nil, fmt.Errorf("failed to seek tar file: %w", err) } // Calculate hash of tar contents hasher := sha256.New() if _, err := io.Copy(hasher, tarFile); err != nil { return nil, fmt.Errorf("failed to hash tar: %w", err) } hash := hex.EncodeToString(hasher.Sum(nil)) objectName := fmt.Sprintf("%s/%s", orgID, hash) if _, exists := existingObjects[objectName]; !exists { err = c.CreateDirectory(ctx, objectName) if err != nil { return nil, err } if _, err := tarFile.Seek(0, 0); err != nil { return nil, fmt.Errorf("failed to seek tar file: %w", err) } tarContent, err := io.ReadAll(tarFile) if err != nil { return nil, fmt.Errorf("failed to read tar file: %w", err) } err = c.UploadFile(ctx, fmt.Sprintf("%s/%s", objectName, CONTEXT_TAR_FILE_NAME), tarContent) if err != nil { return nil, fmt.Errorf("failed to upload tar: %w", err) } if err := os.Remove(CONTEXT_TAR_FILE_NAME); err != nil { return nil, fmt.Errorf("failed to remove tar file: %w", err) } } else { fmt.Printf("Directory %s with hash %s already exists in storage\n", dirPath, hash) } return []string{hash}, nil } func (c *Client) ProcessFile(ctx context.Context, filePath, orgID string, existingObjects map[string]bool) (string, error) { fileContent, err := os.ReadFile(filePath) if err != nil { return "", fmt.Errorf("failed to read file: %w", err) } hasher := sha256.New() hasher.Write(fileContent) hash := hex.EncodeToString(hasher.Sum(nil)) objectName := fmt.Sprintf("%s/%s", orgID, hash) if _, exists := existingObjects[objectName]; !exists { var tarBuffer bytes.Buffer tarWriter := tar.NewWriter(&tarBuffer) fileName := filepath.Base(filePath) fileInfo, err := os.Stat(filePath) if err != nil { return "", 
fmt.Errorf("failed to stat file: %w", err) } header, err := tar.FileInfoHeader(fileInfo, "") if err != nil { return "", fmt.Errorf("failed to create tar header: %w", err) } header.Name = fileName if err := tarWriter.WriteHeader(header); err != nil { return "", fmt.Errorf("failed to write tar header: %w", err) } if _, err := tarWriter.Write(fileContent); err != nil { return "", fmt.Errorf("failed to write file to tar: %w", err) } if err := tarWriter.Close(); err != nil { return "", fmt.Errorf("failed to close tar writer: %w", err) } err = c.CreateDirectory(ctx, objectName) if err != nil { return "", err } // Upload tar file instead of raw content err = c.UploadFile(ctx, fmt.Sprintf("%s/%s", objectName, CONTEXT_TAR_FILE_NAME), tarBuffer.Bytes()) if err != nil { return "", err } } else { fmt.Printf("File %s with hash %s already exists in storage - skipping\n", filePath, hash) } return hash, nil } func (c *Client) CreateDirectory(ctx context.Context, directoryPath string) error { exists, err := c.minioClient.BucketExists(ctx, c.bucket) if err != nil { return fmt.Errorf("error checking bucket existence: %w", err) } if !exists { return fmt.Errorf("bucket does not exist") } // Ensure the directory path ends with a slash to represent a directory if !strings.HasSuffix(directoryPath, "/") { directoryPath = directoryPath + "/" } // Create an empty object to represent the directory emptyContent := []byte{} reader := bytes.NewReader(emptyContent) _, err = c.minioClient.PutObject(ctx, c.bucket, directoryPath, reader, 0, minio.PutObjectOptions{ ContentType: "application/directory", }) if err != nil { return fmt.Errorf("error creating directory marker: %w", err) } return nil } ================================================ FILE: apps/cli/project.json ================================================ { "name": "cli", "$schema": "../../node_modules/nx/schemas/project-schema.json", "projectType": "application", "sourceRoot": "apps/cli", "tags": [], "targets": { "build": { 
// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package toolbox

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/daytonaio/daytona/cli/config"
	apiclient "github.com/daytonaio/daytona/libs/api-client-go"
)

// ExecuteRequest is the JSON payload sent to the toolbox
// "process/execute" endpoint. Cwd and Timeout are omitted from the
// JSON when nil.
type ExecuteRequest struct {
	Command string   `json:"command"`
	Cwd     *string  `json:"cwd,omitempty"`
	Timeout *float32 `json:"timeout,omitempty"`
}

// ExecuteResponse is the JSON result of a command execution.
type ExecuteResponse struct {
	ExitCode float32 `json:"exitCode"`
	Result   string  `json:"result"`
}

// Client executes commands inside sandboxes via the toolbox proxy.
type Client struct {
	apiClient *apiclient.APIClient
}

// NewClient wraps the given API client in a toolbox client.
func NewClient(apiClient *apiclient.APIClient) *Client {
	return &Client{
		apiClient: apiClient,
	}
}

// Gets the toolbox proxy URL for a sandbox, caching by region in config
func (c *Client) getProxyURL(ctx context.Context, sandboxId, region string) (string, error) {
	// Check config cache first
	cachedURL, err := config.GetToolboxProxyUrl(region)
	if err == nil && cachedURL != "" {
		return cachedURL, nil
	}

	// Fetch from API
	toolboxProxyUrl, _, err := c.apiClient.SandboxAPI.GetToolboxProxyUrl(ctx, sandboxId).Execute()
	if err != nil {
		return "", fmt.Errorf("failed to get toolbox proxy URL: %w", err)
	}

	// Best-effort caching
	_ = config.SetToolboxProxyUrl(region, toolboxProxyUrl.Url)

	return toolboxProxyUrl.Url, nil
}

// ExecuteCommand runs the given command in the sandbox via its region's
// toolbox proxy. The proxy URL is resolved (and cached) per region.
func (c *Client) ExecuteCommand(ctx context.Context, sandbox *apiclient.Sandbox, request ExecuteRequest) (*ExecuteResponse, error) {
	proxyURL, err := c.getProxyURL(ctx, sandbox.Id, sandbox.Target)
	if err != nil {
		return nil, err
	}
	return c.executeCommandViaProxy(ctx, proxyURL, sandbox.Id, request)
}

// TODO: replace this with the toolbox api client at some point
//
// executeCommandViaProxy POSTs the request to
// {proxyUrl}/{sandboxId}/process/execute, attaching credentials from the
// active CLI profile, and decodes the JSON response. Non-200 responses are
// returned as errors with the raw body included.
func (c *Client) executeCommandViaProxy(ctx context.Context, proxyURL, sandboxId string, request ExecuteRequest) (*ExecuteResponse, error) {
	// Build the URL: {proxyUrl}/{sandboxId}/process/execute
	url := fmt.Sprintf("%s/%s/process/execute", strings.TrimSuffix(proxyURL, "/"), sandboxId)

	body, err := json.Marshal(request)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	cfg, err := config.GetConfig()
	if err != nil {
		return nil, err
	}
	activeProfile, err := cfg.GetActiveProfile()
	if err != nil {
		return nil, err
	}

	// Prefer a static API key; fall back to the OAuth access token.
	if activeProfile.Api.Key != nil {
		req.Header.Set("Authorization", "Bearer "+*activeProfile.Api.Key)
	} else if activeProfile.Api.Token != nil {
		req.Header.Set("Authorization", "Bearer "+activeProfile.Api.Token.AccessToken)
	}
	// Scope the request to the active organization when one is selected.
	if activeProfile.ActiveOrganizationId != nil {
		req.Header.Set("X-Daytona-Organization-ID", *activeProfile.ActiveOrganizationId)
	}

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to execute request: %w", err)
	}
	defer resp.Body.Close()

	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("request failed with status %d: %s", resp.StatusCode, string(respBody))
	}

	var response ExecuteResponse
	if err := json.Unmarshal(respBody, &response); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}

	return &response, nil
}
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package util

// Use generics to create a pointer to a value
func Pointer[T any](d T) *T {
	return &d
}

// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package common

import (
	"fmt"
	"strings"

	"github.com/charmbracelet/huh"
	"github.com/charmbracelet/lipgloss"
)

// Shared layout constants for the CLI's TUI views.
var DefaultLayoutMarginTop = 1
var DefaultHorizontalMargin = 1

// TUITableMinimumWidth is the terminal width below which views fall back
// to unstyled output.
var TUITableMinimumWidth = 80

var SeparatorString = lipgloss.NewStyle().Foreground(LightGray).Render("===")
var Checkmark = lipgloss.NewStyle().Foreground(lipgloss.Color("42")).SetString("✓").String()

// Width bounds and breakpoints used by GetContainerBreakpointWidth.
var (
	minimumWidth     = 40
	maximumWidth     = 160
	widthBreakpoints = []int{60, 80, 100, 120, 140, 160}
)

// RenderMainTitle prints a bold green title with vertical padding.
func RenderMainTitle(title string) {
	fmt.Println(lipgloss.NewStyle().Foreground(Green).Bold(true).Padding(1, 0, 1, 0).Render(title))
}

// RenderTip prints an indented hint line with bottom padding.
func RenderTip(message string) {
	fmt.Println(lipgloss.NewStyle().Padding(0, 0, 1, 1).Render(message))
}

// RenderInfoMessage prints a left-padded informational line.
func RenderInfoMessage(message string) {
	fmt.Println(lipgloss.NewStyle().PaddingLeft(1).Render(message))
}

// RenderInfoMessageBold prints a bold informational line with padding.
func RenderInfoMessageBold(message string) {
	fmt.Println(lipgloss.NewStyle().Bold(true).Padding(1, 0, 1, 1).Render(message))
}

// GetStyledMainTitle returns a title rendered as inverted (dark-on-light) text.
func GetStyledMainTitle(content string) string {
	return lipgloss.NewStyle().Foreground(Dark).Background(Light).Padding(0, 1).MarginTop(1).Render(content)
}

// GetInfoMessage returns a padded informational string without printing it.
func GetInfoMessage(message string) string {
	return lipgloss.NewStyle().Padding(1, 0, 1, 1).Render(message)
}

// GetContainerBreakpointWidth maps the terminal width to a content width:
// 0 below minimumWidth, otherwise derived from the first breakpoint above
// the terminal width (minus fixed padding and margins), capped at
// maximumWidth when wider than every breakpoint.
func GetContainerBreakpointWidth(terminalWidth int) int {
	if terminalWidth < minimumWidth {
		return 0
	}
	for _, width := range widthBreakpoints {
		if terminalWidth < width {
			return width - 20 - DefaultHorizontalMargin - DefaultHorizontalMargin
		}
	}
	return maximumWidth
}

// GetEnvVarsInput builds a huh multiline text input pre-filled with the
// given env vars (one KEY=VALUE per line) whose validator parses the text
// back into the map. Returns nil when envVars is nil.
//
// NOTE(review): the initial text is built by ranging over a Go map, so the
// line order is nondeterministic between invocations.
func GetEnvVarsInput(envVars *map[string]string) *huh.Text {
	if envVars == nil {
		return nil
	}

	var inputText string
	for key, value := range *envVars {
		inputText += fmt.Sprintf("%s=%s\n", key, value)
	}
	inputText = strings.TrimSuffix(inputText, "\n")

	return huh.NewText().
		Title("Environment Variables").
		Description("Enter environment variables in the format KEY=VALUE\nTo pass machine env variables at runtime, use $VALUE").
		CharLimit(-1).
		Value(&inputText).
		Validate(func(str string) error {
			// Re-parse the edited text; only overwrite *envVars when every
			// non-empty line is well-formed.
			tempEnvVars := map[string]string{}
			for i, line := range strings.Split(str, "\n") {
				if line == "" {
					continue
				}
				// SplitN keeps '=' characters inside the value intact.
				parts := strings.SplitN(line, "=", 2)
				if len(parts) != 2 {
					return fmt.Errorf("invalid format: %s on line %d", line, i+1)
				}
				tempEnvVars[parts[0]] = parts[1]
			}
			*envVars = tempEnvVars
			return nil
		})
}

// Bolds the message and prepends a checkmark
func GetPrettyLogLine(message string) string {
	return fmt.Sprintf("%s \033[1m%s\033[0m\n", lipgloss.NewStyle().Foreground(lipgloss.Color("42")).SetString("✓").String(), message)
}

// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package common

import (
	"fmt"
	"strings"

	"github.com/charmbracelet/bubbles/textinput"
	tea "github.com/charmbracelet/bubbletea"
	"github.com/charmbracelet/lipgloss"
)

// promptModel is the bubbletea model backing PromptForInput: a single
// text input plus a title/description header.
type promptModel struct {
	textInput textinput.Model
	err       error // set when the user cancels with Ctrl+C
	done      bool  // set on Enter or Ctrl+C; View renders nothing once done
	title     string
	desc      string
}

// Init starts the cursor blink.
func (m promptModel) Init() tea.Cmd {
	return textinput.Blink
}

// Update handles key events: Enter completes the prompt, Ctrl+C cancels
// with a "user cancelled" error; all other input goes to the text field.
func (m promptModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmd tea.Cmd
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.Type {
		case tea.KeyEnter:
			m.done = true
			return m, tea.Quit
		case tea.KeyCtrlC:
			m.done = true
			m.err = fmt.Errorf("user cancelled")
			return m, tea.Quit
		}
	}
	m.textInput, cmd = m.textInput.Update(msg)
	return m, cmd
}

// View renders the title, description, and input field; empty once done.
func (m promptModel) View() string {
	if m.done {
		return ""
	}
	titleStyle := lipgloss.NewStyle().
		Bold(true).
		MarginLeft(2).
		MarginTop(1)
	descStyle := lipgloss.NewStyle().
		Foreground(lipgloss.Color("241")).
		MarginLeft(2)
	return fmt.Sprintf("\n%s\n%s\n\n %s\n\n", titleStyle.Render(m.title), descStyle.Render(m.desc), m.textInput.View())
}
// PromptForInput runs a full-screen single-line text prompt and returns the
// trimmed input. Returns an error if the program fails or the user cancels.
func PromptForInput(prompt, title, desc string) (string, error) {
	ti := textinput.New()
	ti.Focus()
	ti.CharLimit = 156
	ti.Width = 80
	ti.Prompt = "› "

	m := promptModel{
		textInput: ti,
		title:     title,
		desc:      desc,
	}

	p := tea.NewProgram(m, tea.WithAltScreen())
	model, err := p.Run()
	if err != nil {
		return "", fmt.Errorf("error running prompt: %w", err)
	}

	finalModel, ok := model.(promptModel)
	if !ok {
		return "", fmt.Errorf("could not read model state")
	}
	// Surface a cancellation (Ctrl+C) recorded by Update.
	if finalModel.err != nil {
		return "", finalModel.err
	}

	return strings.TrimSpace(finalModel.textInput.Value()), nil
}

// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package common

import (
	tea "github.com/charmbracelet/bubbletea"
	"github.com/charmbracelet/lipgloss"
)

// SelectItem represents an item in the selection list
type SelectItem struct {
	Title string
	Desc  string
}

// SelectModel represents the selection UI model
type SelectModel struct {
	Title    string
	Items    []SelectItem
	Selected int    // index of the highlighted item
	Choice   string // title of the chosen item, set on Enter
	Quitting bool   // set on Ctrl+C; View renders nothing once quitting
}

// NewSelectModel creates a new select model with the given title and items
func NewSelectModel(title string, items []SelectItem) SelectModel {
	return SelectModel{
		Title:    title,
		Items:    items,
		Selected: 0,
	}
}

func (m SelectModel) Init() tea.Cmd {
	return nil
}

// Update handles navigation (up/k, down/j), selection (enter), and
// cancellation (ctrl+c).
func (m SelectModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "ctrl+c":
			m.Quitting = true
			return m, tea.Quit
		case "up", "k":
			if m.Selected > 0 {
				m.Selected--
			}
		case "down", "j":
			if m.Selected < len(m.Items)-1 {
				m.Selected++
			}
		case "enter":
			m.Choice = m.Items[m.Selected].Title
			return m, tea.Quit
		}
	}
	return m, nil
}

// View renders the title and the item list, highlighting the selected row
// in green with a "› " cursor; empty once quitting.
func (m SelectModel) View() string {
	if m.Quitting {
		return ""
	}
	s := lipgloss.NewStyle().
		Bold(true).
		MarginLeft(2).
		MarginTop(1).
		Render(m.Title) + "\n\n"
	for i, item := range m.Items {
		cursor := "  "
		style := lipgloss.NewStyle().
			Foreground(lipgloss.Color("151")).
			PaddingLeft(2)
		if i == m.Selected {
			cursor = "› "
			style = style.Foreground(lipgloss.Color("42")).Bold(true)
		}
		s += style.Render(cursor+item.Title) + "\n"
		s += lipgloss.NewStyle().
			PaddingLeft(4).
			Foreground(lipgloss.Color("241")).
			Render(item.Desc) + "\n\n"
	}
	return s
}

// Select displays a selection prompt with the given title and items
// Returns the selected item's title and any error that occurred
//
// NOTE(review): cancellation (Ctrl+C) and a failed model cast both return
// ("", nil) rather than an error — callers must treat "" as "no choice".
func Select(title string, items []SelectItem) (string, error) {
	p := tea.NewProgram(NewSelectModel(title, items), tea.WithAltScreen())
	m, err := p.Run()
	if err != nil {
		return "", err
	}
	finalModel, ok := m.(SelectModel)
	if !ok {
		return "", nil
	}
	if finalModel.Quitting {
		return "", nil
	}
	return finalModel.Choice, nil
}

// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package common

import (
	"fmt"
	"os"

	"github.com/charmbracelet/bubbles/list"
	"github.com/charmbracelet/huh"
	"github.com/charmbracelet/lipgloss"
)

// Sentinels for the synthetic "load more" entry appended to paginated lists.
var (
	ListNavigationText       = "load more"
	ListNavigationRenderText = "+ Load more.."
)
// Base palette shared by all CLI views (same value for light/dark terminals
// unless stated otherwise).
var (
	Green       = lipgloss.AdaptiveColor{Light: "#23cc71", Dark: "#23cc71"}
	Blue        = lipgloss.AdaptiveColor{Light: "#017ffe", Dark: "#017ffe"}
	Yellow      = lipgloss.AdaptiveColor{Light: "#d4ed2d", Dark: "#d4ed2d"}
	Cyan        = lipgloss.AdaptiveColor{Light: "#3ef7e5", Dark: "#3ef7e5"}
	DimmedGreen = lipgloss.AdaptiveColor{Light: "#7be0a9", Dark: "#7be0a9"}
	Orange      = lipgloss.AdaptiveColor{Light: "#e3881b", Dark: "#e3881b"}
	Light       = lipgloss.AdaptiveColor{Light: "#000", Dark: "#fff"}
	Dark        = lipgloss.AdaptiveColor{Light: "#fff", Dark: "#000"}
	Gray        = lipgloss.AdaptiveColor{Light: "243", Dark: "243"}
	LightGray   = lipgloss.AdaptiveColor{Light: "#828282", Dark: "#828282"}
	Red         = lipgloss.AdaptiveColor{Light: "#FF4672", Dark: "#ED567A"}
)

// Status colors; several states deliberately alias a base status color.
var (
	ColorPending      = lipgloss.AdaptiveColor{Light: "#cce046", Dark: "#cce046"}
	ColorSuccess      = lipgloss.AdaptiveColor{Light: "#2ecc71", Dark: "#2ecc71"}
	ColorStarting     = ColorSuccess
	ColorStopped      = lipgloss.AdaptiveColor{Light: "#a2a2a2", Dark: "#a2a2a2"}
	ColorStopping     = ColorStopped
	ColorError        = lipgloss.AdaptiveColor{Light: "#e74c3c", Dark: "#e74c3c"}
	ColorDeleting     = ColorStopped
	ColorDeleted      = ColorStopped
	ColorUnresponsive = ColorError
)

// Table and row styles shared by list views.
var (
	BaseTableStyleHorizontalPadding = 4
	BaseTableStyle                  = lipgloss.NewStyle().
					PaddingLeft(BaseTableStyleHorizontalPadding).
					PaddingRight(BaseTableStyleHorizontalPadding).
					PaddingTop(1).
					Margin(1, 0)
	NameStyle           = lipgloss.NewStyle().Foreground(Light)
	LinkStyle           = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("12"))
	ActiveStyle         = lipgloss.NewStyle().Foreground(Green)
	InactiveStyle       = lipgloss.NewStyle().Foreground(Orange)
	DefaultRowDataStyle = lipgloss.NewStyle().Foreground(Gray)
	BaseCellStyle       = lipgloss.NewRenderer(os.Stdout).NewStyle().Padding(0, 4, 1, 0)
	TableHeaderStyle    = BaseCellStyle.Foreground(LightGray).Bold(false).Padding(0).MarginRight(4)
)

// Per-state text styles used when rendering entity state labels.
var (
	UndefinedStyle     = lipgloss.NewStyle().Foreground(ColorPending)
	PendingStyle       = lipgloss.NewStyle().Foreground(ColorPending)
	RunningStyle       = lipgloss.NewStyle().Foreground(ColorPending)
	RunSuccessfulStyle = lipgloss.NewStyle().Foreground(ColorSuccess)
	CreatingStyle      = lipgloss.NewStyle().Foreground(ColorPending)
	StartedStyle       = lipgloss.NewStyle().Foreground(ColorSuccess)
	StartingStyle      = lipgloss.NewStyle().Foreground(ColorStarting)
	StoppedStyle       = lipgloss.NewStyle().Foreground(ColorStopped)
	StoppingStyle      = lipgloss.NewStyle().Foreground(ColorStopping)
	ErrorStyle         = lipgloss.NewStyle().Foreground(ColorError)
	DeletingStyle      = lipgloss.NewStyle().Foreground(ColorDeleting)
	DeletedStyle       = lipgloss.NewStyle().Foreground(ColorDeleted)
	UnresponsiveStyle  = lipgloss.NewStyle().Foreground(ColorUnresponsive)
)

// LogPrefixColors is cycled through to color per-source log prefixes.
var LogPrefixColors = []lipgloss.AdaptiveColor{
	Blue, Orange, Cyan, Yellow,
}

// SelectionListOptions tunes GetStyledSelectList behavior.
type SelectionListOptions struct {
	ParentIdentifier     string // shown in the status bar next to the item count
	IsPaginationDisabled bool   // when false, the "Load more" entry survives filtering
	CursorIndex          int    // initial selection index (e.g. first newly loaded item)
}

// GetStyledSelectList builds a themed bubbles list. When pagination is
// enabled, a custom filter keeps the synthetic "Load more" entry visible in
// filtered results so the user can always page further.
func GetStyledSelectList(items []list.Item, listOptions ...SelectionListOptions) list.Model {
	d := list.NewDefaultDelegate()

	d.Styles.SelectedTitle = lipgloss.NewStyle().
		Border(lipgloss.NormalBorder(), false, false, false, true).
		BorderForeground(Green).
		Foreground(Green).
		Bold(true).
		Padding(0, 0, 0, 1)
	d.Styles.SelectedDesc = d.Styles.SelectedTitle.Foreground(DimmedGreen).Bold(false)

	l := list.New(items, d, 0, 0)

	if listOptions != nil {
		// Sets the mouse cursor to point to the first index of newly loaded items
		if listOptions[0].CursorIndex > 0 {
			l.Select(listOptions[0].CursorIndex)
		}
		if !listOptions[0].IsPaginationDisabled {
			// Add the 'Load More' option in search filter results
			l.Filter = func(term string, targets []string) []list.Rank {
				ranks := list.DefaultFilter(term, targets)

				loadMoreIdx := -1
				// Ideally 'Load More' option if present should be found at the last index
				for i := len(targets) - 1; i >= 0; i-- {
					if targets[i] == ListNavigationRenderText {
						loadMoreIdx = i
						break
					}
				}
				if loadMoreIdx == -1 {
					return ranks
				}

				// Return if already present
				for i := range ranks {
					if ranks[i].Index == loadMoreIdx {
						return ranks
					}
				}

				// Append 'Load More' option in search filter results
				ranks = append(ranks, list.Rank{
					Index: loadMoreIdx,
				})
				return ranks
			}
		}
	}

	l.Styles.FilterPrompt = lipgloss.NewStyle().Foreground(Green)
	l.Styles.FilterCursor = lipgloss.NewStyle().Foreground(Green).Background(Green)
	l.Styles.Title = lipgloss.NewStyle().Foreground(Dark).Bold(true).
		Background(lipgloss.Color("#fff")).Padding(0)
	l.FilterInput.PromptStyle = lipgloss.NewStyle().Foreground(Green)
	l.FilterInput.TextStyle = lipgloss.NewStyle().Foreground(Green)

	// Status bar wording: the plural form embeds the parent identifier when
	// one was supplied.
	singularItemName := "item " + SeparatorString
	var pluralItemName string
	if listOptions == nil {
		pluralItemName = fmt.Sprintf("items\n\n%s", SeparatorString)
	} else if len(listOptions[0].ParentIdentifier) > 0 {
		pluralItemName = fmt.Sprintf("items (%s)\n\n%s", listOptions[0].ParentIdentifier, SeparatorString)
	}
	l.SetStatusBarItemName(singularItemName, pluralItemName)

	return l
}

// GetCustomTheme returns the charm theme adjusted to the Daytona palette
// (green accents, red errors, custom margins).
func GetCustomTheme() *huh.Theme {
	t := huh.ThemeCharm()

	t.Blurred.FocusedButton = t.Blurred.FocusedButton.Background(Green)
	t.Blurred.FocusedButton = t.Blurred.FocusedButton.Bold(true)
	t.Blurred.TextInput.Prompt = t.Blurred.TextInput.Prompt.Foreground(Light)
	t.Blurred.TextInput.Cursor = t.Blurred.TextInput.Cursor.Foreground(Light)
	t.Blurred.SelectSelector = t.Blurred.SelectSelector.Foreground(Green)
	t.Blurred.Title = t.Blurred.Title.Foreground(Gray).Bold(true)
	t.Blurred.Description = t.Blurred.Description.Foreground(LightGray)

	t.Focused.Title = t.Focused.Title.Foreground(Green).Bold(true)
	t.Focused.Description = t.Focused.Description.Foreground(LightGray).Bold(true)
	t.Focused.FocusedButton = t.Focused.FocusedButton.Bold(true)
	t.Focused.FocusedButton = t.Focused.FocusedButton.Background(Green)
	t.Focused.TextInput.Prompt = t.Focused.TextInput.Prompt.Foreground(Green)
	t.Focused.TextInput.Cursor = t.Focused.TextInput.Cursor.Foreground(Light)
	t.Focused.SelectSelector = t.Focused.SelectSelector.Foreground(Green)
	t.Focused.SelectedOption = t.Focused.SelectedOption.Foreground(Green)
	t.Focused.ErrorIndicator = t.Focused.ErrorIndicator.Foreground(Red)
	t.Focused.ErrorMessage = t.Focused.ErrorMessage.Foreground(Red)
	t.Focused.Base = t.Focused.Base.BorderForeground(Green)
	t.Focused.Base = t.Focused.Base.BorderBottomForeground(Green)
	t.Focused.Base = t.Focused.Base.MarginTop(DefaultLayoutMarginTop)

	t.Blurred.Base = t.Blurred.Base.MarginTop(DefaultLayoutMarginTop)

	return t
}

// GetInitialCommandTheme returns a charm theme variant for the initial
// command prompt: blurred titles mirror focused ones and left
// margins/padding/borders are stripped from the base style.
func GetInitialCommandTheme() *huh.Theme {
	newTheme := huh.ThemeCharm()

	newTheme.Blurred.Title = newTheme.Focused.Title

	b := &newTheme.Blurred
	b.FocusedButton = b.FocusedButton.Background(Green)
	b.FocusedButton = b.FocusedButton.Bold(true)
	b.TextInput.Prompt = b.TextInput.Prompt.Foreground(Green)
	b.TextInput.Cursor = b.TextInput.Cursor.Foreground(Green)
	b.SelectSelector = b.SelectSelector.Foreground(Green)

	f := &newTheme.Focused
	f.Base = f.Base.BorderForeground(lipgloss.Color("fff"))
	f.Title = f.Title.Foreground(Green).Bold(true)
	f.FocusedButton = f.FocusedButton.Bold(true)
	f.FocusedButton = f.FocusedButton.Background(Green)
	f.TextInput.Prompt = f.TextInput.Prompt.Foreground(Green)
	f.TextInput.Cursor = f.TextInput.Cursor.Foreground(Light)
	f.SelectSelector = f.SelectSelector.Foreground(Green)
	f.Base = f.Base.UnsetMarginLeft()
	f.Base = f.Base.UnsetPaddingLeft()
	f.Base = f.Base.BorderLeft(false)
	f.SelectedOption = lipgloss.NewStyle().Foreground(Green)

	return newTheme
}
// SPDX-License-Identifier: AGPL-3.0 package organization import ( "fmt" "os" "github.com/charmbracelet/lipgloss" "github.com/daytonaio/daytona/cli/views/common" "github.com/daytonaio/daytona/cli/views/util" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "golang.org/x/term" ) func RenderInfo(organization *apiclient.Organization, forceUnstyled bool) { var output string nameLabel := "Organization" output += "\n" output += getInfoLine(nameLabel, organization.Name) + "\n" output += getInfoLine("Created", util.GetTimeSinceLabel(organization.CreatedAt)) + "\n" output += getInfoLine("ID", organization.Id) + "\n" terminalWidth, _, err := term.GetSize(int(os.Stdout.Fd())) if err != nil { fmt.Println(output) return } if terminalWidth < common.TUITableMinimumWidth || forceUnstyled { renderUnstyledInfo(output) return } output = common.GetStyledMainTitle("Organization Info") + "\n" + output renderTUIView(output, common.GetContainerBreakpointWidth(terminalWidth)) } func renderUnstyledInfo(output string) { fmt.Println(output) } func renderTUIView(output string, width int) { output = lipgloss.NewStyle().PaddingLeft(3).Render(output) content := lipgloss. NewStyle().Width(width). Render(output) fmt.Println(content) } func getInfoLine(key, value string) string { return util.PropertyNameStyle.Render(fmt.Sprintf("%-*s", util.PropertyNameWidth, key)) + util.PropertyValueStyle.Render(value) + "\n" } ================================================ FILE: apps/cli/views/organization/list.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package organization import ( "fmt" "sort" "github.com/daytonaio/daytona/cli/views/common" "github.com/daytonaio/daytona/cli/views/util" apiclient "github.com/daytonaio/daytona/libs/api-client-go" ) type RowData struct { Name string Id string Created string } func ListOrganizations(organizationList []apiclient.Organization, activeOrganizationId *string) { if len(organizationList) == 0 { util.NotifyEmptyOrganizationList(true) return } SortOrganizations(&organizationList) headers := []string{"Name", "Id", "Created"} data := [][]string{} for _, o := range organizationList { var rowData *RowData var row []string rowData = getTableRowData(o, activeOrganizationId) row = getRowFromRowData(*rowData) data = append(data, row) } table := util.GetTableView(data, headers, nil, func() { renderUnstyledList(organizationList) }) fmt.Println(table) } func SortOrganizations(organizationList *[]apiclient.Organization) { sort.Slice(*organizationList, func(i, j int) bool { return (*organizationList)[i].CreatedAt.After((*organizationList)[j].CreatedAt) }) } func getTableRowData(organization apiclient.Organization, activeOrganizationId *string) *RowData { rowData := RowData{"", "", ""} rowData.Name = organization.Name + util.AdditionalPropertyPadding if activeOrganizationId != nil && *activeOrganizationId == organization.Id { rowData.Name = "*" + rowData.Name } rowData.Id = organization.Id rowData.Created = util.GetTimeSinceLabel(organization.CreatedAt) return &rowData } func renderUnstyledList(organizationList []apiclient.Organization) { for _, organization := range organizationList { RenderInfo(&organization, true) if organization.Id != organizationList[len(organizationList)-1].Id { fmt.Printf("\n%s\n\n", common.SeparatorString) } } } func getRowFromRowData(rowData RowData) []string { row := []string{ common.NameStyle.Render(rowData.Name), common.DefaultRowDataStyle.Render(rowData.Id), common.DefaultRowDataStyle.Render(rowData.Created), } return row 
} ================================================ FILE: apps/cli/views/organization/select.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package organization import ( "github.com/charmbracelet/huh" "github.com/daytonaio/daytona/cli/views/common" apiclient "github.com/daytonaio/daytona/libs/api-client-go" ) func GetOrganizationIdFromPrompt(organizationList []apiclient.Organization) (*apiclient.Organization, error) { var chosenOrganizationId string var organizationOptions []huh.Option[string] for _, organization := range organizationList { organizationOptions = append(organizationOptions, huh.NewOption(organization.Name, organization.Id)) } form := huh.NewForm( huh.NewGroup( huh.NewSelect[string](). Title("Choose an Organization"). Options( organizationOptions..., ). Value(&chosenOrganizationId), ).WithTheme(common.GetCustomTheme()), ) if err := form.Run(); err != nil { return nil, err } for _, organization := range organizationList { if organization.Id == chosenOrganizationId { return &organization, nil } } return nil, nil } ================================================ FILE: apps/cli/views/sandbox/info.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "fmt" "os" "strings" "github.com/charmbracelet/lipgloss" "github.com/daytonaio/daytona/cli/views/common" "github.com/daytonaio/daytona/cli/views/util" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "golang.org/x/term" ) func RenderInfo(sandbox *apiclient.Sandbox, forceUnstyled bool) { var output string output += "\n" output += getInfoLine("ID", sandbox.Id) + "\n" if sandbox.State != nil { output += getInfoLine("State", getStateLabel(*sandbox.State)) + "\n" } if sandbox.Snapshot != nil { output += getInfoLine("Snapshot", *sandbox.Snapshot) + "\n" } output += getInfoLine("Region", sandbox.Target) + "\n" if sandbox.Class != nil { output += getInfoLine("Class", *sandbox.Class) + "\n" } if sandbox.CreatedAt != nil { output += getInfoLine("Created", util.GetTimeSinceLabelFromString(*sandbox.CreatedAt)) + "\n" } if sandbox.UpdatedAt != nil { output += getInfoLine("Last Event", util.GetTimeSinceLabelFromString(*sandbox.UpdatedAt)) + "\n" } terminalWidth, _, err := term.GetSize(int(os.Stdout.Fd())) if err != nil { fmt.Println(output) return } if terminalWidth < common.TUITableMinimumWidth || forceUnstyled { renderUnstyledInfo(output) return } output = common.GetStyledMainTitle("Sandbox Info") + "\n" + output if len(sandbox.Labels) > 0 { labels := "" i := 0 for k, v := range sandbox.Labels { label := fmt.Sprintf("%s=%s\n", k, v) if i == 0 { labels += label + "\n" } else { labels += getInfoLine("", fmt.Sprintf("%s=%s\n", k, v)) } i++ } labels = strings.TrimSuffix(labels, "\n") output += "\n" + strings.TrimSuffix(getInfoLine("Labels", labels), "\n") } renderTUIView(output, common.GetContainerBreakpointWidth(terminalWidth)) } func renderUnstyledInfo(output string) { fmt.Println(output) } func renderTUIView(output string, width int) { output = lipgloss.NewStyle().PaddingLeft(3).Render(output) content := lipgloss. NewStyle().Width(width). 
Render(output) fmt.Println(content) } func getInfoLine(key, value string) string { return util.PropertyNameStyle.Render(fmt.Sprintf("%-*s", util.PropertyNameWidth, key)) + util.PropertyValueStyle.Render(value) + "\n" } func getStateLabel(state apiclient.SandboxState) string { switch state { case apiclient.SANDBOXSTATE_CREATING: return common.CreatingStyle.Render("CREATING") case apiclient.SANDBOXSTATE_RESTORING: return common.CreatingStyle.Render("RESTORING") case apiclient.SANDBOXSTATE_DESTROYED: return common.DeletedStyle.Render("DESTROYED") case apiclient.SANDBOXSTATE_DESTROYING: return common.DeletedStyle.Render("DESTROYING") case apiclient.SANDBOXSTATE_STARTED: return common.StartedStyle.Render("STARTED") case apiclient.SANDBOXSTATE_STOPPED: return common.StoppedStyle.Render("STOPPED") case apiclient.SANDBOXSTATE_STARTING: return common.StartingStyle.Render("STARTING") case apiclient.SANDBOXSTATE_STOPPING: return common.StoppingStyle.Render("STOPPING") case apiclient.SANDBOXSTATE_PULLING_SNAPSHOT: return common.CreatingStyle.Render("PULLING SNAPSHOT") case apiclient.SANDBOXSTATE_ARCHIVING: return common.CreatingStyle.Render("ARCHIVING") case apiclient.SANDBOXSTATE_ARCHIVED: return common.StoppedStyle.Render("ARCHIVED") case apiclient.SANDBOXSTATE_ERROR: return common.ErrorStyle.Render("ERROR") case apiclient.SANDBOXSTATE_BUILD_FAILED: return common.ErrorStyle.Render("BUILD FAILED") case apiclient.SANDBOXSTATE_UNKNOWN: return common.UndefinedStyle.Render("UNKNOWN") default: return common.UndefinedStyle.Render("/") } } ================================================ FILE: apps/cli/views/sandbox/list.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package sandbox import ( "fmt" "sort" "github.com/daytonaio/daytona/cli/views/common" "github.com/daytonaio/daytona/cli/views/util" apiclient "github.com/daytonaio/daytona/libs/api-client-go" ) type RowData struct { Name string State string Region string Class string LastEvent string } func ListSandboxes(sandboxList []apiclient.Sandbox, activeOrganizationName *string) { if len(sandboxList) == 0 { util.NotifyEmptySandboxList(true) return } headers := []string{"Sandbox", "State", "Region", "Class", "Last Event"} data := [][]string{} for _, s := range sandboxList { var rowData *RowData var row []string rowData = getTableRowData(s) row = getRowFromRowData(*rowData) data = append(data, row) } table := util.GetTableView(data, headers, activeOrganizationName, func() { renderUnstyledList(sandboxList) }) fmt.Println(table) } func SortSandboxes(sandboxList *[]apiclient.Sandbox) { sort.Slice(*sandboxList, func(i, j int) bool { pi, pj := getStateSortPriorities(*(*sandboxList)[i].State, *(*sandboxList)[j].State) if pi != pj { return pi < pj } if (*sandboxList)[i].CreatedAt == nil || (*sandboxList)[j].CreatedAt == nil { return true } // If two sandboxes have the same state priority, compare the UpdatedAt property return *(*sandboxList)[i].CreatedAt > *(*sandboxList)[j].CreatedAt }) } func getTableRowData(sandbox apiclient.Sandbox) *RowData { rowData := RowData{"", "", "", "", ""} rowData.Name = sandbox.Id + util.AdditionalPropertyPadding if sandbox.State != nil { rowData.State = getStateLabel(*sandbox.State) } rowData.Region = sandbox.Target if sandbox.Class != nil { rowData.Class = *sandbox.Class } if sandbox.UpdatedAt != nil { rowData.LastEvent = util.GetTimeSinceLabelFromString(*sandbox.UpdatedAt) } return &rowData } func renderUnstyledList(sandboxList []apiclient.Sandbox) { for _, sandbox := range sandboxList { RenderInfo(&sandbox, true) if sandbox.Id != sandboxList[len(sandboxList)-1].Id { fmt.Printf("\n%s\n\n", common.SeparatorString) } 
} } func getRowFromRowData(rowData RowData) []string { row := []string{ common.NameStyle.Render(rowData.Name), rowData.State, common.DefaultRowDataStyle.Render(rowData.Region), common.DefaultRowDataStyle.Render(rowData.Class), common.DefaultRowDataStyle.Render(rowData.LastEvent), } return row } func getStateSortPriorities(state1, state2 apiclient.SandboxState) (int, int) { pi, ok := sandboxListStatePriorities[state1] if !ok { pi = 99 } pj, ok2 := sandboxListStatePriorities[state2] if !ok2 { pj = 99 } return pi, pj } // Sandboxes that have actions being performed on them have a higher priority when listing var sandboxListStatePriorities = map[apiclient.SandboxState]int{ "pending": 1, "pending-start": 1, "deleting": 1, "creating": 1, "started": 2, "undefined": 2, "error": 3, "build-failed": 3, "stopped": 4, } ================================================ FILE: apps/cli/views/snapshot/info.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package snapshot import ( "fmt" "os" "github.com/charmbracelet/lipgloss" "github.com/daytonaio/daytona/cli/views/common" "github.com/daytonaio/daytona/cli/views/util" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "golang.org/x/term" ) func RenderInfo(snapshot *apiclient.SnapshotDto, forceUnstyled bool) { var output string nameLabel := "Snapshot" output += "\n" output += getInfoLine(nameLabel, snapshot.Name) + "\n" output += getInfoLine("State", getStateLabel(snapshot.State)) + "\n" if size := snapshot.Size.Get(); size != nil { output += getInfoLine("Size", fmt.Sprintf("%.2f GB", *size)) + "\n" } else { output += getInfoLine("Size", "-") + "\n" } output += getInfoLine("Created", util.GetTimeSinceLabel(snapshot.CreatedAt)) + "\n" output += getInfoLine("ID", snapshot.Id) + "\n" terminalWidth, _, err := term.GetSize(int(os.Stdout.Fd())) if err != nil { fmt.Println(output) return } if terminalWidth < common.TUITableMinimumWidth || 
forceUnstyled { renderUnstyledInfo(output) return } output = common.GetStyledMainTitle("Snapshot Info") + "\n" + output renderTUIView(output, common.GetContainerBreakpointWidth(terminalWidth)) } func renderUnstyledInfo(output string) { fmt.Println(output) } func renderTUIView(output string, width int) { output = lipgloss.NewStyle().PaddingLeft(3).Render(output) content := lipgloss. NewStyle().Width(width). Render(output) fmt.Println(content) } func getInfoLine(key, value string) string { return util.PropertyNameStyle.Render(fmt.Sprintf("%-*s", util.PropertyNameWidth, key)) + util.PropertyValueStyle.Render(value) + "\n" } func getStateLabel(state apiclient.SnapshotState) string { switch state { case apiclient.SNAPSHOTSTATE_PENDING: return common.CreatingStyle.Render("PENDING") case apiclient.SNAPSHOTSTATE_PULLING: return common.CreatingStyle.Render("PULLING SNAPSHOT") case apiclient.SNAPSHOTSTATE_ACTIVE: return common.StartedStyle.Render("ACTIVE") case apiclient.SNAPSHOTSTATE_ERROR: return common.ErrorStyle.Render("ERROR") case apiclient.SNAPSHOTSTATE_BUILD_FAILED: return common.ErrorStyle.Render("BUILD FAILED") case apiclient.SNAPSHOTSTATE_REMOVING: return common.DeletedStyle.Render("REMOVING") default: return common.UndefinedStyle.Render("/") } } ================================================ FILE: apps/cli/views/snapshot/list.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package snapshot

import (
	"fmt"
	"sort"

	"github.com/daytonaio/daytona/cli/views/common"
	"github.com/daytonaio/daytona/cli/views/util"
	apiclient "github.com/daytonaio/daytona/libs/api-client-go"
)

// RowData holds the pre-rendered cell values for one snapshot table row.
type RowData struct {
	Name    string
	State   string
	Size    string
	Created string
}

// ListSnapshots renders the snapshots as a styled table, falling back to an
// unstyled per-item view on narrow terminals. The list is sorted in place
// (transitional states first, then newest first).
func ListSnapshots(snapshotList []apiclient.SnapshotDto, activeOrganizationName *string) {
	if len(snapshotList) == 0 {
		util.NotifyEmptySnapshotList(true)
		return
	}

	SortSnapshots(&snapshotList)

	headers := []string{"Snapshot", "State", "Size", "Created"}
	data := [][]string{}

	for _, img := range snapshotList {
		var rowData *RowData
		var row []string

		rowData = getTableRowData(img)
		row = getRowFromRowData(*rowData)
		data = append(data, row)
	}

	// The fallback closure is invoked by GetTableView when the terminal is
	// too narrow for the table layout.
	table := util.GetTableView(data, headers, activeOrganizationName, func() {
		renderUnstyledList(snapshotList)
	})

	fmt.Println(table)
}

// SortSnapshots orders the list in place: snapshots with "in progress"
// states first (see snapshotListStatePriorities), then newest first within
// the same priority.
func SortSnapshots(snapshotList *[]apiclient.SnapshotDto) {
	sort.Slice(*snapshotList, func(i, j int) bool {
		pi, pj := getStateSortPriorities((*snapshotList)[i].State, (*snapshotList)[j].State)
		if pi != pj {
			return pi < pj
		}

		// If two snapshots have the same state priority, compare the CreatedAt property (newest first)
		return (*snapshotList)[i].CreatedAt.After((*snapshotList)[j].CreatedAt)
	})
}

// getTableRowData converts one snapshot into its display row; an unset size
// is rendered as "-".
func getTableRowData(snapshot apiclient.SnapshotDto) *RowData {
	rowData := RowData{"", "", "", ""}
	rowData.Name = snapshot.Name + util.AdditionalPropertyPadding
	rowData.State = getStateLabel(snapshot.State)
	if snapshot.Size.IsSet() && snapshot.Size.Get() != nil {
		rowData.Size = fmt.Sprintf("%.2f GB", *snapshot.Size.Get())
	} else {
		rowData.Size = "-"
	}
	rowData.Created = util.GetTimeSinceLabel(snapshot.CreatedAt)
	return &rowData
}

// renderUnstyledList prints each snapshot's full info view, separated by a
// horizontal rule (no rule after the last entry).
func renderUnstyledList(snapshotList []apiclient.SnapshotDto) {
	for _, snapshot := range snapshotList {
		RenderInfo(&snapshot, true)

		if snapshot.Id != snapshotList[len(snapshotList)-1].Id {
			fmt.Printf("\n%s\n\n", common.SeparatorString)
		}
	}
}

// getRowFromRowData applies the shared table styles to one row's values; the
// State cell is already styled by getStateLabel.
func getRowFromRowData(rowData RowData) []string {
	row :=
[]string{ common.NameStyle.Render(rowData.Name), rowData.State, common.DefaultRowDataStyle.Render(rowData.Size), common.DefaultRowDataStyle.Render(rowData.Created), } return row } func getStateSortPriorities(state1, state2 apiclient.SnapshotState) (int, int) { pi, ok := snapshotListStatePriorities[state1] if !ok { pi = 99 } pj, ok2 := snapshotListStatePriorities[state2] if !ok2 { pj = 99 } return pi, pj } // snapshots that have actions being performed on them have a higher priority when listing var snapshotListStatePriorities = map[apiclient.SnapshotState]int{ apiclient.SNAPSHOTSTATE_PENDING: 1, apiclient.SNAPSHOTSTATE_PULLING: 1, apiclient.SNAPSHOTSTATE_ERROR: 2, apiclient.SNAPSHOTSTATE_ACTIVE: 3, apiclient.SNAPSHOTSTATE_REMOVING: 4, } ================================================ FILE: apps/cli/views/util/empty_list.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package util import ( "github.com/daytonaio/daytona/cli/views/common" ) func NotifyEmptySandboxList(tip bool) { common.RenderInfoMessageBold("No sandboxes found") if tip { common.RenderTip("Use the Daytona SDK to get started.") } } func NotifyEmptySnapshotList(tip bool) { common.RenderInfoMessageBold("No snapshots found") if tip { common.RenderTip("Use 'daytona snapshot push' to push a snapshot.") } } func NotifyEmptyOrganizationList(tip bool) { common.RenderInfoMessageBold("No organizations found") if tip { common.RenderTip("Use 'daytona organization create' to create an organization.") } } func NotifyEmptyVolumeList(tip bool) { common.RenderInfoMessageBold("No volumes found") if tip { common.RenderTip("Use 'daytona volume create' to create a volume.") } } ================================================ FILE: apps/cli/views/util/info.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package util import ( "github.com/charmbracelet/lipgloss" "github.com/daytonaio/daytona/cli/views/common" ) const PropertyNameWidth = 16 var PropertyNameStyle = lipgloss.NewStyle(). Foreground(common.LightGray) var PropertyValueStyle = lipgloss.NewStyle(). Foreground(common.Light). Bold(true) ================================================ FILE: apps/cli/views/util/spinner.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package util import ( "fmt" "os" "github.com/charmbracelet/bubbles/spinner" tea "github.com/charmbracelet/bubbletea" "github.com/charmbracelet/lipgloss" "github.com/daytonaio/daytona/cli/views/common" log "github.com/sirupsen/logrus" "golang.org/x/term" ) var isAborted bool type model struct { spinner spinner.Model quitting bool message string inline bool } type Msg string func initialModel(message string, inline bool) model { s := spinner.New() s.Spinner = spinner.Dot s.Style = lipgloss.NewStyle().Foreground(common.Green) return model{spinner: s, message: message, inline: inline} } func (m model) Init() tea.Cmd { return m.spinner.Tick } func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { switch msg := msg.(type) { case Msg: m.quitting = true return m, tea.Quit case tea.KeyMsg: switch keypress := msg.String(); keypress { case "ctrl+c": isAborted = true m.quitting = true return m, tea.Quit } } var cmd tea.Cmd m.spinner, cmd = m.spinner.Update(msg) return m, cmd } func WithSpinner(message string, fn func() error) error { if isTTY() { p := start(message, false) defer stop(p) } return fn() } func WithInlineSpinner(message string, fn func() error) error { if isTTY() { p := start(message, true) defer stop(p) } return fn() } func start(message string, inline bool) *tea.Program { var p *tea.Program if inline { p = tea.NewProgram(initialModel(message, true)) } else { p = tea.NewProgram(initialModel(message, false), 
tea.WithAltScreen()) } go func() { if _, err := p.Run(); err != nil { fmt.Println(err) os.Exit(1) } if isAborted { fmt.Println("Operation cancelled") os.Exit(1) } }() return p } func stop(p *tea.Program) { p.Send(Msg("quit")) err := p.ReleaseTerminal() if err != nil { log.Fatal(err) } } func (m model) View() string { if m.quitting { return "" } str := "" if m.inline { str = common.GetInfoMessage(fmt.Sprintf("%s %s ...", m.spinner.View(), m.message)) } else { str = common.NameStyle.Render(fmt.Sprintf("\n\n %s %s ...\n\n", m.spinner.View(), m.message)) } return str } func isTTY() bool { return term.IsTerminal(int(os.Stdout.Fd())) } ================================================ FILE: apps/cli/views/util/table.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package util import ( "fmt" "os" "regexp" "strings" "github.com/charmbracelet/lipgloss" "github.com/charmbracelet/lipgloss/table" "github.com/daytonaio/daytona/cli/views/common" "golang.org/x/term" ) var AdditionalPropertyPadding = " " // Left border, BaseTableStyle padding left, additional padding for target name and target config, BaseTableStyle padding right, BaseCellStyle padding right, right border var RowWhiteSpace = 1 + 4 + len(AdditionalPropertyPadding)*2 + 4 + 4 + 1 var ArbitrarySpace = 10 // Gets the table view string or falls back to an unstyled view for lower terminal widths func GetTableView(data [][]string, headers []string, activeOrganizationName *string, fallbackRender func()) string { re := lipgloss.NewRenderer(os.Stdout) terminalWidth, _, err := term.GetSize(int(os.Stdout.Fd())) if err != nil { fmt.Println(data) return "" } breakpointWidth := common.GetContainerBreakpointWidth(terminalWidth) minWidth := getMinimumWidth(data) if breakpointWidth == 0 || minWidth > breakpointWidth { fallbackRender() return "" } t := table.New(). Headers(headers...). Rows(data...). 
BorderStyle(re.NewStyle().Foreground(common.LightGray)). BorderRow(false).BorderColumn(false).BorderLeft(false).BorderRight(false).BorderTop(false).BorderBottom(false). StyleFunc(func(_, _ int) lipgloss.Style { return common.BaseCellStyle }).Width(breakpointWidth - 2*common.BaseTableStyleHorizontalPadding - 1) table := t.String() if activeOrganizationName != nil { activeOrgMessage := common.GetInfoMessage(fmt.Sprintf("Active organization: %s", *activeOrganizationName)) rightAlignedStyle := lipgloss.NewStyle().Width(breakpointWidth - 2*common.BaseTableStyleHorizontalPadding - 1).Align(lipgloss.Right) table += "\n" + rightAlignedStyle.Render(activeOrgMessage) } return common.BaseTableStyle.Render(table) } func getMinimumWidth(data [][]string) int { width := 0 widestRow := 0 for _, row := range data { for _, cell := range row { // Remove ANSI escape codes regex := regexp.MustCompile("\x1b\\[[0-9;]*[a-zA-Z]") strippedCell := regex.ReplaceAllString(cell, "") width += longestLineLength(strippedCell) if width > widestRow { widestRow = width } } width = 0 } return widestRow } // Returns the length of the longest line in a string func longestLineLength(input string) int { lines := strings.Split(input, "\n") maxLength := 0 for _, line := range lines { if len(line) > maxLength { maxLength = len(line) } } return maxLength } ================================================ FILE: apps/cli/views/util/time.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package util import ( "fmt" "time" ) var timeLayout = "2006-01-02T15:04:05.999999999Z07:00" func GetTimeSinceLabelFromString(input string) string { t, err := time.Parse(timeLayout, input) if err != nil { return "/" } return GetTimeSinceLabel(t) } func GetTimeSinceLabel(t time.Time) string { duration := time.Since(t) if duration < time.Minute { return "< 1 minute ago" } else if duration < time.Hour { minutes := int(duration.Minutes()) if minutes == 1 { return "1 minute ago" } return fmt.Sprintf("%d minutes ago", minutes) } else if duration < 24*time.Hour { hours := int(duration.Hours()) if hours == 1 { return "1 hour ago" } return fmt.Sprintf("%d hours ago", hours) } else { days := int(duration.Hours() / 24) if days == 1 { return "1 day ago" } return fmt.Sprintf("%d days ago", days) } } ================================================ FILE: apps/cli/views/volume/info.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package volume import ( "fmt" "os" "github.com/charmbracelet/lipgloss" "github.com/daytonaio/daytona/cli/views/common" "github.com/daytonaio/daytona/cli/views/util" apiclient "github.com/daytonaio/daytona/libs/api-client-go" "golang.org/x/term" ) func RenderInfo(volume *apiclient.VolumeDto, forceUnstyled bool) { var output string nameLabel := "Volume" output += "\n" output += getInfoLine(nameLabel, volume.Name) + "\n" output += getInfoLine("ID", volume.Id) + "\n" output += getInfoLine("State", getStateLabel(volume.State)) + "\n" output += getInfoLine("Created", util.GetTimeSinceLabelFromString(volume.CreatedAt)) + "\n" terminalWidth, _, err := term.GetSize(int(os.Stdout.Fd())) if err != nil { fmt.Println(output) return } if terminalWidth < common.TUITableMinimumWidth || forceUnstyled { renderUnstyledInfo(output) return } output = common.GetStyledMainTitle("Volume Info") + "\n" + output renderTUIView(output, common.GetContainerBreakpointWidth(terminalWidth)) } func renderUnstyledInfo(output string) { fmt.Println(output) } func renderTUIView(output string, width int) { output = lipgloss.NewStyle().PaddingLeft(3).Render(output) content := lipgloss. NewStyle().Width(width). 
Render(output) fmt.Println(content) } func getInfoLine(key, value string) string { return util.PropertyNameStyle.Render(fmt.Sprintf("%-*s", util.PropertyNameWidth, key)) + util.PropertyValueStyle.Render(value) + "\n" } func getStateLabel(state apiclient.VolumeState) string { switch state { case apiclient.VOLUMESTATE_PENDING_CREATE: return common.CreatingStyle.Render("PENDING CREATE") case apiclient.VOLUMESTATE_CREATING: return common.CreatingStyle.Render("CREATING") case apiclient.VOLUMESTATE_READY: return common.StartedStyle.Render("READY") case apiclient.VOLUMESTATE_PENDING_DELETE: return common.DeletedStyle.Render("PENDING DELETE") case apiclient.VOLUMESTATE_DELETING: return common.DeletedStyle.Render("DELETING") case apiclient.VOLUMESTATE_DELETED: return common.DeletedStyle.Render("DELETED") case apiclient.VOLUMESTATE_ERROR: return common.ErrorStyle.Render("ERROR") default: return common.UndefinedStyle.Render("/") } } ================================================ FILE: apps/cli/views/volume/list.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package volume import ( "fmt" "sort" "github.com/daytonaio/daytona/cli/views/common" "github.com/daytonaio/daytona/cli/views/util" apiclient "github.com/daytonaio/daytona/libs/api-client-go" ) type RowData struct { Name string State string Size string Created string } func ListVolumes(volumeList []apiclient.VolumeDto, activeOrganizationName *string) { if len(volumeList) == 0 { util.NotifyEmptyVolumeList(true) return } SortVolumes(&volumeList) headers := []string{"Volume", "State", "Size", "Created"} data := [][]string{} for _, v := range volumeList { var rowData *RowData var row []string rowData = getTableRowData(v) row = getRowFromRowData(*rowData) data = append(data, row) } table := util.GetTableView(data, headers, activeOrganizationName, func() { renderUnstyledList(volumeList) }) fmt.Println(table) } func SortVolumes(volumeList *[]apiclient.VolumeDto) { sort.Slice(*volumeList, func(i, j int) bool { if (*volumeList)[i].State != (*volumeList)[j].State { pi, pj := getStateSortPriorities((*volumeList)[i].State, (*volumeList)[j].State) return pi < pj } // If two volumes have the same state priority, compare the CreatedAt property return (*volumeList)[i].CreatedAt > (*volumeList)[j].CreatedAt }) } func getTableRowData(volume apiclient.VolumeDto) *RowData { rowData := RowData{"", "", "", ""} rowData.Name = volume.Name + util.AdditionalPropertyPadding rowData.State = getStateLabel(volume.State) rowData.Created = util.GetTimeSinceLabelFromString(volume.CreatedAt) return &rowData } func renderUnstyledList(volumeList []apiclient.VolumeDto) { for _, volume := range volumeList { RenderInfo(&volume, true) if volume.Id != volumeList[len(volumeList)-1].Id { fmt.Printf("\n%s\n\n", common.SeparatorString) } } } func getRowFromRowData(rowData RowData) []string { row := []string{ common.NameStyle.Render(rowData.Name), rowData.State, common.DefaultRowDataStyle.Render(rowData.Size), common.DefaultRowDataStyle.Render(rowData.Created), } return row 
} func getStateSortPriorities(state1, state2 apiclient.VolumeState) (int, int) { pi, ok := volumeListStatePriorities[state1] if !ok { pi = 99 } pj, ok2 := volumeListStatePriorities[state2] if !ok2 { pj = 99 } return pi, pj } // Volumes that have actions being performed on them have a higher priority when listing var volumeListStatePriorities = map[apiclient.VolumeState]int{ apiclient.VOLUMESTATE_PENDING_CREATE: 1, apiclient.VOLUMESTATE_CREATING: 1, apiclient.VOLUMESTATE_PENDING_DELETE: 1, apiclient.VOLUMESTATE_DELETING: 1, apiclient.VOLUMESTATE_READY: 2, apiclient.VOLUMESTATE_ERROR: 3, apiclient.VOLUMESTATE_DELETED: 4, } ================================================ FILE: apps/daemon/.gitignore ================================================ pkg/terminal/static !pkg/terminal/static/index.html ================================================ FILE: apps/daemon/cmd/daemon/config/config.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package config import ( "time" "github.com/go-playground/validator/v10" "github.com/kelseyhightower/envconfig" ) type Config struct { DaemonLogFilePath string `envconfig:"DAYTONA_DAEMON_LOG_FILE_PATH"` UserHomeAsWorkDir bool `envconfig:"DAYTONA_USER_HOME_AS_WORKDIR"` SandboxId string `envconfig:"DAYTONA_SANDBOX_ID" validate:"required"` OtelEndpoint *string `envconfig:"DAYTONA_OTEL_ENDPOINT"` TerminationCheckInterval time.Duration `envconfig:"DAYTONA_TERMINATION_CHECK_INTERVAL" default:"100ms" validate:"min_duration=1ms"` TerminationGracePeriod time.Duration `envconfig:"DAYTONA_TERMINATION_GRACE_PERIOD" default:"5s" validate:"min_duration=1s"` RecordingsDir string `envconfig:"DAYTONA_RECORDINGS_DIR"` OrganizationId *string `envconfig:"DAYTONA_ORGANIZATION_ID"` RegionId *string `envconfig:"DAYTONA_REGION_ID"` } var defaultDaemonLogFilePath = "/tmp/daytona-daemon.log" var config *Config func GetConfig() (*Config, error) { if config != nil { return config, 
nil } config = &Config{} err := envconfig.Process("", config) if err != nil { return nil, err } var validate = validator.New() // Register a custom tag "min_duration" that accepts a duration string like "1ms" err = validate.RegisterValidation("min_duration", func(fl validator.FieldLevel) bool { min, err := time.ParseDuration(fl.Param()) if err != nil { return false } d, ok := fl.Field().Interface().(time.Duration) if !ok { return false } return d >= min }) if err != nil { return nil, err } err = validate.Struct(config) if err != nil { return nil, err } if config.DaemonLogFilePath == "" { config.DaemonLogFilePath = defaultDaemonLogFilePath } return config, nil } ================================================ FILE: apps/daemon/cmd/daemon/main.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package main import ( "context" "errors" "io" "log/slog" "os" "os/signal" "path/filepath" "syscall" "time" golog "log" "github.com/daytonaio/common-go/pkg/log" "github.com/daytonaio/daemon/cmd/daemon/config" "github.com/daytonaio/daemon/internal/util" "github.com/daytonaio/daemon/pkg/recording" "github.com/daytonaio/daemon/pkg/recordingdashboard" "github.com/daytonaio/daemon/pkg/session" "github.com/daytonaio/daemon/pkg/ssh" "github.com/daytonaio/daemon/pkg/terminal" "github.com/daytonaio/daemon/pkg/toolbox" "github.com/lmittmann/tint" "github.com/mattn/go-isatty" ) func main() { os.Exit(run()) } func run() int { logLevel := log.ParseLogLevel(os.Getenv("LOG_LEVEL")) // Create the console handler with tint for colored output consoleHandler := tint.NewHandler(os.Stdout, &tint.Options{ NoColor: !isatty.IsTerminal(os.Stdout.Fd()), TimeFormat: time.RFC3339, Level: logLevel, }) logger := slog.New(consoleHandler) slog.SetDefault(logger) // Redirect standard library log to slog golog.SetOutput(&log.DebugLogWriter{}) homeDir, err := os.UserHomeDir() if err != nil { logger.Error("Failed to get user home 
directory", "error", err) return 2 } configDir := filepath.Join(homeDir, ".daytona") err = os.MkdirAll(configDir, 0755) if err != nil { logger.Error("Failed to create config directory", "path", configDir, "error", err) return 2 } entrypointLogFilePath := filepath.Join(configDir, "sessions", util.EntrypointSessionID, util.EntrypointCommandID, "output.log") // Check if user wants to read entrypoint logs args := os.Args[1:] if len(args) == 2 && args[0] == "entrypoint" && args[1] == "logs" { err := util.ReadEntrypointLogs(entrypointLogFilePath) if err != nil { if errors.Is(err, os.ErrNotExist) { logger.Warn("Logs not found, please check if correct entrypoint was provided for sandbox.") } else { logger.Error("Failed to read entrypoint log file", "error", err) } return 1 } return 0 } c, err := config.GetConfig() if err != nil { logger.Error("Failed to get config", "error", err) return 2 } // If workdir in image is not set, use user home as workdir if c.UserHomeAsWorkDir { err = os.Chdir(homeDir) if err != nil { logger.Warn("Failed to change working directory to home directory", "error", err) } } var logWriter io.Writer if c.DaemonLogFilePath != "" { logFile, err := os.OpenFile(c.DaemonLogFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { logger.Error("Failed to open log file", "path", c.DaemonLogFilePath, "error", err) } else { defer logFile.Close() logWriter = logFile fileHandler := slog.NewTextHandler(logWriter, &slog.HandlerOptions{ Level: logLevel, }) handler := log.NewMultiHandler([]slog.Handler{consoleHandler, fileHandler}...) 
logger = slog.New(handler) slog.SetDefault(logger) } } sessionService := session.NewSessionService(logger, configDir, c.TerminationGracePeriod, c.TerminationCheckInterval) // Execute passed arguments as command in entrypoint session if len(args) > 0 { // Create entrypoint session err = sessionService.Create(util.EntrypointSessionID, false) if err != nil { logger.Error("Failed to create entrypoint session", "error", err) return 2 } // Defer entrypoint session deletion concurrently with toolbox shutdown defer func() { delErr := sessionService.Delete(context.Background(), util.EntrypointSessionID) if delErr != nil { logger.Error("Failed to delete entrypoint session", "error", delErr) } else { logger.Debug("Deleted entrypoint session", "session_id", util.EntrypointSessionID) } }() logger.Debug("Created entrypoint session", "session_id", util.EntrypointSessionID) // Execute command asynchronously via session command := util.ShellQuoteJoin(args) _, err := sessionService.Execute( util.EntrypointSessionID, util.EntrypointCommandID, command, true, // async=true for non-blocking false, // isCombinedOutput=false true, // suppressInputEcho=true ) if err != nil { logger.Error("Failed to execute entrypoint command", "error", err) return 2 } } errChan := make(chan error) workDir, err := os.Getwd() if err != nil { logger.Error("Failed to get current working directory", "error", err) return 2 } recordingsDir := c.RecordingsDir if recordingsDir == "" { recordingsDir = filepath.Join(configDir, "recordings") } recordingService := recording.NewRecordingService(logger, recordingsDir) toolBoxServer := toolbox.NewServer(toolbox.ServerConfig{ Logger: logger, WorkDir: workDir, ConfigDir: configDir, OtelEndpoint: c.OtelEndpoint, SandboxId: c.SandboxId, SessionService: sessionService, RecordingService: recordingService, OrganizationId: c.OrganizationId, RegionId: c.RegionId, EntrypointLogFilePath: entrypointLogFilePath, }) // Start the toolbox server in a go routine go func() { err := 
toolBoxServer.Start() if err != nil { errChan <- err } }() // Start terminal server go func() { if err := terminal.StartTerminalServer(22222); err != nil { errChan <- err } }() // Start recording dashboard server go func() { if err := recordingdashboard.NewDashboardServer(logger, recordingService).Start(); err != nil { errChan <- err } }() sshServer := ssh.NewServer(logger, workDir, workDir) go func() { if err := sshServer.Start(); err != nil { errChan <- err } }() // Set up signal handling for graceful shutdown sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) // Wait for either an error or shutdown signal select { case err := <-errChan: logger.Error("Error occurred", "error", err) case sig := <-sigChan: logger.Info("Received signal, shutting down gracefully...", "signal", sig) } // Toolbox server graceful shutdown toolBoxServer.Shutdown() slog.Info("Shutdown complete") return 0 } ================================================ FILE: apps/daemon/go.mod ================================================ module github.com/daytonaio/daemon go 1.25.4 // v0.5.0 breaks tailscale-connected docker clients so we need to pin it to v0.4.0 replace github.com/docker/go-connections => github.com/docker/go-connections v0.4.0 // samber/lo v1.47.0 - required by headscale breaks frp replace github.com/samber/lo => github.com/samber/lo v1.39.0 require ( github.com/Masterminds/semver/v3 v3.4.0 github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 github.com/creack/pty v1.1.23 github.com/gin-gonic/gin v1.10.1 github.com/gliderlabs/ssh v0.3.8 github.com/go-git/go-git/v5 v5.16.5 github.com/go-playground/validator/v10 v10.27.0 github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 github.com/hashicorp/go-hclog v1.6.3 github.com/hashicorp/go-plugin v1.6.3 github.com/kelseyhightower/envconfig v1.4.0 github.com/lmittmann/tint v1.1.2 github.com/mattn/go-isatty v0.0.20 github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/pkg/sftp 
v1.13.6 github.com/samber/slog-gin v1.20.1 github.com/shirou/gopsutil/v4 v4.25.12 github.com/sourcegraph/jsonrpc2 v0.2.0 github.com/stretchr/testify v1.11.1 github.com/swaggo/files v1.0.1 github.com/swaggo/gin-swagger v1.6.0 github.com/swaggo/swag v1.16.4 go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.63.0 go.opentelemetry.io/otel/sdk v1.40.0 go.opentelemetry.io/otel/sdk/log v0.14.0 go.opentelemetry.io/otel/sdk/metric v1.40.0 golang.org/x/crypto v0.47.0 golang.org/x/sys v0.40.0 gopkg.in/ini.v1 v1.67.0 ) require ( dario.cat/mergo v1.0.1 // indirect github.com/KyleBanks/depth v1.2.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.1.6 // indirect github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect github.com/bytedance/sonic v1.14.0 // indirect github.com/bytedance/sonic/loader v0.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.6.3 // indirect github.com/cloudwego/base64x v0.1.6 // indirect github.com/cyphar/filepath-securejoin v0.4.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/ebitengine/purego v0.9.1 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/fatih/color v1.15.0 // indirect github.com/gabriel-vasile/mimetype v1.4.10 // indirect github.com/gin-contrib/sse v1.1.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/spec v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect 
github.com/goccy/go-json v0.10.5 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/hashicorp/yamux v0.1.1 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/kr/fs v0.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/oklog/run v1.0.0 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/skeema/knownhosts v1.3.1 // indirect github.com/tklauser/go-sysconf v0.3.16 // indirect github.com/tklauser/numcpus v0.11.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.3.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/log v0.14.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect go.opentelemetry.io/otel/trace v1.40.0 // indirect golang.org/x/arch v0.20.0 // indirect golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect golang.org/x/net v0.49.0 // indirect 
golang.org/x/text v0.33.0 // indirect golang.org/x/tools v0.40.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect google.golang.org/grpc v1.79.3 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) ================================================ FILE: apps/daemon/go.sum ================================================ dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw= github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= github.com/bufbuild/protocompile 
v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/ebitengine/purego v0.9.1 
h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A= github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4= github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk= github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ= github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= github.com/go-git/go-billy/v5 v5.6.2/go.mod 
h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/go-git/go-git/v5 v5.16.5 h1:mdkuqblwr57kVfXri5TTH+nMFLNUxIj9Z7F5ykFbw5s= github.com/go-git/go-git/v5 v5.16.5/go.mod h1:QOMLpNf1qxuSY4StA/ArOdfFR2TrKEjJiye2kel2m+M= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 
v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4= github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= 
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lmittmann/tint v1.1.2 h1:2CQzrL6rslrsyjqLDwD11bZ5OpLBPU+g3G/r5LSfS8w= github.com/lmittmann/tint v1.1.2/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k= github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c= github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/samber/slog-gin v1.20.1 h1:75wbryS7XrmGcVu/lfOwSFWWjmGoqV4GlE41nQFP0a4= github.com/samber/slog-gin v1.20.1/go.mod h1:7R4VMQGENllRLLnwGyoB5nUSB+qzxThpGe5G02xla6o= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shirou/gopsutil/v4 v4.25.12 h1:e7PvW/0RmJ8p8vPGJH4jvNkOyLmbkXgXW4m6ZPic6CY= github.com/shirou/gopsutil/v4 v4.25.12/go.mod h1:EivAfP5x2EhLp2ovdpKSozecVXn1TmuG7SMzs/Wh4PU= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/sourcegraph/jsonrpc2 v0.2.0 h1:KjN/dC4fP6aN9030MZCJs9WQbTOjWHhrtKVpzzSrr/U= github.com/sourcegraph/jsonrpc2 v0.2.0/go.mod h1:ZafdZgk/axhT1cvZAPOhw+95nz2I/Ra5qMlU4gTRwIo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE= github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg= github.com/swaggo/gin-swagger v1.6.0 h1:y8sxvQ3E20/RCyrXeFfg60r6H0Z+SwpTjMYsMm+zy8M= github.com/swaggo/gin-swagger v1.6.0/go.mod h1:BG00cCEy294xtVpyIAHG6+e2Qzj/xKlRdOqDkvq0uzo= github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A= github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg= github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA= github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI= github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw= github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA= github.com/ugorji/go/codec v1.3.0/go.mod 
h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.63.0 h1:5kSIJ0y8ckZZKoDhZHdVtcyjVi6rXyAwyaR8mp4zLbg= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.63.0/go.mod h1:i+fIMHvcSQtsIY82/xgiVWRklrNt/O6QriHLjzGeY+s= go.opentelemetry.io/contrib/propagators/b3 v1.38.0 h1:uHsCCOSKl0kLrV2dLkFK+8Ywk9iKa/fptkytc6aFFEo= go.opentelemetry.io/contrib/propagators/b3 v1.38.0/go.mod h1:wMRSZJZcY8ya9mApLLhwIMjqmApy2o/Ml+62lhvxyHU= go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM= go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno= go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= 
go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= go.opentelemetry.io/otel/sdk/log v0.14.0 h1:JU/U3O7N6fsAXj0+CXz21Czg532dW2V4gG1HE/e8Zrg= go.opentelemetry.io/otel/sdk/log v0.14.0/go.mod h1:imQvII+0ZylXfKU7/wtOND8Hn4OpT3YUoIgqJVksUkM= go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= golang.org/x/mod v0.31.0/go.mod 
h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= ================================================ FILE: apps/daemon/internal/buildinfo.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package internal var ( Version = "v0.0.0-dev" ) ================================================ FILE: apps/daemon/internal/util/entrypoint_logs.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package util

import (
	"context"
	"errors"
	"fmt"
	"os"

	"github.com/daytonaio/common-go/pkg/log"
)

// ReadEntrypointLogs tails the multiplexed entrypoint log file at
// entrypointLogFilePath and forwards its demultiplexed streams to the
// daemon's own stdout and stderr.
//
// It returns an error when the path is empty, the file cannot be opened,
// a write to stdout/stderr fails, or the log reader reports a non-nil
// error; it returns nil when the reader's context is done.
func ReadEntrypointLogs(entrypointLogFilePath string) error {
	if entrypointLogFilePath == "" {
		return errors.New("entrypoint log file path is not configured")
	}

	logFile, err := os.Open(entrypointLogFilePath)
	if err != nil {
		return fmt.Errorf("failed to open entrypoint log file at %s: %w", entrypointLogFilePath, err)
	}
	defer logFile.Close()

	// errChan is buffered so the reader goroutine can report a terminal
	// error without blocking; the data channels are unbuffered so writes
	// below apply backpressure to the reader.
	errChan := make(chan error, 1)
	stdoutChan := make(chan []byte)
	stderrChan := make(chan []byte)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// NOTE(review): assumes log.ReadMultiplexedLog (follow=true) splits the
	// file into stdout/stderr frames and signals termination via errChan —
	// confirm against the common-go package.
	go log.ReadMultiplexedLog(ctx, logFile, true, stdoutChan, stderrChan, errChan)

	for {
		select {
		case <-ctx.Done():
			return nil
		case line := <-stdoutChan:
			_, err := os.Stdout.Write(line)
			if err != nil {
				return fmt.Errorf("failed to write entrypoint log line to stdout: %w", err)
			}
		case line := <-stderrChan:
			_, err := os.Stderr.Write(line)
			if err != nil {
				return fmt.Errorf("failed to write entrypoint log line to stderr: %w", err)
			}
		case err := <-errChan:
			// A nil error is ignored and the loop keeps draining the
			// data channels; only a real error ends the tail.
			if err != nil {
				return err
			}
		}
	}
}

================================================
FILE: apps/daemon/internal/util/entrypoint_session.go
================================================
// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package util

// Well-known identifiers for the sandbox entrypoint session and command.
const EntrypointSessionID string = "entrypoint"
const EntrypointCommandID string = "entrypoint_command"
const EmptyCommandID string = ""

================================================
FILE: apps/daemon/internal/util/log_reader.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package util import ( "bufio" "context" "io" "os" "strings" "time" ) func ReadLogWithExitCode(ctx context.Context, logReader io.Reader, follow bool, exitCodeFilePath string, c chan []byte, errChan chan error) { reader := bufio.NewReader(logReader) consecutiveEOFCount := 0 maxConsecutiveEOF := 50 // Check exit code after 50 consecutive EOF reads ( 50 * 20ms = 1 second) for { select { case <-ctx.Done(): return default: bytes := make([]byte, 1024) n, err := reader.Read(bytes) if err != nil { if err != io.EOF { errChan <- err return } else if !follow { errChan <- io.EOF return } // EOF while following - increment counter consecutiveEOFCount++ // Check exit code after maxConsecutiveEOF consecutive EOF reads if exitCodeFilePath != "" && consecutiveEOFCount >= maxConsecutiveEOF { hasExit := hasExitCode(exitCodeFilePath) if hasExit { errChan <- io.EOF return } // Reset counter and continue consecutiveEOFCount = 0 } // Sleep for a short time to avoid busy-waiting time.Sleep(20 * time.Millisecond) continue } // Reset EOF counter on successful read if consecutiveEOFCount > 0 { consecutiveEOFCount = 0 } if n > 0 { // Create a new slice with only the actual read data to avoid sending null bytes data := make([]byte, n) copy(data, bytes[:n]) c <- data } } } } func hasExitCode(exitCodeFilePath string) bool { content, err := os.ReadFile(exitCodeFilePath) if err != nil { return false } return len(strings.TrimSpace(string(content))) > 0 } ================================================ FILE: apps/daemon/internal/util/pointer.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package util // Use generics to create a pointer to a value func Pointer[T any](d T) *T { return &d } ================================================ FILE: apps/daemon/internal/util/sandbox.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package util import ( "errors" "net/url" "regexp" "strings" ) func GetValidatedName(input string) (string, error) { input = strings.ReplaceAll(input, " ", "-") // Regular expression that catches letters, numbers, and dashes pattern := "^[a-zA-Z0-9-]+$" matched, err := regexp.MatchString(pattern, input) if err != nil { return "", err } if !matched { return "", errors.New("only letters, numbers, and dashes are allowed") } return input, nil } func GetValidatedUrl(input string) (string, error) { // Check if the input starts with a scheme (e.g., http:// or https://) if !strings.HasPrefix(input, "http://") && !strings.HasPrefix(input, "https://") { return "", errors.New("input is missing http:// or https://") } // Try to parse the input as a URL parsedURL, err := url.Parse(input) if err != nil { return "", errors.New("input is not a valid URL") } // If parsing was successful, return the fixed URL return parsedURL.String(), nil } func GetRepositorySlugFromUrl(url string, specifyGitProviders bool) string { if url == "" { return "/" } url = strings.TrimSuffix(url, "/") parts := strings.Split(url, "/") if len(parts) < 2 { return "" } if specifyGitProviders { return parts[len(parts)-3] + "/" + parts[len(parts)-2] + "/" + parts[len(parts)-1] } return parts[len(parts)-2] + "/" + parts[len(parts)-1] } func CleanUpRepositoryUrl(url string) string { url = strings.ToLower(url) return strings.TrimSuffix(url, "/") } ================================================ FILE: apps/daemon/internal/util/shell_quote.go ================================================ // Copyright Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package util import "strings" // ShellQuoteJoin quotes each argument for safe use in a shell command string. // Each arg is wrapped in single quotes, with any internal single quotes escaped. 
func ShellQuoteJoin(args []string) string {
	quoted := make([]string, len(args))
	for i, arg := range args {
		// Close the quote, emit an escaped quote, reopen: ' becomes '\''
		quoted[i] = "'" + strings.ReplaceAll(arg, "'", "'\\''") + "'"
	}
	return strings.Join(quoted, " ")
}

================================================
FILE: apps/daemon/internal/util/version.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package util

import (
	"fmt"
	"net/http"
	"regexp"
	"strings"

	semver "github.com/Masterminds/semver/v3"
)

// ExtractSdkVersionFromHeader extracts the SDK version from the headers.
// If the X-Daytona-SDK-Version header is not present, it looks through
// the Sec-WebSocket-Protocol header looking for the version protocol formatted like
// X-Daytona-SDK-Version/.
// If no version is found, it returns an empty string.
func ExtractSdkVersionFromHeader(header http.Header) string {
	// Prefer the explicit header when present.
	if v := header.Get("X-Daytona-SDK-Version"); v != "" {
		return v
	}

	// no explicit header; look through Sec-WebSocket-Protocol entries
	protocol := ExtractSdkVersionSubprotocol(header)
	if protocol != "" {
		// found version protocol; split off the version
		// (the subprotocol has the form "X-Daytona-SDK-Version~<version>")
		parts := strings.SplitN(protocol, "~", 2)
		if len(parts) == 2 {
			return parts[1]
		}
	}

	return ""
}

// ExtractSdkVersionSubprotocol extracts the SDK version subprotocol from request headers
// It looks for the X-Daytona-SDK-Version~ subprotocol in the Sec-WebSocket-Protocol header.
// Returns an empty string if no SDK version subprotocol is found.
func ExtractSdkVersionSubprotocol(header http.Header) string {
	subprotocols := header.Get("Sec-WebSocket-Protocol")
	if subprotocols == "" {
		return ""
	}

	const prefix = "X-Daytona-SDK-Version~"

	// split comma-separated protocols
	for _, subprotocol := range strings.Split(subprotocols, ",") {
		subprotocol = strings.TrimSpace(subprotocol)
		if strings.HasPrefix(subprotocol, prefix) {
			// Return the full subprotocol string
			// (the caller splits off the version part itself)
			return subprotocol
		}
	}

	return ""
}

// CompareVersions compares two versions and returns:
// 1 if v1 is greater than v2
// -1 if v1 is less than v2
// 0 if they are equal
//
// It considers pre-releases to be invalid if the ranges does not include one.
// If you want to have it include pre-releases a simple solution is to include -0 in your range.
func CompareVersions(v1 string, v2 string) (*int, error) {
	// Both inputs are normalized first so near-semver strings such as
	// "1.2.3rc1" still parse; see normalizeSemver below.
	semverV1, err := semver.NewVersion(normalizeSemver(v1))
	if err != nil {
		return nil, fmt.Errorf("failed to parse semver v1: %s, normalized: %s, error: %w", v1, normalizeSemver(v1), err)
	}
	semverV2, err := semver.NewVersion(normalizeSemver(v2))
	if err != nil {
		return nil, fmt.Errorf("failed to parse semver v2: %s, normalized: %s, error: %w", v2, normalizeSemver(v2), err)
	}
	comparison := semverV1.Compare(semverV2)
	return &comparison, nil
}

// normalizeSemver coerces a near-semver string into a parseable form by
// inserting a dash before a non-digit suffix (e.g. "1.2.3rc1" becomes
// "1.2.3-rc1"); already-dashed or plain X.Y.Z inputs pass through
// unchanged.
func normalizeSemver(input string) string {
	// If it's already in the form X.Y.Z-suffix, return as-is.
	reAlreadyDashed := regexp.MustCompile(`^\d+\.\d+\.\d+-\S+$`)
	if reAlreadyDashed.MatchString(input) {
		return input
	}

	// If there's a non-digit suffix immediately after X.Y.Z, dash it.
	reNeedsDash := regexp.MustCompile(`^(\d+)\.(\d+)\.(\d+)(\D.+)$`)
	if reNeedsDash.MatchString(input) {
		return reNeedsDash.ReplaceAllString(input, `$1.$2.$3-$4`)
	}

	// Otherwise (pure X.Y.Z or something else), leave unchanged.
	return input
}

================================================
FILE: apps/daemon/internal/util/websocket.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package util import ( "net/http" "github.com/gorilla/websocket" ) // UpgradeToWebSocket is a toolbox utility function that upgrades an HTTP connection to a WebSocket connection. // It automatically extracts and accepts SDK version subprotocols (if present) from the request headers and accepts them during handshake. // It uses a permissive CORS (CheckOrigin always returns true) to allow connections from any origin. func UpgradeToWebSocket(w http.ResponseWriter, r *http.Request) (*websocket.Conn, error) { // Extract SDK version subprotocol from request headers subprotocol := ExtractSdkVersionSubprotocol(r.Header) var protocols []string if subprotocol != "" { protocols = []string{subprotocol} } // Create a new upgrader for this request to prevent concurrency issues upgrader := websocket.Upgrader{ CheckOrigin: func(r *http.Request) bool { return true }, Subprotocols: protocols, } // Upgrade the connection to a WebSocket protocol ws, err := upgrader.Upgrade(w, r, nil) if err != nil { return nil, err } return ws, nil } ================================================ FILE: apps/daemon/internal/util/ws_keepalive.go ================================================ // Copyright Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package util import ( "log/slog" "time" "github.com/gorilla/websocket" ) // SetupWSKeepAlive configures WebSocket keepalive ping/pong handling on the // given connection. It installs a custom PingHandler that queues pong payloads // onto a buffered channel instead of writing to the connection directly. This // avoids write-mutex contention between the PingHandler and the caller's // single writer goroutine. // // The caller's writer goroutine must drain the returned channel via // WritePendingPongs before each data write so that keepalive pongs are never // delayed. 
func SetupWSKeepAlive(conn *websocket.Conn, logger *slog.Logger) <-chan []byte { pongCh := make(chan []byte, 10) conn.SetPingHandler(func(message string) error { select { case pongCh <- []byte(message): default: logger.Warn("pong channel full, dropping pong response") } return nil }) return pongCh } // WritePendingPongs drains all queued pong responses and writes them to the // connection. This MUST be called from the single writer goroutine before each // data write so that keepalive pongs are never delayed by data writes. Because // only one goroutine writes to the conn, WriteControl acquires the // gorilla/websocket write mutex instantly — no contention, no silent drops. func WritePendingPongs(conn *websocket.Conn, pongCh <-chan []byte, deadline time.Duration, logger *slog.Logger) { for { select { case pongData := <-pongCh: if err := conn.WriteControl(websocket.PongMessage, pongData, time.Now().Add(deadline)); err != nil { logger.Debug("failed to write pong", "error", err) return } default: return } } } ================================================ FILE: apps/daemon/pkg/common/errors.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package common import ( "time" ) // ErrorResponse represents the error response structure // // @Description Error response // @Schema ErrorResponse type ErrorResponse struct { StatusCode int `json:"statusCode" example:"400" binding:"required"` Message string `json:"message" example:"Bad request" binding:"required"` Code string `json:"code" example:"BAD_REQUEST" binding:"required"` Timestamp time.Time `json:"timestamp" example:"2023-01-01T12:00:00Z" binding:"required"` Path string `json:"path" example:"/api/resource" binding:"required"` Method string `json:"method" example:"GET" binding:"required"` } // @name ErrorResponse ================================================ FILE: apps/daemon/pkg/common/get_shell.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package common import ( "os" "os/exec" "strings" ) func GetShell() string { out, err := exec.Command("sh", "-c", "grep '^[^#]' /etc/shells").Output() if err != nil { return "sh" } if strings.Contains(string(out), "/usr/bin/zsh") { return "/usr/bin/zsh" } if strings.Contains(string(out), "/bin/zsh") { return "/bin/zsh" } if strings.Contains(string(out), "/usr/bin/bash") { return "/usr/bin/bash" } if strings.Contains(string(out), "/bin/bash") { return "/bin/bash" } shellEnv, shellSet := os.LookupEnv("SHELL") if shellSet { return shellEnv } return "sh" } ================================================ FILE: apps/daemon/pkg/common/spawn_tty.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package common import ( "fmt" "io" "os" "os/exec" "syscall" "unsafe" "github.com/creack/pty" ) type TTYSize struct { Height int Width int } type SpawnTTYOptions struct { Dir string StdIn io.Reader StdOut io.Writer Term string Env []string SizeCh <-chan TTYSize } func SpawnTTY(opts SpawnTTYOptions) error { shell := GetShell() cmd := exec.Command(shell) cmd.Dir = opts.Dir cmd.Env = append(cmd.Env, fmt.Sprintf("TERM=%s", opts.Term)) cmd.Env = append(cmd.Env, os.Environ()...) cmd.Env = append(cmd.Env, fmt.Sprintf("SHELL=%s", shell)) cmd.Env = append(cmd.Env, opts.Env...) f, err := pty.Start(cmd) if err != nil { return err } defer f.Close() go func() { for win := range opts.SizeCh { syscall.Syscall(syscall.SYS_IOCTL, f.Fd(), uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(&struct{ h, w, x, y uint16 }{uint16(win.Height), uint16(win.Width), 0, 0}))) } }() go func() { io.Copy(f, opts.StdIn) // stdin }() _, err = io.Copy(opts.StdOut, f) // stdout return err } ================================================ FILE: apps/daemon/pkg/git/add.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package git import "github.com/go-git/go-git/v5" func (s *Service) Add(files []string) error { repo, err := git.PlainOpen(s.WorkDir) if err != nil { return err } w, err := repo.Worktree() if err != nil { return err } for _, file := range files { _, err = w.Add(file) if err != nil { return err } } return nil } ================================================ FILE: apps/daemon/pkg/git/branch.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import ( "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" ) func (s *Service) CreateBranch(name string) error { repo, err := git.PlainOpen(s.WorkDir) if err != nil { return err } w, err := repo.Worktree() if err != nil { return err } return w.Checkout(&git.CheckoutOptions{ Create: true, Branch: plumbing.NewBranchReferenceName(name), }) } func (s *Service) ListBranches() ([]string, error) { repo, err := git.PlainOpen(s.WorkDir) if err != nil { return []string{}, err } branches, err := repo.Branches() if err != nil { return []string{}, err } var branchList []string err = branches.ForEach(func(ref *plumbing.Reference) error { branchList = append(branchList, ref.Name().Short()) return nil }) return branchList, err } func (s *Service) DeleteBranch(name string) error { repo, err := git.PlainOpen(s.WorkDir) if err != nil { return err } return repo.Storer.RemoveReference(plumbing.NewBranchReferenceName(name)) } ================================================ FILE: apps/daemon/pkg/git/checkout.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" ) func (s *Service) Checkout(branch string) error { r, err := git.PlainOpen(s.WorkDir) if err != nil { return fmt.Errorf("failed to open repository: %w", err) } w, err := r.Worktree() if err != nil { return fmt.Errorf("failed to get worktree: %w", err) } // Try to checkout as a branch first err = w.Checkout(&git.CheckoutOptions{ Branch: plumbing.NewBranchReferenceName(branch), }) if err != nil { // If branch checkout fails, try as a commit hash err = w.Checkout(&git.CheckoutOptions{ Hash: plumbing.NewHash(branch), }) if err != nil { return fmt.Errorf("failed to checkout branch or commit '%s': %w", branch, err) } } return nil } ================================================ FILE: apps/daemon/pkg/git/clone.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "strings" "github.com/daytonaio/daemon/pkg/gitprovider" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/http" ) func (s *Service) CloneRepository(repo *gitprovider.GitRepository, auth *http.BasicAuth) error { cloneOptions := &git.CloneOptions{ URL: repo.Url, SingleBranch: true, InsecureSkipTLS: true, Auth: auth, } if s.LogWriter != nil { cloneOptions.Progress = s.LogWriter } // Azure DevOps requires capabilities multi_ack / multi_ack_detailed, // which are not fully implemented and by default are included in // transport.UnsupportedCapabilities. // // This can be removed once go-git implements the git v2 protocol. 
transport.UnsupportedCapabilities = []capability.Capability{ capability.ThinPack, } if repo.Branch != "" { cloneOptions.ReferenceName = plumbing.NewBranchReferenceName(repo.Branch) } _, err := git.PlainClone(s.WorkDir, false, cloneOptions) if err != nil { return err } if repo.Target == gitprovider.CloneTargetCommit { r, err := git.PlainOpen(s.WorkDir) if err != nil { return err } w, err := r.Worktree() if err != nil { return err } err = w.Checkout(&git.CheckoutOptions{ Hash: plumbing.NewHash(repo.Sha), }) if err != nil { return err } } return err } func (s *Service) CloneRepositoryCmd(repo *gitprovider.GitRepository, auth *http.BasicAuth) []string { cloneCmd := []string{"git", "clone", "--single-branch"} // Only add branch flag if a specific branch is provided if repo.Branch != "" { cloneCmd = append(cloneCmd, "--branch", fmt.Sprintf("\"%s\"", repo.Branch)) } cloneUrl := repo.Url // Default to https protocol if not specified if !strings.Contains(cloneUrl, "://") { cloneUrl = fmt.Sprintf("https://%s", cloneUrl) } if auth != nil { cloneUrl = fmt.Sprintf("%s://%s:%s@%s", strings.Split(cloneUrl, "://")[0], auth.Username, auth.Password, strings.SplitN(cloneUrl, "://", 2)[1]) } cloneCmd = append(cloneCmd, cloneUrl, s.WorkDir) if repo.Target == gitprovider.CloneTargetCommit { cloneCmd = append(cloneCmd, "&&", "cd", s.WorkDir) cloneCmd = append(cloneCmd, "&&", "git", "checkout", repo.Sha) } return cloneCmd } ================================================ FILE: apps/daemon/pkg/git/commit.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import "github.com/go-git/go-git/v5" func (s *Service) Commit(message string, options *git.CommitOptions) (string, error) { repo, err := git.PlainOpen(s.WorkDir) if err != nil { return "", err } w, err := repo.Worktree() if err != nil { return "", err } commit, err := w.Commit(message, options) if err != nil { return "", err } return commit.String(), nil } ================================================ FILE: apps/daemon/pkg/git/config.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package git import ( "bytes" "fmt" "os" "path/filepath" "github.com/daytonaio/daemon/pkg/gitprovider" "gopkg.in/ini.v1" ) func (s *Service) SetGitConfig(userData *gitprovider.GitUser, providerConfig *gitprovider.GitProviderConfig) error { gitConfigFileName := s.GitConfigFileName var gitConfigContent []byte gitConfigContent, err := os.ReadFile(gitConfigFileName) if err != nil { gitConfigContent = []byte{} } cfg, err := ini.Load(gitConfigContent) if err != nil { return err } if !cfg.HasSection("credential") { _, err := cfg.NewSection("credential") if err != nil { return err } } _, err = cfg.Section("credential").NewKey("helper", "/usr/local/bin/daytona git-cred") if err != nil { return err } if !cfg.HasSection("safe") { _, err := cfg.NewSection("safe") if err != nil { return err } } _, err = cfg.Section("safe").NewKey("directory", s.WorkDir) if err != nil { return err } if userData != nil { if !cfg.HasSection("user") { _, err := cfg.NewSection("user") if err != nil { return err } } _, err := cfg.Section("user").NewKey("name", userData.Name) if err != nil { return err } _, err = cfg.Section("user").NewKey("email", userData.Email) if err != nil { return err } } if err := s.setSigningConfig(cfg, providerConfig, userData); err != nil { return err } var buf bytes.Buffer _, err = cfg.WriteTo(&buf) if err != nil { return err } return os.WriteFile(gitConfigFileName, 
buf.Bytes(), 0644) } func (s *Service) setSigningConfig(cfg *ini.File, providerConfig *gitprovider.GitProviderConfig, userData *gitprovider.GitUser) error { if providerConfig == nil || providerConfig.SigningMethod == nil || providerConfig.SigningKey == nil { return nil } if !cfg.HasSection("user") { _, err := cfg.NewSection("user") if err != nil { return err } } _, err := cfg.Section("user").NewKey("signingkey", *providerConfig.SigningKey) if err != nil { return err } if !cfg.HasSection("commit") { _, err := cfg.NewSection("commit") if err != nil { return err } } switch *providerConfig.SigningMethod { case gitprovider.SigningMethodGPG: _, err := cfg.Section("commit").NewKey("gpgSign", "true") if err != nil { return err } case gitprovider.SigningMethodSSH: err := s.configureAllowedSigners(userData.Email, *providerConfig.SigningKey) if err != nil { return err } if !cfg.HasSection("gpg") { _, err := cfg.NewSection("gpg") if err != nil { return err } } _, err = cfg.Section("gpg").NewKey("format", "ssh") if err != nil { return err } if !cfg.HasSection("gpg \"ssh\"") { _, err := cfg.NewSection("gpg \"ssh\"") if err != nil { return err } } allowedSignersFile := filepath.Join(os.Getenv("HOME"), ".ssh/allowed_signers") _, err = cfg.Section("gpg \"ssh\"").NewKey("allowedSignersFile", allowedSignersFile) if err != nil { return err } } return nil } func (s *Service) configureAllowedSigners(email, sshKey string) error { homeDir := os.Getenv("HOME") sshDir := filepath.Join(homeDir, ".ssh") allowedSignersFile := filepath.Join(sshDir, "allowed_signers") err := os.MkdirAll(sshDir, 0700) if err != nil { return fmt.Errorf("failed to create SSH directory: %w", err) } entry := fmt.Sprintf("%s namespaces=\"git\" %s\n", email, sshKey) existingContent, err := os.ReadFile(allowedSignersFile) if err != nil && !os.IsNotExist(err) { return fmt.Errorf("failed to read allowed_signers file: %w", err) } newContent := string(existingContent) + entry err = os.WriteFile(allowedSignersFile, 
[]byte(newContent), 0600) if err != nil { return fmt.Errorf("failed to write to allowed_signers file: %w", err) } return nil } ================================================ FILE: apps/daemon/pkg/git/log.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package git import ( "github.com/go-git/go-git/v5/plumbing/object" "github.com/go-git/go-git/v5" ) func (s *Service) Log() ([]GitCommitInfo, error) { repo, err := git.PlainOpen(s.WorkDir) if err != nil { return []GitCommitInfo{}, err } ref, err := repo.Head() if err != nil { return []GitCommitInfo{}, err } commits, err := repo.Log(&git.LogOptions{From: ref.Hash()}) if err != nil { return []GitCommitInfo{}, err } var history []GitCommitInfo err = commits.ForEach(func(commit *object.Commit) error { history = append(history, GitCommitInfo{ Hash: commit.Hash.String(), Author: commit.Author.Name, Email: commit.Author.Email, Message: commit.Message, Timestamp: commit.Author.When, }) return nil }) return history, err } ================================================ FILE: apps/daemon/pkg/git/pull.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package git import ( "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/go-git/go-git/v5" ) func (s *Service) Pull(auth *http.BasicAuth) error { repo, err := git.PlainOpen(s.WorkDir) if err != nil { return err } w, err := repo.Worktree() if err != nil { return err } options := &git.PullOptions{ RemoteName: "origin", Auth: auth, } return w.Pull(options) } ================================================ FILE: apps/daemon/pkg/git/push.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/go-git/go-git/v5" ) func (s *Service) Push(auth *http.BasicAuth) error { repo, err := git.PlainOpen(s.WorkDir) if err != nil { return err } ref, err := repo.Head() if err != nil { return err } options := &git.PushOptions{ Auth: auth, RefSpecs: []config.RefSpec{ config.RefSpec(fmt.Sprintf("%s:%s", ref.Name(), ref.Name())), }, } return repo.Push(options) } ================================================ FILE: apps/daemon/pkg/git/service.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package git import ( "io" "os" "path/filepath" "github.com/daytonaio/daemon/pkg/gitprovider" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing/transport/http" ) type GitStatus struct { CurrentBranch string `json:"currentBranch" validate:"required"` Files []*FileStatus `json:"fileStatus" validate:"required"` BranchPublished bool `json:"branchPublished" validate:"optional"` Ahead int `json:"ahead" validate:"optional"` Behind int `json:"behind" validate:"optional"` } // @name GitStatus type FileStatus struct { Name string `json:"name" validate:"required"` Extra string `json:"extra" validate:"required"` Staging Status `json:"staging" validate:"required"` Worktree Status `json:"worktree" validate:"required"` } // @name FileStatus // Status status code of a file in the Worktree type Status string // @name Status const ( Unmodified Status = "Unmodified" Untracked Status = "Untracked" Modified Status = "Modified" Added Status = "Added" Deleted Status = "Deleted" Renamed Status = "Renamed" Copied Status = "Copied" UpdatedButUnmerged Status = "Updated but unmerged" ) var MapStatus map[git.StatusCode]Status = map[git.StatusCode]Status{ git.Unmodified: Unmodified, git.Untracked: Untracked, git.Modified: Modified, git.Added: Added, 
git.Deleted: Deleted, git.Renamed: Renamed, git.Copied: Copied, git.UpdatedButUnmerged: UpdatedButUnmerged, } type IGitService interface { CloneRepository(repo *gitprovider.GitRepository, auth *http.BasicAuth) error CloneRepositoryCmd(repo *gitprovider.GitRepository, auth *http.BasicAuth) []string RepositoryExists() (bool, error) SetGitConfig(userData *gitprovider.GitUser, providerConfig *gitprovider.GitProviderConfig) error GetGitStatus() (*GitStatus, error) } type Service struct { WorkDir string GitConfigFileName string LogWriter io.Writer OpenRepository *git.Repository } func (s *Service) RepositoryExists() (bool, error) { _, err := os.Stat(filepath.Join(s.WorkDir, ".git")) if os.IsNotExist(err) { return false, nil } if err != nil { return false, err } return true, nil } ================================================ FILE: apps/daemon/pkg/git/service_test.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package git_test import ( "testing" "github.com/daytonaio/daemon/pkg/git" "github.com/daytonaio/daemon/pkg/gitprovider" "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/stretchr/testify/suite" ) var repoHttp = &gitprovider.GitRepository{ Id: "123", Url: "http://localhost:3000/daytonaio/daytona", Name: "daytona", Branch: "main", Target: gitprovider.CloneTargetBranch, } var repoHttps = &gitprovider.GitRepository{ Id: "123", Url: "https://github.com/daytonaio/daytona", Name: "daytona", Branch: "main", Target: gitprovider.CloneTargetBranch, } var repoWithoutProtocol = &gitprovider.GitRepository{ Id: "123", Url: "github.com/daytonaio/daytona", Name: "daytona", Branch: "main", Target: gitprovider.CloneTargetBranch, } var repoWithCloneTargetCommit = &gitprovider.GitRepository{ Id: "123", Url: "https://github.com/daytonaio/daytona", Name: "daytona", Branch: "main", Sha: "1234567890", Target: gitprovider.CloneTargetCommit, } var creds = &http.BasicAuth{ Username: 
"daytonaio", Password: "Daytona123", } type GitServiceTestSuite struct { suite.Suite gitService git.IGitService } func NewGitServiceTestSuite() *GitServiceTestSuite { return &GitServiceTestSuite{} } func (s *GitServiceTestSuite) SetupTest() { s.gitService = &git.Service{ WorkDir: "/work-dir", } } func TestGitService(t *testing.T) { suite.Run(t, NewGitServiceTestSuite()) } func (s *GitServiceTestSuite) TestCloneRepositoryCmd_WithCreds() { cloneCmd := s.gitService.CloneRepositoryCmd(repoHttps, creds) s.Require().Equal([]string{"git", "clone", "--single-branch", "--branch", "\"main\"", "https://daytonaio:Daytona123@github.com/daytonaio/daytona", "/work-dir"}, cloneCmd) cloneCmd = s.gitService.CloneRepositoryCmd(repoHttp, creds) s.Require().Equal([]string{"git", "clone", "--single-branch", "--branch", "\"main\"", "http://daytonaio:Daytona123@localhost:3000/daytonaio/daytona", "/work-dir"}, cloneCmd) cloneCmd = s.gitService.CloneRepositoryCmd(repoWithoutProtocol, creds) s.Require().Equal([]string{"git", "clone", "--single-branch", "--branch", "\"main\"", "https://daytonaio:Daytona123@github.com/daytonaio/daytona", "/work-dir"}, cloneCmd) cloneCmd = s.gitService.CloneRepositoryCmd(repoWithCloneTargetCommit, creds) s.Require().Equal([]string{"git", "clone", "--single-branch", "--branch", "\"main\"", "https://daytonaio:Daytona123@github.com/daytonaio/daytona", "/work-dir", "&&", "cd", "/work-dir", "&&", "git", "checkout", "1234567890"}, cloneCmd) } func (s *GitServiceTestSuite) TestCloneRepositoryCmd_WithoutCreds() { cloneCmd := s.gitService.CloneRepositoryCmd(repoHttps, nil) s.Require().Equal([]string{"git", "clone", "--single-branch", "--branch", "\"main\"", "https://github.com/daytonaio/daytona", "/work-dir"}, cloneCmd) cloneCmd = s.gitService.CloneRepositoryCmd(repoHttp, nil) s.Require().Equal([]string{"git", "clone", "--single-branch", "--branch", "\"main\"", "http://localhost:3000/daytonaio/daytona", "/work-dir"}, cloneCmd) cloneCmd = 
s.gitService.CloneRepositoryCmd(repoWithoutProtocol, nil) s.Require().Equal([]string{"git", "clone", "--single-branch", "--branch", "\"main\"", "https://github.com/daytonaio/daytona", "/work-dir"}, cloneCmd) cloneCmd = s.gitService.CloneRepositoryCmd(repoWithCloneTargetCommit, nil) s.Require().Equal([]string{"git", "clone", "--single-branch", "--branch", "\"main\"", "https://github.com/daytonaio/daytona", "/work-dir", "&&", "cd", "/work-dir", "&&", "git", "checkout", "1234567890"}, cloneCmd) } ================================================ FILE: apps/daemon/pkg/git/status.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "os/exec" "strconv" "strings" "github.com/go-git/go-git/v5" ) func (s *Service) GetGitStatus() (*GitStatus, error) { repo, err := git.PlainOpen(s.WorkDir) if err != nil { return nil, err } ref, err := repo.Head() if err != nil { return nil, err } worktree, err := repo.Worktree() if err != nil { return nil, err } status, err := worktree.Status() if err != nil { return nil, err } files := []*FileStatus{} for path, file := range status { files = append(files, &FileStatus{ Name: path, Extra: file.Extra, Staging: MapStatus[file.Staging], Worktree: MapStatus[file.Worktree], }) } branchPublished, err := s.isBranchPublished() if err != nil { return nil, err } ahead, behind, err := s.getAheadBehindInfo() if err != nil { return nil, err } return &GitStatus{ CurrentBranch: ref.Name().Short(), Files: files, BranchPublished: branchPublished, Ahead: ahead, Behind: behind, }, nil } func (s *Service) isBranchPublished() (bool, error) { upstream, err := s.getUpstreamBranch() if err != nil { return false, err } return upstream != "", nil } func (s *Service) getUpstreamBranch() (string, error) { cmd := exec.Command("git", "-C", s.WorkDir, "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{upstream}") out, err := cmd.CombinedOutput() if err != nil { return 
"", nil } return strings.TrimSpace(string(out)), nil } func (s *Service) getAheadBehindInfo() (int, int, error) { upstream, err := s.getUpstreamBranch() if err != nil { return 0, 0, err } if upstream == "" { return 0, 0, nil } cmd := exec.Command("git", "-C", s.WorkDir, "rev-list", "--left-right", "--count", fmt.Sprintf("%s...HEAD", upstream)) out, err := cmd.CombinedOutput() if err != nil { return 0, 0, nil } return parseAheadBehind(out) } func parseAheadBehind(output []byte) (int, int, error) { counts := strings.Split(strings.TrimSpace(string(output)), "\t") if len(counts) != 2 { return 0, 0, nil } ahead, err := strconv.Atoi(counts[1]) if err != nil { return 0, 0, nil } behind, err := strconv.Atoi(counts[0]) if err != nil { return 0, 0, nil } return ahead, behind, nil } ================================================ FILE: apps/daemon/pkg/git/types.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package git import "time" type GitCommitInfo struct { Hash string `json:"hash" validate:"required"` Author string `json:"author" validate:"required"` Email string `json:"email" validate:"required"` Message string `json:"message" validate:"required"` Timestamp time.Time `json:"timestamp" validate:"required"` } // @name GitCommitInfo ================================================ FILE: apps/daemon/pkg/gitprovider/types.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package gitprovider

// SigningMethod identifies how commits are signed for a provider account.
type SigningMethod string // @name SigningMethod

const (
	SigningMethodSSH SigningMethod = "ssh"
	SigningMethodGPG SigningMethod = "gpg"
)

// GitProviderConfig holds the credentials and endpoint configuration for a
// single configured git provider account.
type GitProviderConfig struct {
	Id         string `json:"id" validate:"required"`
	ProviderId string `json:"providerId" validate:"required"`
	Username   string `json:"username" validate:"required"`
	// BaseApiUrl is optional — presumably only set for self-hosted provider
	// instances; confirm against the provider implementations.
	BaseApiUrl    *string        `json:"baseApiUrl,omitempty" validate:"optional"`
	Token         string         `json:"token" validate:"required"`
	Alias         string         `json:"alias" validate:"required"`
	SigningKey    *string        `json:"signingKey,omitempty" validate:"optional"`
	SigningMethod *SigningMethod `json:"signingMethod,omitempty" validate:"optional"`
} // @name GitProvider

// GitUser describes the authenticated user at the git provider.
type GitUser struct {
	Id       string `json:"id" validate:"required"`
	Username string `json:"username" validate:"required"`
	Name     string `json:"name" validate:"required"`
	Email    string `json:"email" validate:"required"`
} // @name GitUser

// CloneTarget selects what a repository clone should check out.
type CloneTarget string // @name CloneTarget

const (
	CloneTargetBranch CloneTarget = "branch"
	CloneTargetCommit CloneTarget = "commit"
)

// GitRepository describes a repository to clone and the ref to check out.
type GitRepository struct {
	Id       string  `json:"id" validate:"required"`
	Url      string  `json:"url" validate:"required"`
	Name     string  `json:"name" validate:"required"`
	Branch   string  `json:"branch" validate:"required"`
	Sha      string  `json:"sha" validate:"required"`
	Owner    string  `json:"owner" validate:"required"`
	PrNumber *uint32 `json:"prNumber,omitempty" validate:"optional"`
	Source   string  `json:"source" validate:"required"`
	Path     *string `json:"path,omitempty" validate:"optional"`
	// NOTE: the JSON key is "cloneTarget" while the Go field is Target.
	Target CloneTarget `json:"cloneTarget,omitempty" validate:"optional"`
} // @name GitRepository

// GitNamespace is an owner/organization grouping of repositories.
type GitNamespace struct {
	Id   string `json:"id" validate:"required"`
	Name string `json:"name" validate:"required"`
} // @name GitNamespace

// GitBranch pairs a branch name with its head commit SHA.
type GitBranch struct {
	Name string `json:"name" validate:"required"`
	Sha  string `json:"sha" validate:"required"`
} // @name GitBranch

// GitPullRequest describes a pull request and its source repository.
type GitPullRequest struct {
	Name            string `json:"name" validate:"required"`
	Branch          string `json:"branch" validate:"required"`
	Sha             string `json:"sha" validate:"required"`
	SourceRepoId    string `json:"sourceRepoId" validate:"required"`
	SourceRepoUrl   string `json:"sourceRepoUrl" validate:"required"`
	SourceRepoOwner string `json:"sourceRepoOwner" validate:"required"`
	SourceRepoName  string `json:"sourceRepoName" validate:"required"`
} // @name GitPullRequest

// GitEventData carries the payload of a git webhook event.
type GitEventData struct {
	Url    string `json:"url" validate:"required"`
	Branch string `json:"branch" validate:"required"`
	Sha    string `json:"sha" validate:"required"`
	// NOTE: the JSON key is "user" even though the Go field is Owner.
	Owner         string   `json:"user" validate:"required"`
	AffectedFiles []string `json:"affectedFiles" validate:"required"`
} // @name GitEventData


================================================
FILE: apps/daemon/pkg/recording/delete.go
================================================

// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package recording

import (
	"fmt"
	"os"
)

// DeleteRecording deletes a recording by ID.
//
// Active recordings are refused (ErrRecordingStillActive); the caller must
// stop the recording first. Returns ErrRecordingNotFound if the recording
// does not exist, either in the lookup or because the file vanished between
// lookup and removal.
func (s *RecordingService) DeleteRecording(id string) error {
	// Check if it's an active recording
	if _, exists := s.activeRecordings.Get(id); exists {
		return ErrRecordingStillActive
	}

	// Find the recording (active map or disk scan)
	recording, err := s.GetRecording(id)
	if err != nil {
		return err
	}

	// Delete the file
	if err := os.Remove(recording.FilePath); err != nil {
		if os.IsNotExist(err) {
			return ErrRecordingNotFound
		}
		return fmt.Errorf("failed to delete recording file: %w", err)
	}

	s.logger.Debug("Deleted recording", "id", id, "filePath", recording.FilePath)
	return nil
}


================================================
FILE: apps/daemon/pkg/recording/get.go
================================================

// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package recording

// GetRecording returns a recording by ID (active or from filesystem).
//
// For an active recording a shallow copy of the tracked struct is returned,
// so the caller does not share the mutable entry held by the service.
// Falls back to scanning completed recordings on disk; returns
// ErrRecordingNotFound if the ID matches neither.
func (s *RecordingService) GetRecording(id string) (*Recording, error) {
	// First check active recordings
	if active, exists := s.activeRecordings.Get(id); exists {
		recording := *active.recording
		return &recording, nil
	}

	// Search in completed recordings on disk
	recordings, err := s.ListRecordings()
	if err != nil {
		return nil, err
	}

	for _, rec := range recordings {
		if rec.ID == id {
			return &rec, nil
		}
	}

	return nil, ErrRecordingNotFound
}


================================================
FILE: apps/daemon/pkg/recording/list.go
================================================

// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package recording

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/google/uuid"
)

// ListRecordings returns all recordings (active and completed).
//
// Active recordings come from the in-memory map; completed ones are
// reconstructed by scanning the recordings directory for .mp4 files.
// A missing directory is not an error — it simply yields only the active
// entries. For on-disk files the recording ID is recovered from the
// filename prefix; start/end times are approximated from file mtime.
func (s *RecordingService) ListRecordings() ([]Recording, error) {
	recordings := []Recording{}

	// Add active recordings
	for item := range s.activeRecordings.IterBuffered() {
		recordings = append(recordings, *item.Val.recording)
	}

	// Scan recordings directory for completed recordings
	if _, err := os.Stat(s.recordingsDir); os.IsNotExist(err) {
		return recordings, nil
	}

	entries, err := os.ReadDir(s.recordingsDir)
	if err != nil {
		return nil, fmt.Errorf("failed to read recordings directory: %w", err)
	}

	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}

		// Only include MP4 files
		if filepath.Ext(entry.Name()) != ".mp4" {
			continue
		}

		// Skip files that are currently being recorded, so an in-progress
		// recording is not reported twice (once active, once from disk).
		isActive := false
		for item := range s.activeRecordings.IterBuffered() {
			if item.Val.recording.FileName == entry.Name() {
				isActive = true
				break
			}
		}
		if isActive {
			continue
		}

		filePath := filepath.Join(s.recordingsDir, entry.Name())
		fileInfo, err := entry.Info()
		if err != nil {
			// File may have been deleted between ReadDir and Info; skip it.
			continue
		}

		// Extract ID from filename (format: {id}_{label}_{timestamp}.mp4 or
		// {id}_session_{timestamp}.mp4)
		// The ID is the first part before the underscore (UUID format)
		fileName := entry.Name()
		var recordingID string
		if idx := strings.Index(fileName, "_"); idx > 0 {
			potentialID := fileName[:idx]
			// Validate it's a UUID
			if _, err := uuid.Parse(potentialID); err == nil {
				recordingID = potentialID
			}
		}

		// Fallback to generating ID from file path for legacy recordings without ID in filename.
		// UUIDv5 over the path keeps the derived ID stable across listings.
		if recordingID == "" {
			recordingID = uuid.NewSHA1(uuid.NameSpaceURL, []byte(filePath)).String()
		}

		// Create recording entry from file info
		// Use file modification time as a proxy for end time
		modTime := fileInfo.ModTime()
		size := fileInfo.Size()
		recording := Recording{
			ID:        recordingID,
			FileName:  fileName,
			FilePath:  filePath,
			StartTime: modTime, // Approximation - actual start time unknown for old recordings
			EndTime:   &modTime,
			Status:    "completed",
			SizeBytes: &size,
		}

		recordings = append(recordings, recording)
	}

	return recordings, nil
}


================================================
FILE: apps/daemon/pkg/recording/service.go
================================================

// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package recording

import (
	"log/slog"

	cmap "github.com/orcaman/concurrent-map/v2"
)

// RecordingService manages screen recording sessions.
type RecordingService struct {
	logger *slog.Logger
	// activeRecordings tracks in-progress ffmpeg captures keyed by recording ID.
	activeRecordings cmap.ConcurrentMap[string, *activeRecording]
	// recordingsDir is where finished .mp4 files are written and scanned.
	recordingsDir string
}

// NewRecordingService builds a service writing recordings into recordingsDir.
func NewRecordingService(logger *slog.Logger, recordingsDir string) *RecordingService {
	return &RecordingService{
		logger:           logger.With(slog.String("component", "recording_service")),
		activeRecordings: cmap.New[*activeRecording](),
		recordingsDir:    recordingsDir,
	}
}

// GetRecordingsDir returns the directory recordings are stored in.
func (s *RecordingService) GetRecordingsDir() string {
	return s.recordingsDir
}


================================================
FILE: apps/daemon/pkg/recording/start.go
================================================

// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package recording import ( "fmt" "os" "os/exec" "path/filepath" "regexp" "strings" "time" "github.com/google/uuid" ) // validateLabel validates a user-provided label to prevent path injection // and ensure it's safe for use in a filename. Returns error if invalid. func validateLabel(label string) error { const maxLabelLength = 100 // Trim whitespace for validation trimmed := strings.TrimSpace(label) // Check if label is empty after trimming if trimmed == "" { return ErrInvalidLabel } // Check length if len(label) > maxLabelLength { return ErrInvalidLabel } // Check for path separators (directory traversal) if strings.Contains(label, "/") || strings.Contains(label, "\\") { return ErrInvalidLabel } // Check for leading dots (hidden files) if strings.HasPrefix(trimmed, ".") { return ErrInvalidLabel } // Only allow safe characters: alphanumeric, spaces, dots, underscores, and hyphens safePattern := regexp.MustCompile(`^[A-Za-z0-9.\s_-]+$`) if !safePattern.MatchString(label) { return ErrInvalidLabel } return nil } // StartRecording starts a new screen recording session func (s *RecordingService) StartRecording(label *string) (*Recording, error) { // Ensure recordings directory exists if err := os.MkdirAll(s.recordingsDir, 0755); err != nil { return nil, fmt.Errorf("failed to create recordings directory: %w", err) } // Check if ffmpeg is available ffmpegPath, err := exec.LookPath("ffmpeg") if err != nil { return nil, ErrFFmpegNotFound } // Check for DISPLAY environment variable (required for X11) display := os.Getenv("DISPLAY") if display == "" { display = ":0" // Default to :0 if not set } // Generate recording ID and filename // ID is included in filename so it can be recovered when scanning disk id := uuid.New().String() now := time.Now() timestamp := now.Format("20060102_150405") // Validate label if provided (reject invalid labels without modification) if label != nil && *label != "" { if err := validateLabel(*label); err != nil 
{ return nil, err } } var fileName string if label != nil && *label != "" { fileName = fmt.Sprintf("%s_%s_%s.mp4", id, *label, timestamp) } else { fileName = fmt.Sprintf("%s_session_%s.mp4", id, timestamp) } filePath := filepath.Join(s.recordingsDir, fileName) // Create recording entry recording := &Recording{ ID: id, FileName: fileName, FilePath: filePath, StartTime: now, Status: "recording", } // Build ffmpeg command for Linux screen capture using x11grab // -f x11grab: X11 screen capture // -framerate 30: 30 FPS // -i :0.0: Capture from display :0, screen 0 // -c:v libx264: H.264 codec // -preset ultrafast: Fast encoding for real-time capture // -pix_fmt yuv420p: Standard pixel format for compatibility cmd := exec.Command(ffmpegPath, "-f", "x11grab", "-framerate", "30", "-i", display, "-c:v", "libx264", "-preset", "ultrafast", "-pix_fmt", "yuv420p", "-y", // Overwrite output file if exists filePath, ) // Set environment to ensure DISPLAY is available cmd.Env = append(os.Environ(), fmt.Sprintf("DISPLAY=%s", display)) // Get stdin pipe for graceful shutdown stdinPipe, err := cmd.StdinPipe() if err != nil { return nil, fmt.Errorf("failed to get stdin pipe: %w", err) } // Start ffmpeg process if err := cmd.Start(); err != nil { return nil, fmt.Errorf("failed to start ffmpeg: %w", err) } s.logger.Debug("Started recording", "id", id, "path", filePath, "display", display) // Create a done channel to receive the Wait() result exactly once done := make(chan error, 1) // Store active recording s.activeRecordings.Set(id, &activeRecording{ recording: recording, cmd: cmd, stdinPipe: stdinPipe, done: done, }) // Start a goroutine to wait for the process and handle unexpected exits go func() { err := cmd.Wait() done <- err // Signal the done channel with the result // Atomically remove from active recordings if still there if active, exists := s.activeRecordings.Pop(id); exists { if err != nil { s.logger.Warn("Recording ffmpeg process exited unexpectedly", "id", id, "error", 
err) active.recording.Status = "failed" } } }() return recording, nil } ================================================ FILE: apps/daemon/pkg/recording/stop.go ================================================ // Copyright Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package recording import ( "os" "time" ) // StopRecording stops an active recording session func (s *RecordingService) StopRecording(id string) (*Recording, error) { active, exists := s.activeRecordings.Pop(id) if !exists { return nil, ErrRecordingNotFound } // Send 'q' to ffmpeg stdin for graceful shutdown // This allows ffmpeg to properly finalize the video file if active.stdinPipe != nil { _, err := active.stdinPipe.Write([]byte("q")) if err != nil { s.logger.Warn("Failed to send quit signal to ffmpeg", "error", err) } active.stdinPipe.Close() } // Wait for ffmpeg to finish by waiting on the done channel select { case <-active.done: // Process exited normally case <-time.After(10 * time.Second): // Force kill if it doesn't exit gracefully s.logger.Warn("Recording did not stop gracefully, force killing", "id", id) if active.cmd.Process != nil { err := active.cmd.Process.Kill() if err != nil { s.logger.Error("Failed to force kill recording", "id", id, "error", err) } } // Still wait for the done channel to avoid goroutine leak <-active.done } // Update recording metadata now := time.Now() active.recording.EndTime = &now active.recording.Status = "completed" duration := now.Sub(active.recording.StartTime).Seconds() active.recording.DurationSeconds = &duration // Get file size if fileInfo, err := os.Stat(active.recording.FilePath); err == nil { size := fileInfo.Size() active.recording.SizeBytes = &size } s.logger.Debug("Stopped recording", "id", id, "durationSeconds", duration) return active.recording, nil } ================================================ FILE: apps/daemon/pkg/recording/types.go ================================================ // Copyright Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package recording

import (
	"errors"
	"io"
	"os/exec"
	"time"
)

// Sentinel errors returned by the recording service.
var (
	ErrRecordingNotFound    = errors.New("recording not found")
	ErrRecordingNotActive   = errors.New("recording is not active")
	ErrRecordingStillActive = errors.New("cannot delete an active recording")
	ErrFFmpegNotFound       = errors.New("ffmpeg not found in PATH")
	ErrInvalidLabel         = errors.New("invalid label: must be 1-100 characters, cannot start with dot, cannot contain path separators (/ or \\), and can only contain letters, numbers, spaces, dots, underscores, and hyphens")
)

// Recording is the metadata for one screen recording, active or completed.
type Recording struct {
	ID        string
	FileName  string
	FilePath  string
	StartTime time.Time
	// EndTime is nil while the recording is still in progress.
	EndTime *time.Time
	// Status is one of "recording", "completed", or "failed".
	Status string
	// DurationSeconds is nil until the recording has been stopped.
	DurationSeconds *float64
	// SizeBytes is nil if the output file size could not be determined.
	SizeBytes *int64
}

// activeRecording holds the state of a currently running recording
type activeRecording struct {
	recording *Recording
	// cmd is the running ffmpeg process.
	cmd *exec.Cmd
	// stdinPipe is used to send 'q' to ffmpeg for graceful shutdown.
	stdinPipe io.WriteCloser
	// done receives the cmd.Wait() result exactly once.
	done chan error
}


================================================
FILE: apps/daemon/pkg/recordingdashboard/assets.go
================================================

// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package recordingdashboard

import "embed"

// static embeds the dashboard frontend files under static/.
//
//go:embed static
var static embed.FS


================================================
FILE: apps/daemon/pkg/recordingdashboard/server.go
================================================

// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package recordingdashboard

import (
	"fmt"
	"io/fs"
	"log/slog"
	"net/http"
	"os"
	"path/filepath"
	"strings"

	"github.com/daytonaio/daemon/pkg/recording"
	recordingcontroller "github.com/daytonaio/daemon/pkg/toolbox/computeruse/recording"
	"github.com/daytonaio/daemon/pkg/toolbox/config"
	"github.com/gin-gonic/gin"
)

// DashboardServer serves the recording dashboard
type DashboardServer struct {
	logger           *slog.Logger
	recordingService *recording.RecordingService
}

// NewDashboardServer creates a new dashboard server
func NewDashboardServer(logger *slog.Logger, recordingService *recording.RecordingService) *DashboardServer {
	return &DashboardServer{
		logger:           logger.With(slog.String("component", "recordings_dashboard")),
		recordingService: recordingService,
	}
}

// Start starts the dashboard server on the configured port.
//
// Blocks on gin's Run; only returns on setup failure or server error.
// Routes: "/" (embedded static UI), "/videos/:filename" (MP4 streaming),
// and the JSON API under /api/recordings.
func (s *DashboardServer) Start() error {
	gin.SetMode(gin.ReleaseMode)
	r := gin.New()
	r.Use(gin.Recovery())

	// Prepare the embedded frontend files
	// Serve the files from the embedded filesystem
	staticFS, err := fs.Sub(static, "static")
	if err != nil {
		return fmt.Errorf("failed to create sub filesystem: %w", err)
	}

	// Serve dashboard HTML from embedded files
	r.GET("/", gin.WrapH(http.FileServer(http.FS(staticFS))))

	// Serve video files
	r.GET("/videos/:filename", s.serveVideo)

	// API endpoints
	r.GET("/api/recordings", s.listRecordings)
	r.DELETE("/api/recordings", s.deleteRecordings)

	addr := fmt.Sprintf(":%d", config.RECORDING_DASHBOARD_PORT)
	s.logger.Info("Starting recording dashboard", "port", config.RECORDING_DASHBOARD_PORT)
	err = r.Run(addr)
	return err
}

// serveVideo streams a recording file by filename, rejecting any path that
// would resolve outside the recordings directory.
func (s *DashboardServer) serveVideo(ctx *gin.Context) {
	filename := ctx.Param("filename")
	recordingsDir := s.recordingService.GetRecordingsDir()
	filePath := filepath.Join(recordingsDir, filename)

	// Security check - prevent path traversal
	// filepath.Rel returns a path with ".." if target is outside base directory.
	// NOTE(review): the Contains check also rejects legitimate filenames that
	// merely contain ".." (e.g. "a..b.mp4") — conservative but safe.
	rel, err := filepath.Rel(recordingsDir, filePath)
	if err != nil || strings.Contains(rel, "..") {
		ctx.JSON(http.StatusForbidden, gin.H{"error": "access denied"})
		return
	}

	if _, err := os.Stat(filePath); os.IsNotExist(err) {
		ctx.JSON(http.StatusNotFound, gin.H{"error": "file not found"})
		return
	}

	ctx.File(filePath)
}

// listRecordings returns all recordings (active and completed) as DTOs.
func (s *DashboardServer) listRecordings(ctx *gin.Context) {
	recordings, err := s.recordingService.ListRecordings()
	if err != nil {
		ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	recordingDTOs := make([]recordingcontroller.RecordingDTO, 0, len(recordings))
	for _, rec := range recordings {
		recordingDTOs = append(recordingDTOs, *recordingcontroller.RecordingToDTO(&rec))
	}

	ctx.JSON(http.StatusOK, gin.H{"recordings": recordingDTOs})
}

// deleteRequest is the JSON body for the bulk-delete endpoint.
type deleteRequest struct {
	IDs []string `json:"ids"`
}

// deleteRecordings deletes the requested recordings best-effort, reporting
// per-ID success/failure rather than aborting on the first error.
func (s *DashboardServer) deleteRecordings(ctx *gin.Context) {
	var req deleteRequest
	if err := ctx.ShouldBindJSON(&req); err != nil {
		ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
		return
	}

	deleted := []string{}
	failed := []string{}

	// Direct calls to data provider
	for _, id := range req.IDs {
		if err := s.recordingService.DeleteRecording(id); err != nil {
			failed = append(failed, id)
			s.logger.Warn("Failed to delete recording", "id", id, "error", err)
		} else {
			deleted = append(deleted, id)
		}
	}

	ctx.JSON(http.StatusOK, gin.H{
		"deleted": deleted,
		"failed":  failed,
	})
}


================================================
FILE: apps/daemon/pkg/recordingdashboard/static/index.html
================================================

Screen Recordings

Screen Recordings

Auto-refresh: 5s
Filename Duration Size Status Date Actions
📹
Loading...
================================================ FILE: apps/daemon/pkg/session/command.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package session import ( "errors" "fmt" "os" "strconv" "strings" common_errors "github.com/daytonaio/common-go/pkg/errors" ) func (s *SessionService) getSessionCommands(sessionId string) ([]*Command, error) { session, ok := s.sessions.Get(sessionId) if !ok { return nil, common_errors.NewNotFoundError(errors.New("session not found")) } commands := []*Command{} for _, command := range session.commands.Items() { cmd, err := s.GetSessionCommand(sessionId, command.Id) if err != nil { return nil, err } commands = append(commands, cmd) } return commands, nil } func (s *SessionService) GetSessionCommand(sessionId, cmdId string) (*Command, error) { session, ok := s.sessions.Get(sessionId) if !ok { return nil, common_errors.NewNotFoundError(errors.New("session not found")) } command, ok := session.commands.Get(cmdId) if !ok { return nil, common_errors.NewNotFoundError(errors.New("command not found")) } if command.ExitCode != nil { return command, nil } _, exitCodeFilePath := command.LogFilePath(session.Dir(s.configDir)) exitCode, err := os.ReadFile(exitCodeFilePath) if err != nil { if os.IsNotExist(err) { return command, nil } return nil, fmt.Errorf("failed to read exit code file: %w", err) } exitCodeInt, err := strconv.Atoi(strings.TrimRight(string(exitCode), "\n")) if err != nil { return nil, fmt.Errorf("failed to convert exit code to int: %w", err) } command.ExitCode = &exitCodeInt return command, nil } ================================================ FILE: apps/daemon/pkg/session/common.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package session import ( "net/http" ) func IsCombinedOutput(sdkVersion string, versionComparison *int, requestHeader http.Header) bool { return (versionComparison != nil && *versionComparison < 0 && sdkVersion != "0.0.0-dev") || (sdkVersion == "" && requestHeader.Get("X-Daytona-Split-Output") != "true") } ================================================ FILE: apps/daemon/pkg/session/create.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package session import ( "context" "errors" "fmt" "os" "os/exec" "github.com/daytonaio/daemon/pkg/common" cmap "github.com/orcaman/concurrent-map/v2" common_errors "github.com/daytonaio/common-go/pkg/errors" ) func (s *SessionService) Create(sessionId string, isLegacy bool) error { ctx, cancel := context.WithCancel(context.Background()) cmd := exec.CommandContext(ctx, common.GetShell()) cmd.Env = os.Environ() if isLegacy { homeDir, err := os.UserHomeDir() if err != nil { cancel() return fmt.Errorf("failed to obtain user home directory for legacy SDK compatibility: %w", err) } cmd.Dir = homeDir } if _, ok := s.sessions.Get(sessionId); ok { cancel() return common_errors.NewConflictError(errors.New("session already exists")) } stdinWriter, err := cmd.StdinPipe() if err != nil { cancel() return err } err = cmd.Start() if err != nil { cancel() return err } session := &session{ id: sessionId, cmd: cmd, stdinWriter: stdinWriter, commands: cmap.New[*Command](), ctx: ctx, cancel: cancel, } s.sessions.Set(sessionId, session) err = os.MkdirAll(session.Dir(s.configDir), 0755) if err != nil { return err } return nil } ================================================ FILE: apps/daemon/pkg/session/delete.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package session

import (
	"context"
	"errors"
	"os"
	"syscall"
	"time"

	common_errors "github.com/daytonaio/common-go/pkg/errors"
	"github.com/shirou/gopsutil/v4/process"
)

// Delete tears down a session: terminates its process tree, cancels its
// context, removes its on-disk directory, and deregisters it. Termination
// failure is logged but does not abort the cleanup.
func (s *SessionService) Delete(ctx context.Context, sessionId string) error {
	session, ok := s.sessions.Get(sessionId)
	if !ok {
		return common_errors.NewNotFoundError(errors.New("session not found"))
	}

	// Terminate process group first with signals (SIGTERM -> SIGKILL)
	err := s.terminateSession(ctx, session)
	if err != nil {
		s.logger.ErrorContext(ctx, "Failed to terminate session", "sessionId", session.id, "error", err)
		// Continue with cleanup even if termination fails
	}

	// Cancel context after termination
	session.cancel()

	// Clean up session directory
	err = os.RemoveAll(session.Dir(s.configDir))
	if err != nil {
		return common_errors.NewBadRequestError(err)
	}

	s.sessions.Remove(session.id)

	return nil
}

// terminateSession attempts graceful shutdown of the session's shell and its
// descendants: SIGTERM to the whole tree, then a bounded wait, then SIGKILL
// to the tree if children are still alive. Safe to call when the process was
// never started (returns nil).
func (s *SessionService) terminateSession(ctx context.Context, session *session) error {
	if session.cmd == nil || session.cmd.Process == nil {
		return nil
	}

	pid := session.cmd.Process.Pid

	// Signal descendants first, then the session shell itself.
	_ = s.signalProcessTree(pid, syscall.SIGTERM)
	err := session.cmd.Process.Signal(syscall.SIGTERM)
	if err != nil {
		// If SIGTERM fails, try SIGKILL immediately
		s.logger.WarnContext(ctx, "SIGTERM failed for session, trying SIGKILL", "sessionId", session.id, "error", err)
		_ = s.signalProcessTree(pid, syscall.SIGKILL)
		return session.cmd.Process.Kill()
	}

	// Wait for graceful termination
	if s.waitForTermination(ctx, pid, s.terminationGracePeriod, s.terminationCheckInterval) {
		s.logger.DebugContext(ctx, "Session terminated gracefully", "sessionId", session.id)
		return nil
	}

	s.logger.DebugContext(ctx, "Session timeout, sending SIGKILL to process tree", "sessionId", session.id)
	_ = s.signalProcessTree(pid, syscall.SIGKILL)
	return session.cmd.Process.Kill()
}

// signalProcessTree sends sig to every descendant of pid (depth-first:
// grandchildren via recursion before the direct children), but NOT to pid
// itself — the caller signals the root separately. Errors from individual
// signals are intentionally ignored (best-effort teardown).
func (s *SessionService) signalProcessTree(pid int, sig syscall.Signal) error {
	parent, err := process.NewProcess(int32(pid))
	if err != nil {
		return err
	}

	descendants, err := parent.Children()
	if err != nil {
		return err
	}

	// Recurse into each child's subtree first.
	for _, child := range descendants {
		childPid := int(child.Pid)
		_ = s.signalProcessTree(childPid, sig)
	}

	for _, child := range descendants {
		// Convert to OS process to send custom signal
		if childProc, err := os.FindProcess(int(child.Pid)); err == nil {
			_ = childProc.Signal(sig)
		}
	}

	return nil
}

// waitForTermination polls every interval until the process identified by
// pid is gone (or unqueryable, or has no remaining children), or until
// timeout expires. Returns true if the tree is considered terminated.
func (s *SessionService) waitForTermination(ctx context.Context, pid int, timeout, interval time.Duration) bool {
	timeoutCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-timeoutCtx.Done():
			return false
		case <-ticker.C:
			parent, err := process.NewProcess(int32(pid))
			if err != nil {
				// Process doesn't exist anymore
				return true
			}
			children, err := parent.Children()
			if err != nil {
				// Unable to enumerate children - likely process is dying/dead
				return true
			}
			if len(children) == 0 {
				return true
			}
		}
	}
}


================================================
FILE: apps/daemon/pkg/session/execute.go
================================================

// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package session

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/daytonaio/daemon/internal/util"
	"github.com/google/uuid"

	common_errors "github.com/daytonaio/common-go/pkg/errors"
	"github.com/daytonaio/common-go/pkg/log"
)

// Execute runs cmd inside an existing session's shell.
//
// The command is wrapped in cmdWrapperFormat, which sets up per-command
// FIFOs so stdout/stderr can be prefixed and appended to a shared log file,
// and writes the exit code to a sidecar file when the command finishes.
// In async mode the command ID is returned immediately; otherwise this
// polls (50ms) for the exit-code file and returns the collected output.
// When isCombinedOutput is true the stream prefixes are stripped from the
// returned log.
func (s *SessionService) Execute(sessionId, cmdId, cmd string, async, isCombinedOutput, suppressInputEcho bool) (*SessionExecute, error) {
	session, ok := s.sessions.Get(sessionId)
	if !ok {
		return nil, common_errors.NewNotFoundError(errors.New("session not found"))
	}

	// Generate an ID when the caller supplied the sentinel "empty" ID.
	if cmdId == util.EmptyCommandID {
		cmdId = uuid.NewString()
	}

	if _, ok := session.commands.Get(cmdId); ok {
		return nil, common_errors.NewConflictError(errors.New("command with the given ID already exists"))
	}

	command := &Command{
		Id:                cmdId,
		Command:           cmd,
		SuppressInputEcho: suppressInputEcho,
	}
	session.commands.Set(cmdId, command)

	logFilePath, exitCodeFilePath := command.LogFilePath(session.Dir(s.configDir))
	logDir := filepath.Dir(logFilePath)
	if err := os.MkdirAll(logDir, 0755); err != nil {
		return nil, common_errors.NewBadRequestError(fmt.Errorf("failed to create log directory: %w", err))
	}
	// Create (truncate) the log file up front; the wrapper script appends to it.
	logFile, err := os.Create(logFilePath)
	if err != nil {
		return nil, common_errors.NewBadRequestError(fmt.Errorf("failed to create log file: %w", err))
	}
	defer logFile.Close()

	cmdToExec := fmt.Sprintf(cmdWrapperFormat+"\n",
		logFilePath, // %q -> log
		logDir,      // %q -> dir
		command.InputFilePath(session.Dir(s.configDir)), // %q -> input
		toOctalEscapes(log.STDOUT_PREFIX),               // %s -> stdout prefix
		toOctalEscapes(log.STDERR_PREFIX),               // %s -> stderr prefix
		cmd,              // %s -> verbatim script body
		exitCodeFilePath, // %q
	)

	// Feed the wrapped script to the session's long-lived shell.
	_, err = session.stdinWriter.Write([]byte(cmdToExec))
	if err != nil {
		return nil, common_errors.NewBadRequestError(fmt.Errorf("failed to write command: %w", err))
	}

	if async {
		return &SessionExecute{
			CommandId: cmdId,
		}, nil
	}

	// Synchronous path: poll for the exit-code file until it appears or the
	// session context is cancelled.
	for {
		select {
		case <-session.ctx.Done():
			command, ok := session.commands.Get(cmdId)
			if !ok {
				return nil, common_errors.NewBadRequestError(errors.New("command not found"))
			}
			// Session torn down mid-command; record a generic failure code.
			command.ExitCode = util.Pointer(1)
			return nil, common_errors.NewBadRequestError(errors.New("session cancelled"))
		default:
			exitCode, err := os.ReadFile(exitCodeFilePath)
			if err != nil {
				if os.IsNotExist(err) {
					// Command still running — back off briefly and retry.
					time.Sleep(50 * time.Millisecond)
					continue
				}
				return nil, common_errors.NewBadRequestError(fmt.Errorf("failed to read exit code file: %w", err))
			}
			exitCodeInt, err := strconv.Atoi(strings.TrimRight(string(exitCode), "\n"))
			if err != nil {
				return nil, common_errors.NewBadRequestError(fmt.Errorf("failed to convert exit code to int: %w", err))
			}
			command, ok := session.commands.Get(cmdId)
			if !ok {
				return nil, common_errors.NewBadRequestError(errors.New("command not found"))
			}
			command.ExitCode = &exitCodeInt

			logBytes, err := os.ReadFile(logFilePath)
			if err != nil {
				return nil, common_errors.NewBadRequestError(fmt.Errorf("failed to read log file: %w", err))
			}
			logContent := string(logBytes)

			if isCombinedOutput {
				// remove prefixes from log bytes
				logBytes = bytes.ReplaceAll(bytes.ReplaceAll(logBytes, log.STDOUT_PREFIX, []byte{}), log.STDERR_PREFIX, []byte{})
				logContent = string(logBytes)
			}

			return &SessionExecute{
				CommandId: cmdId,
				Output:    &logContent,
				ExitCode:  &exitCodeInt,
			}, nil
		}
	}
}

// toOctalEscapes renders each byte as a shell octal escape (e.g. 0x01 ->
// \001) so binary stream-prefix markers can be embedded safely in the
// wrapper script's printf format.
func toOctalEscapes(b []byte) string {
	out := ""
	for _, c := range b {
		out += fmt.Sprintf("\\%03o", c) // e.g. 0x01 → \001
	}
	return out
}

// cmdWrapperFormat is the shell template each command is wrapped in. It is
// runtime program text — do not edit casually. Placeholders, in order:
// %q log path, %q command dir, %q input FIFO path, %s stdout prefix (octal
// escapes), %s stderr prefix (octal escapes), %s the verbatim command, and
// %q (unquoted here) the exit-code file path.
var cmdWrapperFormat string = `
{
	log=%q
	dir=%q

	# per-command FIFOs
	sp="$dir/stdout.pipe"
	ep="$dir/stderr.pipe"
	ip=%q

	rm -f "$sp" "$ep" "$ip" && mkfifo "$sp" "$ep" "$ip" || exit 1

	cleanup() { rm -f "$sp" "$ep" "$ip"; }
	trap 'cleanup' EXIT HUP INT TERM

	# prefix each stream and append to shared log
	( while IFS= read -r line || [ -n "$line" ]; do printf '%s%%s\n' "$line"; done < "$sp" ) >> "$log" &
	r1=$!
	( while IFS= read -r line || [ -n "$line" ]; do printf '%s%%s\n' "$line"; done < "$ep" ) >> "$log" &
	r2=$!

	# Keep input FIFO open to prevent blocking when command opens stdin
	sleep infinity > "$ip" &

	# Run your command
	{ %s; } < "$ip" > "$sp" 2> "$ep"
	echo "$?" >> %s

	# drain labelers (cleanup via trap)
	wait "$r1" "$r2"

	# Ensure unlink even if the waits failed
	cleanup
}
`


================================================
FILE: apps/daemon/pkg/session/get.go
================================================

// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package session

import (
	"errors"

	common_errors "github.com/daytonaio/common-go/pkg/errors"
)

// Get returns a session and all of its commands (with exit codes refreshed
// from disk via getSessionCommands). Returns NotFound if no session has the
// given ID.
func (s *SessionService) Get(sessionId string) (*Session, error) {
	_, ok := s.sessions.Get(sessionId)
	if !ok {
		return nil, common_errors.NewNotFoundError(errors.New("session not found"))
	}

	commands, err := s.getSessionCommands(sessionId)
	if err != nil {
		return nil, err
	}

	return &Session{
		SessionId: sessionId,
		Commands:  commands,
	}, nil
}


================================================
FILE: apps/daemon/pkg/session/input.go
================================================

// Copyright 2025 Daytona Platforms Inc.
// SendInput sends data to the session's stdin for a specific running command
// This enables interactive command input for sessions
//
// The payload is written to the command's dedicated input FIFO (created by the
// shell command wrapper). A trailing newline is appended when missing so that
// line-oriented readers such as `read` receive a complete line. Returns a
// NotFound error for unknown session/command ids and a Gone error when the
// session process or the command has already finished.
func (s *SessionService) SendInput(sessionId, commandId string, data string) error {
	session, ok := s.sessions.Get(sessionId)
	if !ok {
		return common_errors.NewNotFoundError(errors.New("session not found"))
	}

	// Check if the session process is still active
	if session.cmd.ProcessState != nil && session.cmd.ProcessState.Exited() {
		return common_errors.NewGoneError(errors.New("session process has exited"))
	}

	// Verify the command exists
	command, ok := session.commands.Get(commandId)
	if !ok {
		return common_errors.NewNotFoundError(errors.New("command not found"))
	}

	// Check if the command is still running (exit code not set means still running)
	if command.ExitCode != nil {
		return common_errors.NewGoneError(fmt.Errorf("command has already completed with exit code %d", *command.ExitCode))
	}

	inputFilePath := command.InputFilePath(session.Dir(s.configDir))

	// NOTE(review): opening a FIFO write-only blocks until a reader exists.
	// The command wrapper appears to keep the read end open ("sleep infinity
	// > $ip"), which should make this non-blocking — confirm against the
	// wrapper script.
	f, err := os.OpenFile(inputFilePath, os.O_WRONLY, 0600)
	if err != nil {
		return common_errors.NewInternalServerError(fmt.Errorf("failed to open input pipe: %w", err))
	}
	defer f.Close()

	// Ensure newline for commands like `read`
	if !strings.HasSuffix(data, "\n") {
		data += "\n"
	}

	// Write to input pipe
	if _, err := f.Write([]byte(data)); err != nil {
		return common_errors.NewInternalServerError(fmt.Errorf("failed to write to input pipe: %w", err))
	}

	if !command.SuppressInputEcho {
		// Also echo input to log file for visibility (appears as stdout)
		logFilePath, _ := command.LogFilePath(session.Dir(s.configDir))
		logFile, err := os.OpenFile(logFilePath, os.O_APPEND|os.O_WRONLY, 0600)
		if err != nil {
			// Best-effort echo: the input itself was already delivered, so an
			// echo failure is only logged, never surfaced to the caller.
			s.logger.Debug("failed to open log file to echo input", "error", err)
		} else {
			defer logFile.Close()

			// Write with STDOUT prefix to maintain log format consistency
			// NOTE(review): append may write into STDOUT_PREFIX's backing array
			// if it carries spare capacity — confirm the prefix slice is never
			// shared/mutated elsewhere.
			dataWithPrefix := append(log.STDOUT_PREFIX, []byte(data)...)
			_, err = logFile.Write(dataWithPrefix)
			if err != nil {
				s.logger.Error("failed to echo input to log file", "error", err)
			}
		}
	}

	return nil
}
// SPDX-License-Identifier: AGPL-3.0 package session import ( "bytes" "context" "errors" "io" "log/slog" "net/http" "os" "time" "github.com/daytonaio/daemon/internal/util" "github.com/gorilla/websocket" common_errors "github.com/daytonaio/common-go/pkg/errors" "github.com/daytonaio/common-go/pkg/log" ) type FetchLogsOptions struct { IsCombinedOutput bool IsWebsocketUpgrade bool Follow bool } func (s *SessionService) GetSessionCommandLogs(sessionId, commandId string, request *http.Request, responseWriter http.ResponseWriter, opts FetchLogsOptions) ([]byte, error) { session, ok := s.sessions.Get(sessionId) if !ok { return nil, common_errors.NewNotFoundError(errors.New("session not found")) } command, ok := session.commands.Get(commandId) if !ok { return nil, common_errors.NewNotFoundError(errors.New("command not found")) } logFilePath, exitCodeFilePath := command.LogFilePath(session.Dir(s.configDir)) if opts.IsWebsocketUpgrade { logFile, err := os.Open(logFilePath) if err != nil { if os.IsNotExist(err) { return nil, common_errors.NewNotFoundError(err) } if os.IsPermission(err) { return nil, common_errors.NewForbiddenError(err) } return nil, common_errors.NewBadRequestError(err) } defer logFile.Close() ReadLog(s.logger, request, responseWriter, opts.Follow, logFile, util.ReadLogWithExitCode, exitCodeFilePath, func(logger *slog.Logger, conn *websocket.Conn, messages chan []byte, errors chan error, pongCh <-chan []byte) { var buffer []byte for { // Priority: always flush pending pong responses before writing data. // This ensures keepalive pongs are never delayed by data writes. 
util.WritePendingPongs(conn, pongCh, time.Second, logger) select { case <-session.ctx.Done(): // Flush any remaining bytes in buffer before closing if opts.IsCombinedOutput && len(buffer) > 0 { remainingData := flushRemainingBuffer(&buffer) if len(remainingData) > 0 { err := conn.WriteMessage(websocket.BinaryMessage, remainingData) if err != nil { s.logger.Error("websocket write error", "error", err) } } } err := conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), time.Now().Add(time.Second)) if err != nil { s.logger.Error("websocket close control error", "error", err) } conn.Close() return case pong := <-pongCh: // Pong arrived while waiting for data — write it immediately if err := conn.WriteControl(websocket.PongMessage, pong, time.Now().Add(time.Second)); err != nil { s.logger.Debug("failed to write pong", "error", err) } case msg := <-messages: if opts.IsCombinedOutput { // Process chunks with buffering to handle prefixes split across chunks processedData := processLogChunkWithPrefixFiltering(msg, &buffer) if len(processedData) > 0 { err := conn.WriteMessage(websocket.BinaryMessage, processedData) if err != nil { errors <- err return } } } else { err := conn.WriteMessage(websocket.BinaryMessage, msg) if err != nil { errors <- err return } } case <-errors: // Stream ended, flush any remaining bytes in buffer if opts.IsCombinedOutput && len(buffer) > 0 { remainingData := flushRemainingBuffer(&buffer) if len(remainingData) > 0 { writeErr := conn.WriteMessage(websocket.BinaryMessage, remainingData) if writeErr != nil { s.logger.Error("websocket write error", "error", writeErr) } } } // The error will be handled by the main ReadLog function return } } }) return nil, nil } logBytes, err := os.ReadFile(logFilePath) if err != nil { if os.IsNotExist(err) { return nil, common_errors.NewNotFoundError(err) } if os.IsPermission(err) { return nil, common_errors.NewForbiddenError(err) } return nil, 
common_errors.NewBadRequestError(err) } if opts.IsCombinedOutput { // remove prefixes from log bytes logBytes = bytes.ReplaceAll(bytes.ReplaceAll(logBytes, log.STDOUT_PREFIX, []byte{}), log.STDERR_PREFIX, []byte{}) } return logBytes, nil } // ReadLog reads from the logReader and writes to the websocket. // TLogData is the type of the message to be read from the logReader. // The wsWriteFunc callback receives a pongCh that carries queued pong payloads; // the callback must drain it (via util.WritePendingPongs) before each data write // to give keepalive pongs priority over log data. func ReadLog[TLogData any](logger *slog.Logger, request *http.Request, responseWriter http.ResponseWriter, follow bool, logReader io.Reader, readFunc func(context.Context, io.Reader, bool, string, chan TLogData, chan error), exitCodeFilePath string, wsWriteFunc func(*slog.Logger, *websocket.Conn, chan TLogData, chan error, <-chan []byte)) { ws, err := util.UpgradeToWebSocket(responseWriter, request) if err != nil { logger.Error("websocket upgrade error", "error", err) return } pongCh := util.SetupWSKeepAlive(ws, logger) defer func() { closeErr := websocket.CloseNormalClosure if !errors.Is(err, io.EOF) { closeErr = websocket.CloseInternalServerErr } err := ws.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(closeErr, ""), time.Now().Add(time.Second)) if err != nil { logger.Debug("websocket close control error", "error", err) } ws.Close() }() msgChannel := make(chan TLogData) errChannel := make(chan error) ctx, cancel := context.WithCancel(request.Context()) defer cancel() go readFunc(ctx, logReader, follow, exitCodeFilePath, msgChannel, errChannel) go wsWriteFunc(logger, ws, msgChannel, errChannel, pongCh) readErr := make(chan error) go func() { for { _, _, err := ws.ReadMessage() readErr <- err } }() for { select { case <-ctx.Done(): return case err = <-errChannel: if err != nil { if !errors.Is(err, io.EOF) { logger.Error("log read error", "error", err) } cancel() return 
} case err := <-readErr: if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseAbnormalClosure) { logger.Error("websocket unexpected close error", "error", err) } if err != nil { return } } } } // processLogChunkWithPrefixFiltering processes log chunks with buffering to handle prefixes split across chunks func processLogChunkWithPrefixFiltering(chunk []byte, buffer *[]byte) []byte { // Append new chunk to buffer *buffer = append(*buffer, chunk...) var result []byte processed := 0 for processed < len(*buffer) { // Check if we have enough bytes to check for prefixes if len(*buffer)-processed < 3 { // Not enough bytes for a complete prefix // Check if remaining bytes could be part of a prefix remainingBytes := (*buffer)[processed:] // If remaining bytes could be start of STDOUT_PREFIX (0x01, 0x01, 0x01) couldBeStdoutPrefix := true for i, b := range remainingBytes { if b != log.STDOUT_PREFIX[i] { couldBeStdoutPrefix = false break } } // If remaining bytes could be start of STDERR_PREFIX (0x02, 0x02, 0x02) couldBeStderrPrefix := true for i, b := range remainingBytes { if b != log.STDERR_PREFIX[i] { couldBeStderrPrefix = false break } } // If remaining bytes could be part of either prefix, keep them in buffer if couldBeStdoutPrefix || couldBeStderrPrefix { *buffer = remainingBytes } else { // Remaining bytes cannot be part of any prefix, output them result = append(result, remainingBytes...) 
// flushRemainingBuffer drains whatever bytes are still held in the buffer once
// the log stream has ended. Bytes left over at end-of-stream can no longer be
// the start of a stream prefix, so they are returned verbatim as ordinary
// output data; the buffer is emptied in place. Returns nil for an empty buffer.
func flushRemainingBuffer(buffer *[]byte) []byte {
	if len(*buffer) == 0 {
		return nil
	}

	// Copy out the leftovers, then reset the buffer for reuse.
	out := append([]byte(nil), (*buffer)...)
	*buffer = (*buffer)[:0]
	return out
}
// SessionService owns all exec sessions for this daemon: it creates them,
// tracks their commands, and applies termination timing policy.
type SessionService struct {
	logger    *slog.Logger // component-scoped logger ("session_service")
	configDir string       // root under which per-session state dirs live
	// sessions maps session id -> live session state; a concurrent map because
	// HTTP handlers touch it from multiple goroutines.
	sessions cmap.ConcurrentMap[string, *session]
	// Termination tuning — presumably how long a session gets to shut down and
	// how often that is polled; confirm against the delete/terminate logic
	// (not in view here).
	terminationGracePeriod   time.Duration
	terminationCheckInterval time.Duration
}

// NewSessionService constructs a SessionService rooted at configDir with the
// given termination timing parameters. The supplied logger is tagged with the
// "session_service" component label.
func NewSessionService(logger *slog.Logger, configDir string, terminationGracePeriod, terminationCheckInterval time.Duration) *SessionService {
	return &SessionService{
		logger:                   logger.With(slog.String("component", "session_service")),
		configDir:                configDir,
		sessions:                 cmap.New[*session](),
		terminationGracePeriod:   terminationGracePeriod,
		terminationCheckInterval: terminationCheckInterval,
	}
}
CommandId string `json:"cmdId" validate:"optional"` Output *string `json:"output" validate:"optional"` Stdout *string `json:"stdout" validate:"optional"` Stderr *string `json:"stderr" validate:"optional"` ExitCode *int `json:"exitCode" validate:"optional"` } ================================================ FILE: apps/daemon/pkg/ssh/config/config.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package config const SSH_PORT = 22220 ================================================ FILE: apps/daemon/pkg/ssh/server.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package ssh import ( "fmt" "io" "log/slog" "os" "os/exec" "github.com/daytonaio/daemon/pkg/common" "github.com/daytonaio/daemon/pkg/ssh/config" "github.com/gliderlabs/ssh" "github.com/pkg/sftp" "golang.org/x/sys/unix" ) type Server struct { logger *slog.Logger workDir string defaultWorkDir string } func NewServer(logger *slog.Logger, workDir, defaultWorkDir string) *Server { return &Server{ logger: logger.With(slog.String("component", "ssh_server")), workDir: workDir, defaultWorkDir: defaultWorkDir, } } func (s *Server) Start() error { forwardedTCPHandler := &ssh.ForwardedTCPHandler{} unixForwardHandler := newForwardedUnixHandler() sshServer := ssh.Server{ Addr: fmt.Sprintf(":%d", config.SSH_PORT), PublicKeyHandler: func(ctx ssh.Context, key ssh.PublicKey) bool { // Allow all public key authentication attempts s.logger.Debug("Public key authentication accepted", "user", ctx.User()) return true }, PasswordHandler: func(ctx ssh.Context, password string) bool { s.logger.Debug("Password authentication attempt", "user", ctx.User()) if len(password) > 0 { s.logger.Debug("Received password", "length", len(password)) } else { s.logger.Debug("Received empty password") } // Only allow authentication with the hardcoded password 'sandbox-ssh' authenticated := 
password == "sandbox-ssh" if authenticated { s.logger.Debug("Password authentication succeeded", "user", ctx.User()) } else { s.logger.Debug("Password authentication failed (wrong password)", "user", ctx.User()) } return authenticated }, Handler: func(session ssh.Session) { switch ss := session.Subsystem(); ss { case "": case "sftp": s.sftpHandler(session) return default: s.logger.Error("Subsystem not supported", "subsystem", ss) session.Exit(1) return } ptyReq, winCh, isPty := session.Pty() if session.RawCommand() == "" && isPty { s.handlePty(session, ptyReq, winCh) } else { s.handleNonPty(session) } }, ChannelHandlers: map[string]ssh.ChannelHandler{ "session": ssh.DefaultSessionHandler, "direct-tcpip": ssh.DirectTCPIPHandler, "direct-streamlocal@openssh.com": directStreamLocalHandler, }, RequestHandlers: map[string]ssh.RequestHandler{ "tcpip-forward": forwardedTCPHandler.HandleSSHRequest, "cancel-tcpip-forward": forwardedTCPHandler.HandleSSHRequest, "streamlocal-forward@openssh.com": unixForwardHandler.HandleSSHRequest, "cancel-streamlocal-forward@openssh.com": unixForwardHandler.HandleSSHRequest, }, SubsystemHandlers: map[string]ssh.SubsystemHandler{ "sftp": s.sftpHandler, }, LocalPortForwardingCallback: ssh.LocalPortForwardingCallback(func(ctx ssh.Context, dhost string, dport uint32) bool { return true }), ReversePortForwardingCallback: ssh.ReversePortForwardingCallback(func(ctx ssh.Context, host string, port uint32) bool { return true }), SessionRequestCallback: func(sess ssh.Session, requestType string) bool { return true }, } s.logger.Info("Starting ssh server", "port", config.SSH_PORT) return sshServer.ListenAndServe() } func (s *Server) handlePty(session ssh.Session, ptyReq ssh.Pty, winCh <-chan ssh.Window) { dir := s.workDir if _, err := os.Stat(s.workDir); os.IsNotExist(err) { dir = s.defaultWorkDir } env := []string{} if ssh.AgentRequested(session) { l, err := ssh.NewAgentListener() if err != nil { s.logger.Error("Failed to start agent listener", 
"error", err) return } defer l.Close() go ssh.ForwardAgentConnections(l, session) env = append(env, fmt.Sprintf("%s=%s", "SSH_AUTH_SOCK", l.Addr().String())) } sizeCh := make(chan common.TTYSize) go func() { for win := range winCh { sizeCh <- common.TTYSize{ Height: win.Height, Width: win.Width, } } }() err := common.SpawnTTY(common.SpawnTTYOptions{ Dir: dir, StdIn: session, StdOut: session, Term: ptyReq.Term, Env: env, SizeCh: sizeCh, }) if err != nil { // Debug log here because this gets called on each ssh "exit" // TODO: Find a better way to handle this s.logger.Debug("Failed to spawn tty", "error", err) return } } func (s *Server) handleNonPty(session ssh.Session) { args := []string{} if len(session.Command()) > 0 { args = append([]string{"-c"}, session.RawCommand()) } cmd := exec.Command("/bin/sh", args...) cmd.Env = append(cmd.Env, os.Environ()...) if ssh.AgentRequested(session) { l, err := ssh.NewAgentListener() if err != nil { s.logger.Error("Failed to start agent listener", "error", err) return } defer l.Close() go ssh.ForwardAgentConnections(l, session) cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", "SSH_AUTH_SOCK", l.Addr().String())) } cmd.Dir = s.workDir if _, err := os.Stat(s.workDir); os.IsNotExist(err) { cmd.Dir = s.defaultWorkDir } cmd.Stdout = session cmd.Stderr = session.Stderr() stdinPipe, err := cmd.StdinPipe() if err != nil { s.logger.Error("Unable to setup stdin for session", "error", err) return } go func() { _, err := io.Copy(stdinPipe, session) if err != nil { s.logger.Error("Unable to read from session", "error", err) return } _ = stdinPipe.Close() }() err = cmd.Start() if err != nil { s.logger.Error("Unable to start command", "error", err) return } sigs := make(chan ssh.Signal, 1) session.Signals(sigs) defer func() { session.Signals(nil) close(sigs) }() go func() { for sig := range sigs { signal := s.osSignalFrom(sig) err := cmd.Process.Signal(signal) if err != nil { s.logger.Warn("Unable to send signal to process", "error", err) } } }() 
err = cmd.Wait() if err != nil { s.logger.Info("Command exited", "command", session.RawCommand(), "error", err) session.Exit(127) return } err = session.Exit(0) if err != nil { s.logger.Warn("Unable to exit session", "error", err) } } func (s *Server) osSignalFrom(sig ssh.Signal) os.Signal { switch sig { case ssh.SIGABRT: return unix.SIGABRT case ssh.SIGALRM: return unix.SIGALRM case ssh.SIGFPE: return unix.SIGFPE case ssh.SIGHUP: return unix.SIGHUP case ssh.SIGILL: return unix.SIGILL case ssh.SIGINT: return unix.SIGINT case ssh.SIGKILL: return unix.SIGKILL case ssh.SIGPIPE: return unix.SIGPIPE case ssh.SIGQUIT: return unix.SIGQUIT case ssh.SIGSEGV: return unix.SIGSEGV case ssh.SIGTERM: return unix.SIGTERM case ssh.SIGUSR1: return unix.SIGUSR1 case ssh.SIGUSR2: return unix.SIGUSR2 // Unhandled, use sane fallback. default: return unix.SIGKILL } } func (s *Server) sftpHandler(session ssh.Session) { debugStream := io.Discard serverOptions := []sftp.ServerOption{ sftp.WithDebug(debugStream), } server, err := sftp.NewServer( session, serverOptions..., ) if err != nil { s.logger.Error("sftp server init error", "error", err) return } if err := server.Serve(); err == io.EOF { server.Close() } else if err != nil { s.logger.Error("sftp server completed with error", "error", err) } } ================================================ FILE: apps/daemon/pkg/ssh/unix_forward.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package ssh import ( "context" "errors" "fmt" "io" "io/fs" "log/slog" "net" "os" "path/filepath" "sync" "syscall" "github.com/gliderlabs/ssh" gossh "golang.org/x/crypto/ssh" ) // streamLocalForwardPayload describes the extra data sent in a // streamlocal-forward@openssh.com containing the socket path to bind to. 
type streamLocalForwardPayload struct { SocketPath string } // forwardedStreamLocalPayload describes the data sent as the payload in the new // channel request when a Unix connection is accepted by the listener. type forwardedStreamLocalPayload struct { SocketPath string Reserved uint32 } // forwardedUnixHandler is a clone of ssh.ForwardedTCPHandler that does // streamlocal forwarding (aka. unix forwarding) instead of TCP forwarding. type forwardedUnixHandler struct { sync.Mutex forwards map[forwardKey]net.Listener } type forwardKey struct { sessionID string addr string } func newForwardedUnixHandler() *forwardedUnixHandler { return &forwardedUnixHandler{ forwards: make(map[forwardKey]net.Listener), } } func (h *forwardedUnixHandler) HandleSSHRequest(ctx ssh.Context, _ *ssh.Server, req *gossh.Request) (bool, []byte) { slog.Debug("handling SSH unix forward") conn, ok := ctx.Value(ssh.ContextKeyConn).(*gossh.ServerConn) if !ok { slog.Warn("SSH unix forward request from client with no gossh connection") return false, nil } switch req.Type { case "streamlocal-forward@openssh.com": var reqPayload streamLocalForwardPayload err := gossh.Unmarshal(req.Payload, &reqPayload) if err != nil { slog.Warn("parse streamlocal-forward@openssh.com request (SSH unix forward) payload from client", "error", err) return false, nil } addr := reqPayload.SocketPath slog.Debug("request begin SSH unix forward", "socketPath", addr) key := forwardKey{ sessionID: ctx.SessionID(), addr: addr, } h.Lock() _, ok := h.forwards[key] h.Unlock() if ok { // In cases where `ExitOnForwardFailure=yes` is set, returning false // here will cause the connection to be closed. To avoid this, and // to match OpenSSH behavior, we silently ignore the second forward // request. slog.Warn("SSH unix forward request for socket path that is already being forwarded on this session, ignoring", "socketPath", addr) return true, nil } // Create socket parent dir if not exists. 
parentDir := filepath.Dir(addr) err = os.MkdirAll(parentDir, 0o700) if err != nil { slog.Error("failed to create parent directory for unix socket", "error", err) return false, nil } // Remove existing socket if it exists. We do not use os.Remove() here // so that directories are kept. Note that it's possible that we will // overwrite a regular file here. Both of these behaviors match OpenSSH, // however, which is why we unlink. err = unlink(addr) if err != nil && !errors.Is(err, fs.ErrNotExist) { slog.Warn("remove existing socket for SSH unix forward request", "socketPath", addr, "error", err) return false, nil } lc := &net.ListenConfig{} ln, err := lc.Listen(ctx, "unix", addr) if err != nil { slog.Warn("listen on Unix socket for SSH unix forward request", "socketPath", addr, "error", err) return false, nil } slog.Debug("SSH unix forward listening on socket", "socketPath", addr) // The listener needs to successfully start before it can be added to // the map, so we don't have to worry about checking for an existing // listener. // // This is also what the upstream TCP version of this code does. 
h.Lock() h.forwards[key] = ln h.Unlock() slog.Debug("SSH unix forward added to cache", "socketPath", addr) ctx, cancel := context.WithCancel(ctx) go func() { <-ctx.Done() _ = ln.Close() }() go func() { defer cancel() for { c, err := ln.Accept() if err != nil { if !errors.Is(err, net.ErrClosed) { slog.Warn("accept on local Unix socket for SSH unix forward request", "socketPath", addr, "error", err) } // closed below slog.Debug("SSH unix forward listener closed", "socketPath", addr) break } slog.Debug("accepted SSH unix forward connection", "socketPath", addr) payload := gossh.Marshal(&forwardedStreamLocalPayload{ SocketPath: addr, }) go func() { ch, reqs, err := conn.OpenChannel("forwarded-streamlocal@openssh.com", payload) if err != nil { slog.Warn("open SSH unix forward channel to client", "socketPath", addr, "error", err) _ = c.Close() return } go gossh.DiscardRequests(reqs) Bicopy(ctx, ch, c) }() } h.Lock() if ln2, ok := h.forwards[key]; ok && ln2 == ln { delete(h.forwards, key) } h.Unlock() slog.Debug("SSH unix forward listener removed from cache", "socketPath", addr) _ = ln.Close() }() return true, nil case "cancel-streamlocal-forward@openssh.com": var reqPayload streamLocalForwardPayload err := gossh.Unmarshal(req.Payload, &reqPayload) if err != nil { slog.Warn("parse cancel-streamlocal-forward@openssh.com (SSH unix forward) request payload from client", "error", err) return false, nil } slog.Debug("request to cancel SSH unix forward", "socketPath", reqPayload.SocketPath) key := forwardKey{ sessionID: ctx.SessionID(), addr: reqPayload.SocketPath, } h.Lock() ln, ok := h.forwards[key] delete(h.forwards, key) h.Unlock() if !ok { slog.Warn("SSH unix forward not found in cache", "socketPath", reqPayload.SocketPath) return true, nil } _ = ln.Close() return true, nil default: return false, nil } } // directStreamLocalPayload describes the extra data sent in a // direct-streamlocal@openssh.com channel request containing the socket path. 
type directStreamLocalPayload struct { SocketPath string Reserved1 string Reserved2 uint32 } func directStreamLocalHandler(_ *ssh.Server, _ *gossh.ServerConn, newChan gossh.NewChannel, ctx ssh.Context) { var reqPayload directStreamLocalPayload err := gossh.Unmarshal(newChan.ExtraData(), &reqPayload) if err != nil { _ = newChan.Reject(gossh.ConnectionFailed, "could not parse direct-streamlocal@openssh.com channel payload") return } var dialer net.Dialer dconn, err := dialer.DialContext(ctx, "unix", reqPayload.SocketPath) if err != nil { _ = newChan.Reject(gossh.ConnectionFailed, fmt.Sprintf("dial unix socket %q: %+v", reqPayload.SocketPath, err.Error())) return } ch, reqs, err := newChan.Accept() if err != nil { _ = dconn.Close() return } go gossh.DiscardRequests(reqs) Bicopy(ctx, ch, dconn) } // unlink removes files and unlike os.Remove, directories are kept. func unlink(path string) error { // Ignore EINTR like os.Remove, see ignoringEINTR in os/file_posix.go // for more details. for { err := syscall.Unlink(path) if !errors.Is(err, syscall.EINTR) { return err } } } // Bicopy copies all of the data between the two connections and will close them // after one or both of them are done writing. If the context is canceled, both // of the connections will be closed. func Bicopy(ctx context.Context, c1, c2 io.ReadWriteCloser) { ctx, cancel := context.WithCancel(ctx) defer cancel() defer func() { _ = c1.Close() _ = c2.Close() }() var wg sync.WaitGroup copyFunc := func(dst io.WriteCloser, src io.Reader) { defer func() { wg.Done() // If one side of the copy fails, ensure the other one exits as // well. cancel() }() _, _ = io.Copy(dst, src) } wg.Add(2) go copyFunc(c1, c2) go copyFunc(c2, c1) // Convert waitgroup to a channel so we can also wait on the context. 
// UTF8Decoder incrementally decodes a UTF-8 byte stream, carrying any
// incomplete trailing rune over to the next Write call.
type UTF8Decoder struct {
	buffer []byte // holds a possibly-incomplete rune between calls
}

// NewUTF8Decoder returns a decoder with an empty carry-over buffer.
func NewUTF8Decoder() *UTF8Decoder {
	return &UTF8Decoder{buffer: make([]byte, 0, 1024)}
}

// Write appends new data to the internal buffer and decodes valid UTF-8 runes.
// It returns the decoded string. Any incomplete bytes are kept for the next call.
func (d *UTF8Decoder) Write(data []byte) string {
	// Prepend whatever was left over from the previous call.
	combined := append(d.buffer, data...)

	var out bytes.Buffer
	pos := 0
	for pos < len(combined) {
		r, size := utf8.DecodeRune(combined[pos:])
		if r == utf8.RuneError && size == 1 && len(combined)-pos < utf8.UTFMax {
			// Fewer than utf8.UTFMax bytes remain and they don't decode:
			// this may be a rune split across reads, so hold the tail.
			break
		}
		// Valid rune, or an invalid byte mid-stream (emitted as U+FFFD,
		// advancing one byte).
		out.WriteRune(r)
		pos += size
	}

	// Stash any undecoded tail (possibly an incomplete rune) for next time.
	d.buffer = d.buffer[:0]
	if pos < len(combined) {
		d.buffer = append(d.buffer, combined[pos:]...)
	}
	return out.String()
}
// upgrader promotes plain HTTP requests to websocket connections for /ws.
var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return true // Be careful with this in production
	},
}

// windowSize is the JSON resize message the browser-side terminal sends.
type windowSize struct {
	Rows uint16 `json:"rows"`
	Cols uint16 `json:"cols"`
}

// StartTerminalServer serves the embedded web-terminal frontend on the given
// port and exposes the pty bridge at /ws. It blocks until the HTTP listener
// fails; handlers are registered on the default mux.
func StartTerminalServer(port int) error {
	// Prepare the embedded frontend files
	// Serve the files from the embedded filesystem
	staticFS, err := fs.Sub(static, "static")
	if err != nil {
		return err
	}
	http.Handle("/", http.FileServer(http.FS(staticFS)))
	http.HandleFunc("/ws", handleWebSocket)
	addr := fmt.Sprintf(":%d", port)
	log.Printf("Starting terminal server on http://localhost%s", addr)
	return http.ListenAndServe(addr, nil)
}

// handleWebSocket bridges one websocket connection to a freshly spawned pty:
// websocket text/binary frames feed the pty's stdin (JSON resize messages are
// intercepted and turned into size changes), and pty output is UTF-8 decoded
// and streamed back as text frames. Returns when the pty or the socket ends.
func handleWebSocket(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Printf("Failed to upgrade connection: %v", err)
		return
	}
	defer conn.Close()

	// Create a new UTF8Decoder instance for this connection
	decoder := NewUTF8Decoder()

	sizeCh := make(chan common.TTYSize)
	// In-memory pipes connect the websocket goroutines to the pty below.
	stdInReader, stdInWriter := io.Pipe()
	stdOutReader, stdOutWriter := io.Pipe()

	// Handle websocket -> pty
	go func() {
		for {
			messageType, p, err := conn.ReadMessage()
			if err != nil {
				return
			}

			// Check if it's a resize message
			if messageType == websocket.TextMessage {
				var size windowSize
				if err := json.Unmarshal(p, &size); err == nil {
					// NOTE(review): any text frame that parses as JSON is
					// treated as a resize and never reaches the pty — confirm
					// the frontend always sends keystrokes in a non-JSON form.
					sizeCh <- common.TTYSize{
						Height: int(size.Rows),
						Width:  int(size.Cols),
					}
					continue
				}
			}

			// Write to pty
			_, err = stdInWriter.Write(p)
			if err != nil {
				return
			}
		}
	}()

	go func() {
		// Handle pty -> websocket
		buf := make([]byte, 1024)
		for {
			n, err := stdOutReader.Read(buf)
			if err != nil {
				if err != io.EOF {
					log.Printf("Failed to read from pty: %v", err)
				}
				return
			}
			// A multi-byte UTF-8 character can be split across stream reads.
			// UTF8Decoder buffers incomplete sequences to ensure proper decoding.
			decoded := decoder.Write(buf[:n])
			err = conn.WriteMessage(websocket.TextMessage, []byte(decoded))
			if err != nil {
				log.Printf("Failed to write to websocket: %v", err)
				return
			}
		}
	}()

	// Create a pty
	err = common.SpawnTTY(common.SpawnTTYOptions{
		Dir:    "/",
		StdIn:  stdInReader,
		StdOut: stdOutWriter,
		Term:   "xterm-256color",
		SizeCh: sizeCh,
	})
	if err != nil {
		log.Printf("Failed to start pty: %v", err)
		return
	}
}
================================================ FILE: apps/daemon/pkg/toolbox/computeruse/disabled_middleware.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package computeruse import ( "net/http" "github.com/gin-gonic/gin" ) // computerUseDisabledMiddleware returns a middleware that handles requests when computer-use is disabled func ComputerUseDisabledMiddleware() gin.HandlerFunc { return func(c *gin.Context) { c.JSON(http.StatusServiceUnavailable, gin.H{ "message": "Computer-use functionality is not available", "details": "The computer-use plugin failed to initialize due to missing dependencies in the runtime environment.", "solution": "Install the required X11 dependencies (x11-apps, xvfb, etc.) to enable computer-use functionality. Check the daemon logs for specific error details.", }) c.Abort() } } ================================================ FILE: apps/daemon/pkg/toolbox/computeruse/handler.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package computeruse import ( "fmt" "github.com/gin-gonic/gin" "net/http" ) type Handler struct { ComputerUse IComputerUse } // StartComputerUse godoc // // @Summary Start computer use processes // @Description Start all computer use processes and return their status // @Tags computer-use // @Produce json // @Success 200 {object} ComputerUseStartResponse // @Router /computeruse/start [post] // // @id StartComputerUse func (h *Handler) StartComputerUse(ctx *gin.Context) { _, err := h.ComputerUse.Start() if err != nil { ctx.JSON(http.StatusServiceUnavailable, gin.H{ "error": "Failed to start computer use", "details": err.Error(), }) return } status, err := h.ComputerUse.GetProcessStatus() if err != nil { ctx.JSON(http.StatusServiceUnavailable, gin.H{ "error": "Failed to get computer use status", "details": err.Error(), }) return } ctx.JSON(http.StatusOK, gin.H{ "message": "Computer use processes started successfully", "status": status, }) } // StopComputerUse godoc // // @Summary Stop computer use processes // @Description Stop all computer use processes and return their status // @Tags computer-use // @Produce json // @Success 200 {object} ComputerUseStopResponse // @Router /computeruse/stop [post] // // @id StopComputerUse func (h *Handler) StopComputerUse(ctx *gin.Context) { _, err := h.ComputerUse.Stop() if err != nil { ctx.JSON(http.StatusServiceUnavailable, gin.H{ "error": "Failed to stop computer use", "details": err.Error(), }) return } status, err := h.ComputerUse.GetProcessStatus() if err != nil { ctx.JSON(http.StatusServiceUnavailable, gin.H{ "error": "Failed to get computer use status", "details": err.Error(), }) return } ctx.JSON(http.StatusOK, gin.H{ "message": "Computer use processes stopped successfully", "status": status, }) } // GetComputerUseStatus godoc // // @Summary Get computer use process status // @Description Get the status of all computer use processes // @Tags computer-use // @Produce json // @Success 
200 {object} ComputerUseStatusResponse // @Router /computeruse/process-status [get] // // @id GetComputerUseStatus func (h *Handler) GetComputerUseStatus(ctx *gin.Context) { status, err := h.ComputerUse.GetStatus() if err != nil { ctx.JSON(http.StatusServiceUnavailable, gin.H{ "error": "Failed to get computer use status", "details": err.Error(), }) return } if status == nil { ctx.JSON(http.StatusOK, gin.H{ "status": "unknown", }) return } ctx.JSON(http.StatusOK, *status) } // GetProcessStatus godoc // // @Summary Get specific process status // @Description Check if a specific computer use process is running // @Tags computer-use // @Produce json // @Param processName path string true "Process name to check" // @Success 200 {object} ProcessStatusResponse // @Router /computeruse/process/{processName}/status [get] // // @id GetProcessStatus func (h *Handler) GetProcessStatus(ctx *gin.Context) { processName := ctx.Param("processName") req := &ProcessRequest{ ProcessName: processName, } isRunning, err := h.ComputerUse.IsProcessRunning(req) if err != nil { ctx.JSON(http.StatusServiceUnavailable, gin.H{ "error": "Failed to get process status", "details": err.Error(), }) return } ctx.JSON(http.StatusOK, gin.H{ "processName": processName, "running": isRunning, }) } // RestartProcess godoc // // @Summary Restart specific process // @Description Restart a specific computer use process // @Tags computer-use // @Produce json // @Param processName path string true "Process name to restart" // @Success 200 {object} ProcessRestartResponse // @Router /computeruse/process/{processName}/restart [post] // // @id RestartProcess func (h *Handler) RestartProcess(ctx *gin.Context) { processName := ctx.Param("processName") req := &ProcessRequest{ ProcessName: processName, } _, err := h.ComputerUse.RestartProcess(req) if err != nil { ctx.JSON(http.StatusBadRequest, gin.H{ "error": err.Error(), }) return } ctx.JSON(http.StatusOK, gin.H{ "message": fmt.Sprintf("Process %s restarted 
successfully", processName), "processName": processName, }) } // GetProcessLogs godoc // // @Summary Get process logs // @Description Get logs for a specific computer use process // @Tags computer-use // @Produce json // @Param processName path string true "Process name to get logs for" // @Success 200 {object} ProcessLogsResponse // @Router /computeruse/process/{processName}/logs [get] // // @id GetProcessLogs func (h *Handler) GetProcessLogs(ctx *gin.Context) { processName := ctx.Param("processName") req := &ProcessRequest{ ProcessName: processName, } logs, err := h.ComputerUse.GetProcessLogs(req) if err != nil { ctx.JSON(http.StatusBadRequest, gin.H{ "error": err.Error(), }) return } ctx.JSON(http.StatusOK, gin.H{ "processName": processName, "logs": logs, }) } // GetProcessErrors godoc // // @Summary Get process errors // @Description Get errors for a specific computer use process // @Tags computer-use // @Produce json // @Param processName path string true "Process name to get errors for" // @Success 200 {object} ProcessErrorsResponse // @Router /computeruse/process/{processName}/errors [get] // // @id GetProcessErrors func (h *Handler) GetProcessErrors(ctx *gin.Context) { processName := ctx.Param("processName") req := &ProcessRequest{ ProcessName: processName, } errors, err := h.ComputerUse.GetProcessErrors(req) if err != nil { ctx.JSON(http.StatusBadRequest, gin.H{ "error": err.Error(), }) return } ctx.JSON(http.StatusOK, gin.H{ "processName": processName, "errors": errors, }) } ================================================ FILE: apps/daemon/pkg/toolbox/computeruse/interface.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package computeruse

import (
	"net/http"
	"net/rpc"
	"strconv"

	"github.com/gin-gonic/gin"
	"github.com/hashicorp/go-plugin"
)

// PluginInterface defines the interface that the computeruse plugin must implement
type IComputerUse interface {
	// Process management
	Initialize() (*Empty, error)
	Start() (*Empty, error)
	Stop() (*Empty, error)
	GetProcessStatus() (map[string]ProcessStatus, error)
	IsProcessRunning(req *ProcessRequest) (bool, error)
	RestartProcess(req *ProcessRequest) (*Empty, error)
	GetProcessLogs(req *ProcessRequest) (string, error)
	GetProcessErrors(req *ProcessRequest) (string, error)

	// Screenshot methods
	TakeScreenshot(*ScreenshotRequest) (*ScreenshotResponse, error)
	TakeRegionScreenshot(*RegionScreenshotRequest) (*ScreenshotResponse, error)
	TakeCompressedScreenshot(*CompressedScreenshotRequest) (*ScreenshotResponse, error)
	TakeCompressedRegionScreenshot(*CompressedRegionScreenshotRequest) (*ScreenshotResponse, error)

	// Mouse control methods
	GetMousePosition() (*MousePositionResponse, error)
	MoveMouse(*MouseMoveRequest) (*MousePositionResponse, error)
	Click(*MouseClickRequest) (*MouseClickResponse, error)
	Drag(*MouseDragRequest) (*MouseDragResponse, error)
	Scroll(*MouseScrollRequest) (*ScrollResponse, error)

	// Keyboard control methods
	TypeText(*KeyboardTypeRequest) (*Empty, error)
	PressKey(*KeyboardPressRequest) (*Empty, error)
	PressHotkey(*KeyboardHotkeyRequest) (*Empty, error)

	// Display info methods
	GetDisplayInfo() (*DisplayInfoResponse, error)
	GetWindows() (*WindowsResponse, error)

	// Status method
	GetStatus() (*ComputerUseStatusResponse, error)
}

// ComputerUsePlugin wraps an IComputerUse implementation for hashicorp/go-plugin;
// Server and Client (below) provide the RPC plumbing.
// NOTE: the trailing "// @name X" comments on the types below feed swagger
// code generation — do not remove or rename them.
type ComputerUsePlugin struct {
	Impl IComputerUse
}

// Common structs for better composition
type Position struct {
	X int `json:"x"`
	Y int `json:"y"`
} // @name Position

type Size struct {
	Width  int `json:"width"`
	Height int `json:"height"`
} // @name Size

// Screenshot parameter structs
type ScreenshotRequest struct {
	ShowCursor bool `json:"showCursor"`
} // @name ScreenshotRequest

// RegionScreenshotRequest embeds Position (region origin) and Size (region extent).
type RegionScreenshotRequest struct {
	Position
	Size
	ShowCursor bool `json:"showCursor"`
} // @name RegionScreenshotRequest

type CompressedScreenshotRequest struct {
	ShowCursor bool    `json:"showCursor"`
	Format     string  `json:"format"`  // "png" or "jpeg"
	Quality    int     `json:"quality"` // 1-100 for JPEG quality
	Scale      float64 `json:"scale"`   // 0.1-1.0 for scaling down
} // @name CompressedScreenshotRequest

type CompressedRegionScreenshotRequest struct {
	Position
	Size
	ShowCursor bool    `json:"showCursor"`
	Format     string  `json:"format"`  // "png" or "jpeg"
	Quality    int     `json:"quality"` // 1-100 for JPEG quality
	Scale      float64 `json:"scale"`   // 0.1-1.0 for scaling down
} // @name CompressedRegionScreenshotRequest

// Mouse parameter structs
type MouseMoveRequest struct {
	Position
} // @name MouseMoveRequest

type MouseClickRequest struct {
	Position
	Button string `json:"button"` // left, right, middle
	Double bool   `json:"double"`
} // @name MouseClickRequest

type MouseDragRequest struct {
	StartX int    `json:"startX"`
	StartY int    `json:"startY"`
	EndX   int    `json:"endX"`
	EndY   int    `json:"endY"`
	Button string `json:"button"`
} // @name MouseDragRequest

type MouseScrollRequest struct {
	Position
	Direction string `json:"direction"` // up, down
	Amount    int    `json:"amount"`
} // @name MouseScrollRequest

// Keyboard parameter structs
type KeyboardTypeRequest struct {
	Text  string `json:"text"`
	Delay int    `json:"delay"` // milliseconds between keystrokes
} // @name KeyboardTypeRequest

type KeyboardPressRequest struct {
	Key       string   `json:"key"`
	Modifiers []string `json:"modifiers"` // ctrl, alt, shift, cmd
} // @name KeyboardPressRequest

type KeyboardHotkeyRequest struct {
	Keys string `json:"keys"` // e.g., "ctrl+c", "cmd+v"
} // @name KeyboardHotkeyRequest

// Response structs for keyboard operations
type ScrollResponse struct {
	Success bool `json:"success"`
} // @name ScrollResponse

// Response structs
type ScreenshotResponse struct {
	Screenshot     string    `json:"screenshot"`
	CursorPosition *Position `json:"cursorPosition,omitempty"`
	SizeBytes      int       `json:"sizeBytes,omitempty"`
} // @name ScreenshotResponse

// Mouse response structs - separated by operation type
type MousePositionResponse struct {
	Position
} // @name MousePositionResponse

type MouseClickResponse struct {
	Position
} // @name MouseClickResponse

type MouseDragResponse struct {
	Position // Final position
} // @name MouseDragResponse

type DisplayInfoResponse struct {
	Displays []DisplayInfo `json:"displays"`
} // @name DisplayInfoResponse

type DisplayInfo struct {
	ID int `json:"id"`
	Position
	Size
	IsActive bool `json:"isActive"`
} // @name DisplayInfo

type WindowsResponse struct {
	Windows []WindowInfo `json:"windows"`
} // @name WindowsResponse

type WindowInfo struct {
	ID    int    `json:"id"`
	Title string `json:"title"`
	Position
	Size
	IsActive bool `json:"isActive"`
} // @name WindowInfo

type ComputerUseStatusResponse struct {
	Status string `json:"status"`
} // @name ComputerUseStatusResponse

type ComputerUseStartResponse struct {
	Message string                   `json:"message"`
	Status  map[string]ProcessStatus `json:"status"`
} // @name ComputerUseStartResponse

type ComputerUseStopResponse struct {
	Message string                   `json:"message"`
	Status  map[string]ProcessStatus `json:"status"`
} // @name ComputerUseStopResponse

type ProcessStatus struct {
	Running     bool
	Priority    int
	AutoRestart bool
	Pid         *int
} // @name ProcessStatus

type ProcessStatusResponse struct {
	ProcessName string `json:"processName"`
	Running     bool   `json:"running"`
} // @name ProcessStatusResponse

type ProcessRestartResponse struct {
	Message     string `json:"message"`
	ProcessName string `json:"processName"`
} // @name ProcessRestartResponse

type ProcessLogsResponse struct {
	ProcessName string `json:"processName"`
	Logs        string `json:"logs"`
} // @name ProcessLogsResponse

type ProcessErrorsResponse struct {
	ProcessName string `json:"processName"`
	Errors      string `json:"errors"`
} // @name ProcessErrorsResponse

type ProcessRequest struct {
	ProcessName string
} // @name ProcessRequest

type Empty struct{} // @name Empty

// Server returns the RPC server side of the plugin (daemon-hosted implementation).
func (p *ComputerUsePlugin) Server(*plugin.MuxBroker) (any, error) {
	return &ComputerUseRPCServer{Impl: p.Impl}, nil
}

// Client returns the RPC client side of the plugin (proxy used by the daemon).
func (p *ComputerUsePlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (any, error) {
	return &ComputerUseRPCClient{client: c}, nil
}

// TakeScreenshot godoc
//
// @Summary Take a screenshot
// @Description Take a screenshot of the entire screen
// @Tags computer-use
// @Produce json
// @Param showCursor query bool false "Whether to show cursor in screenshot"
// @Success 200 {object} ScreenshotResponse
// @Router /computeruse/screenshot [get]
//
// @id TakeScreenshot
func WrapScreenshotHandler(fn func(*ScreenshotRequest) (*ScreenshotResponse, error)) gin.HandlerFunc {
	return func(c *gin.Context) {
		req := &ScreenshotRequest{
			ShowCursor: c.Query("showCursor") == "true",
		}
		response, err := fn(req)
		if err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, response)
	}
}

// TakeRegionScreenshot godoc
//
// @Summary Take a region screenshot
// @Description Take a screenshot of a specific region of the screen
// @Tags computer-use
// @Produce json
// @Param x query int true "X coordinate of the region"
// @Param y query int true "Y coordinate of the region"
// @Param width query int true "Width of the region"
// @Param height query int true "Height of the region"
// @Param showCursor query bool false "Whether to show cursor in screenshot"
// @Success 200 {object} ScreenshotResponse
// @Router /computeruse/screenshot/region [get]
//
// @id TakeRegionScreenshot
func WrapRegionScreenshotHandler(fn func(*RegionScreenshotRequest) (*ScreenshotResponse, error)) gin.HandlerFunc {
	return func(c *gin.Context) {
		var req RegionScreenshotRequest
		if err := c.ShouldBindQuery(&req); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid parameters"})
			return
		}
		// showCursor is parsed manually (bool query binding is unreliable for it).
		req.ShowCursor = c.Query("showCursor") == "true"
		response, err := fn(&req)
		if err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, response)
	}
}

// TakeCompressedScreenshot godoc
//
// @Summary Take a compressed screenshot
// @Description Take a compressed screenshot of the entire screen
// @Tags computer-use
// @Produce json
// @Param showCursor query bool false "Whether to show cursor in screenshot"
// @Param format query string false "Image format (png or jpeg)"
// @Param quality query int false "JPEG quality (1-100)"
// @Param scale query float64 false "Scale factor (0.1-1.0)"
// @Success 200 {object} ScreenshotResponse
// @Router /computeruse/screenshot/compressed [get]
//
// @id TakeCompressedScreenshot
func WrapCompressedScreenshotHandler(fn func(*CompressedScreenshotRequest) (*ScreenshotResponse, error)) gin.HandlerFunc {
	return func(c *gin.Context) {
		// Defaults: quality 85, full scale; overridden below only by valid values.
		req := &CompressedScreenshotRequest{
			ShowCursor: c.Query("showCursor") == "true",
			Format:     c.Query("format"),
			Quality:    85,
			Scale:      1.0,
		}

		// Parse quality; out-of-range or malformed values keep the default.
		if qualityStr := c.Query("quality"); qualityStr != "" {
			if quality, err := strconv.Atoi(qualityStr); err == nil && quality >= 1 && quality <= 100 {
				req.Quality = quality
			}
		}

		// Parse scale; out-of-range or malformed values keep the default.
		if scaleStr := c.Query("scale"); scaleStr != "" {
			if scale, err := strconv.ParseFloat(scaleStr, 64); err == nil && scale >= 0.1 && scale <= 1.0 {
				req.Scale = scale
			}
		}

		response, err := fn(req)
		if err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, response)
	}
}

// TakeCompressedRegionScreenshot godoc
//
// @Summary Take a compressed region screenshot
// @Description Take a compressed screenshot of a specific region of the screen
// @Tags computer-use
// @Produce json
// @Param x query int true "X coordinate of the region"
// @Param y query int true "Y coordinate of the region"
// @Param width query int true "Width of the region"
// @Param height query int true "Height of the region"
// @Param showCursor query bool false "Whether to show cursor in screenshot"
// @Param format query string false "Image format (png or jpeg)"
// @Param quality query int false "JPEG quality (1-100)"
// @Param scale query float64 false "Scale factor (0.1-1.0)"
// @Success 200 {object} ScreenshotResponse
// @Router /computeruse/screenshot/region/compressed [get]
//
// @id TakeCompressedRegionScreenshot
func WrapCompressedRegionScreenshotHandler(fn func(*CompressedRegionScreenshotRequest) (*ScreenshotResponse, error)) gin.HandlerFunc {
	return func(c *gin.Context) {
		var req CompressedRegionScreenshotRequest
		if err := c.ShouldBindQuery(&req); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid parameters"})
			return
		}
		// Manual parsing plus defaults, mirroring WrapCompressedScreenshotHandler.
		req.ShowCursor = c.Query("showCursor") == "true"
		req.Format = c.Query("format")
		req.Quality = 85
		req.Scale = 1.0

		// Parse quality
		if qualityStr := c.Query("quality"); qualityStr != "" {
			if quality, err := strconv.Atoi(qualityStr); err == nil && quality >= 1 && quality <= 100 {
				req.Quality = quality
			}
		}

		// Parse scale
		if scaleStr := c.Query("scale"); scaleStr != "" {
			if scale, err := strconv.ParseFloat(scaleStr, 64); err == nil && scale >= 0.1 && scale <= 1.0 {
				req.Scale = scale
			}
		}

		response, err := fn(&req)
		if err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, response)
	}
}

// GetMousePosition godoc
//
// @Summary Get mouse position
// @Description Get the current mouse cursor position
// @Tags computer-use
// @Produce json
// @Success 200 {object} MousePositionResponse
// @Router /computeruse/mouse/position [get]
//
// @id GetMousePosition
func WrapMousePositionHandler(fn func() (*MousePositionResponse, error)) gin.HandlerFunc {
	return func(c *gin.Context) {
		response, err := fn()
		if err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, response)
	}
}

// MoveMouse godoc
//
// @Summary Move mouse cursor
// @Description Move the mouse cursor to the specified coordinates
// @Tags computer-use
// @Accept json
// @Produce json
// @Param request body MouseMoveRequest true "Mouse move request" // @Success 200 {object} MousePositionResponse // @Router /computeruse/mouse/move [post] // // @id MoveMouse func WrapMoveMouseHandler(fn func(*MouseMoveRequest) (*MousePositionResponse, error)) gin.HandlerFunc { return func(c *gin.Context) { var req MouseMoveRequest if err := c.ShouldBindJSON(&req); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid coordinates"}) return } response, err := fn(&req) if err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } c.JSON(http.StatusOK, response) } } // Click godoc // // @Summary Click mouse button // @Description Click the mouse button at the specified coordinates // @Tags computer-use // @Accept json // @Produce json // @Param request body MouseClickRequest true "Mouse click request" // @Success 200 {object} MouseClickResponse // @Router /computeruse/mouse/click [post] // // @id Click func WrapClickHandler(fn func(*MouseClickRequest) (*MouseClickResponse, error)) gin.HandlerFunc { return func(c *gin.Context) { var req MouseClickRequest if err := c.ShouldBindJSON(&req); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid click parameters"}) return } response, err := fn(&req) if err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } c.JSON(http.StatusOK, response) } } // Drag godoc // // @Summary Drag mouse // @Description Drag the mouse from start to end coordinates // @Tags computer-use // @Accept json // @Produce json // @Param request body MouseDragRequest true "Mouse drag request" // @Success 200 {object} MouseDragResponse // @Router /computeruse/mouse/drag [post] // // @id Drag func WrapDragHandler(fn func(*MouseDragRequest) (*MouseDragResponse, error)) gin.HandlerFunc { return func(c *gin.Context) { var req MouseDragRequest if err := c.ShouldBindJSON(&req); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid drag parameters"}) return } response, err := 
fn(&req) if err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } c.JSON(http.StatusOK, response) } } // Scroll godoc // // @Summary Scroll mouse wheel // @Description Scroll the mouse wheel at the specified coordinates // @Tags computer-use // @Accept json // @Produce json // @Param request body MouseScrollRequest true "Mouse scroll request" // @Success 200 {object} ScrollResponse // @Router /computeruse/mouse/scroll [post] // // @id Scroll func WrapScrollHandler(fn func(*MouseScrollRequest) (*ScrollResponse, error)) gin.HandlerFunc { return func(c *gin.Context) { var req MouseScrollRequest if err := c.ShouldBindJSON(&req); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid scroll parameters"}) return } response, err := fn(&req) if err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } c.JSON(http.StatusOK, response) } } // TypeText godoc // // @Summary Type text // @Description Type text with optional delay between keystrokes // @Tags computer-use // @Accept json // @Produce json // @Param request body KeyboardTypeRequest true "Text typing request" // @Success 200 {object} Empty // @Router /computeruse/keyboard/type [post] // // @id TypeText func WrapTypeTextHandler(fn func(*KeyboardTypeRequest) (*Empty, error)) gin.HandlerFunc { return func(c *gin.Context) { var req KeyboardTypeRequest if err := c.ShouldBindJSON(&req); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid input"}) return } response, err := fn(&req) if err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } c.JSON(http.StatusOK, response) } } // PressKey godoc // // @Summary Press key // @Description Press a key with optional modifiers // @Tags computer-use // @Accept json // @Produce json // @Param request body KeyboardPressRequest true "Key press request" // @Success 200 {object} Empty // @Router /computeruse/keyboard/key [post] // // @id PressKey func WrapPressKeyHandler(fn 
func(*KeyboardPressRequest) (*Empty, error)) gin.HandlerFunc { return func(c *gin.Context) { var req KeyboardPressRequest if err := c.ShouldBindJSON(&req); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid key"}) return } response, err := fn(&req) if err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } c.JSON(http.StatusOK, response) } } // PressHotkey godoc // // @Summary Press hotkey // @Description Press a hotkey combination (e.g., ctrl+c, cmd+v) // @Tags computer-use // @Accept json // @Produce json // @Param request body KeyboardHotkeyRequest true "Hotkey press request" // @Success 200 {object} Empty // @Router /computeruse/keyboard/hotkey [post] // // @id PressHotkey func WrapPressHotkeyHandler(fn func(*KeyboardHotkeyRequest) (*Empty, error)) gin.HandlerFunc { return func(c *gin.Context) { var req KeyboardHotkeyRequest if err := c.ShouldBindJSON(&req); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid hotkey"}) return } response, err := fn(&req) if err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } c.JSON(http.StatusOK, response) } } // GetDisplayInfo godoc // // @Summary Get display information // @Description Get information about all available displays // @Tags computer-use // @Produce json // @Success 200 {object} DisplayInfoResponse // @Router /computeruse/display/info [get] // // @id GetDisplayInfo func WrapDisplayInfoHandler(fn func() (*DisplayInfoResponse, error)) gin.HandlerFunc { return func(c *gin.Context) { response, err := fn() if err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } c.JSON(http.StatusOK, response) } } // GetWindows godoc // // @Summary Get windows information // @Description Get information about all open windows // @Tags computer-use // @Produce json // @Success 200 {object} WindowsResponse // @Router /computeruse/display/windows [get] // // @id GetWindows func WrapWindowsHandler(fn func() (*WindowsResponse, 
error)) gin.HandlerFunc { return func(c *gin.Context) { response, err := fn() if err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } c.JSON(http.StatusOK, response) } } // GetStatus godoc // // @Summary Get computer use status // @Description Get the current status of the computer use system // @Tags computer-use // @Produce json // @Success 200 {object} ComputerUseStatusResponse // @Router /computeruse/status [get] // // @id GetComputerUseSystemStatus func WrapStatusHandler(fn func() (*ComputerUseStatusResponse, error)) gin.HandlerFunc { return func(c *gin.Context) { response, err := fn() if err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } c.JSON(http.StatusOK, response) } } ================================================ FILE: apps/daemon/pkg/toolbox/computeruse/lazy.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package computeruse import ( "errors" "net/http" "sync" "github.com/gin-gonic/gin" ) var errNotLoaded = errors.New("computer-use plugin is not loaded yet") // Compile-time check that LazyComputerUse implements IComputerUse. var _ IComputerUse = &LazyComputerUse{} // LazyComputerUse is a proxy that implements IComputerUse and delegates to the // real implementation once it has been set. Before Set is called, every method // returns errNotLoaded. type LazyComputerUse struct { mu sync.RWMutex impl IComputerUse } func NewLazyComputerUse() *LazyComputerUse { return &LazyComputerUse{} } // Set stores the real implementation. It is safe to call from any goroutine. func (l *LazyComputerUse) Set(impl IComputerUse) { l.mu.Lock() defer l.mu.Unlock() l.impl = impl } // IsReady reports whether the real implementation has been loaded. 
// IsReady reports whether the real implementation has been loaded.
func (l *LazyComputerUse) IsReady() bool {
	l.mu.RLock()
	defer l.mu.RUnlock()
	return l.impl != nil
}

// get returns the underlying implementation, or errNotLoaded if Set has not
// been called yet. Callers must not retain the returned value across Set calls.
func (l *LazyComputerUse) get() (IComputerUse, error) {
	l.mu.RLock()
	defer l.mu.RUnlock()
	if l.impl == nil {
		return nil, errNotLoaded
	}
	return l.impl, nil
}

// --- IComputerUse implementation ---
// Every method below follows the same shape: resolve the real implementation
// via get() and delegate, or propagate errNotLoaded before the plugin is set.

func (l *LazyComputerUse) Initialize() (*Empty, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.Initialize()
}

func (l *LazyComputerUse) Start() (*Empty, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.Start()
}

func (l *LazyComputerUse) Stop() (*Empty, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.Stop()
}

func (l *LazyComputerUse) GetProcessStatus() (map[string]ProcessStatus, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.GetProcessStatus()
}

func (l *LazyComputerUse) IsProcessRunning(req *ProcessRequest) (bool, error) {
	impl, err := l.get()
	if err != nil {
		return false, err
	}
	return impl.IsProcessRunning(req)
}

func (l *LazyComputerUse) RestartProcess(req *ProcessRequest) (*Empty, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.RestartProcess(req)
}

func (l *LazyComputerUse) GetProcessLogs(req *ProcessRequest) (string, error) {
	impl, err := l.get()
	if err != nil {
		return "", err
	}
	return impl.GetProcessLogs(req)
}

func (l *LazyComputerUse) GetProcessErrors(req *ProcessRequest) (string, error) {
	impl, err := l.get()
	if err != nil {
		return "", err
	}
	return impl.GetProcessErrors(req)
}

func (l *LazyComputerUse) TakeScreenshot(req *ScreenshotRequest) (*ScreenshotResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.TakeScreenshot(req)
}

func (l *LazyComputerUse) TakeRegionScreenshot(req *RegionScreenshotRequest) (*ScreenshotResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.TakeRegionScreenshot(req)
}

func (l *LazyComputerUse) TakeCompressedScreenshot(req *CompressedScreenshotRequest) (*ScreenshotResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.TakeCompressedScreenshot(req)
}

func (l *LazyComputerUse) TakeCompressedRegionScreenshot(req *CompressedRegionScreenshotRequest) (*ScreenshotResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.TakeCompressedRegionScreenshot(req)
}

func (l *LazyComputerUse) GetMousePosition() (*MousePositionResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.GetMousePosition()
}

func (l *LazyComputerUse) MoveMouse(req *MouseMoveRequest) (*MousePositionResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.MoveMouse(req)
}

func (l *LazyComputerUse) Click(req *MouseClickRequest) (*MouseClickResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.Click(req)
}

func (l *LazyComputerUse) Drag(req *MouseDragRequest) (*MouseDragResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.Drag(req)
}

func (l *LazyComputerUse) Scroll(req *MouseScrollRequest) (*ScrollResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.Scroll(req)
}

func (l *LazyComputerUse) TypeText(req *KeyboardTypeRequest) (*Empty, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.TypeText(req)
}

func (l *LazyComputerUse) PressKey(req *KeyboardPressRequest) (*Empty, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.PressKey(req)
}

func (l *LazyComputerUse) PressHotkey(req *KeyboardHotkeyRequest) (*Empty, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.PressHotkey(req)
}

func (l *LazyComputerUse) GetDisplayInfo() (*DisplayInfoResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.GetDisplayInfo()
}

func (l *LazyComputerUse) GetWindows() (*WindowsResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.GetWindows()
}

func (l *LazyComputerUse) GetStatus() (*ComputerUseStatusResponse, error) {
	impl, err := l.get()
	if err != nil {
		return nil, err
	}
	return impl.GetStatus()
}

// LazyCheckMiddleware returns 503 if the computer-use plugin has not loaded yet.
func LazyCheckMiddleware(lazy *LazyComputerUse) gin.HandlerFunc {
	return func(c *gin.Context) {
		if !lazy.IsReady() {
			c.JSON(http.StatusServiceUnavailable, gin.H{
				"message":  "Computer-use functionality is not available",
				"details":  "The computer-use plugin is still loading or failed to initialize.",
				"solution": "Retry shortly. If the problem persists, check the daemon logs for specific error details.",
			})
			c.Abort()
			return
		}
		c.Next()
	}
}



================================================
FILE: apps/daemon/pkg/toolbox/computeruse/manager/manager.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package manager

import (
	"bytes"
	"fmt"
	"log/slog"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/daytonaio/daemon/pkg/toolbox/computeruse"
	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-plugin"
)

// pluginRef holds the singleton go-plugin client and the dispensed
// implementation once GetComputerUse has succeeded.
type pluginRef struct {
	client *plugin.Client
	impl   computeruse.IComputerUse
	path   string
}

var ComputerUseHandshakeConfig = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "DAYTONA_COMPUTER_USE_PLUGIN",
	MagicCookieValue: "daytona_computer_use",
}

var computerUse = &pluginRef{}

// ComputerUseError represents a computer-use plugin error with context
type ComputerUseError struct {
	Type    string // "dependency", "system", "plugin"
	Message string
	Details string
}

func (e *ComputerUseError) Error() string {
	return e.Message
}

// detectPluginError tries to execute the plugin binary directly to get detailed error information
func detectPluginError(logger *slog.Logger, path string) *ComputerUseError {
	// Try to execute the plugin directly to get error output
	cmd := exec.Command(path)

	// Capture both stdout and stderr
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	// Execute the command
	err := cmd.Run()

	// Get the combined output
	output := stdout.String() + stderr.String()

	if err == nil {
		// Plugin executed successfully, this shouldn't happen in normal flow
		return &ComputerUseError{
			Type:    "plugin",
			Message: "Plugin executed successfully but failed during handshake",
			Details: "This may indicate a protocol version mismatch or plugin configuration issue.",
		}
	}

	// Get exit code if available
	exitCode := -1
	if exitErr, ok := err.(*exec.ExitError); ok {
		exitCode = exitErr.ExitCode()
	}

	// Log the raw error for debugging
	logger.Debug("Plugin execution failed", "exitCode", exitCode, "error", err)
	logger.Debug("Plugin stdout", "stdout", stdout.String())
	logger.Debug("Plugin stderr", "stderr", stderr.String())

	// Check for missing X11 runtime dependencies
	if strings.Contains(output, "libX11.so.6") ||
		strings.Contains(output, "libXext.so.6") ||
		strings.Contains(output, "libXtst.so.6") ||
		strings.Contains(output, "libXrandr.so.2") ||
		strings.Contains(output, "libXrender.so.1") ||
		strings.Contains(output, "libXfixes.so.3") ||
		strings.Contains(output, "libXss.so.1") ||
		strings.Contains(output, "libXi.so.6") ||
		strings.Contains(output, "libXinerama.so.1") {
		// Map each missing .so name to the package-level library name for the message.
		missingLibs := []string{}
		if strings.Contains(output, "libX11.so.6") {
			missingLibs = append(missingLibs, "libX11")
		}
		if strings.Contains(output, "libXext.so.6") {
			missingLibs = append(missingLibs, "libXext")
		}
		if strings.Contains(output, "libXtst.so.6") {
			missingLibs = append(missingLibs, "libXtst")
		}
		if strings.Contains(output, "libXrandr.so.2") {
			missingLibs = append(missingLibs, "libXrandr")
		}
		if strings.Contains(output, "libXrender.so.1") {
			missingLibs = append(missingLibs, "libXrender")
		}
		if strings.Contains(output, "libXfixes.so.3") {
			missingLibs = append(missingLibs, "libXfixes")
		}
		if strings.Contains(output, "libXss.so.1") {
			missingLibs = append(missingLibs, "libXScrnSaver")
		}
		if strings.Contains(output, "libXi.so.6") {
			missingLibs = append(missingLibs, "libXi")
		}
		if strings.Contains(output, "libXinerama.so.1") {
			missingLibs = append(missingLibs, "libXinerama")
		}

		return &ComputerUseError{
			Type:    "dependency",
			Message: fmt.Sprintf("Computer-use plugin requires X11 runtime libraries that are not available (missing: %s)", strings.Join(missingLibs, ", ")),
			Details: fmt.Sprintf(`To enable computer-use functionality, install the required dependencies:

For Ubuntu/Debian:
sudo apt-get update && sudo apt-get install -y \\
libx11-6 libxrandr2 libxext6 libxrender1 libxfixes3 libxss1 libxtst6 libxi6 libxinerama1 \\
xvfb x11vnc novnc xfce4 xfce4-terminal dbus-x11

For CentOS/RHEL/Fedora:
sudo yum install -y libX11 libXrandr libXext libXrender libXfixes libXScrnSaver libXtst libXi libXinerama \\
xorg-x11-server-Xvfb x11vnc novnc xfce4 xfce4-terminal dbus-x11

For Alpine:
apk add --no-cache \\
libx11 libxrandr libxext libxrender libxfixes libxss libxtst libxi libxinerama \\
xvfb x11vnc novnc xfce4 xfce4-terminal dbus-x11

Raw error output:
%s

Note: Computer-use features will be disabled until dependencies are installed.`, output),
		}
	}

	// Check for missing development libraries (build-time dependencies)
	if strings.Contains(output, "X11/extensions/XTest.h") ||
		strings.Contains(output, "X11/Xlib.h") ||
		strings.Contains(output, "X11/Xutil.h") ||
		strings.Contains(output, "X11/X.h") {
		return &ComputerUseError{
			Type:    "dependency",
			Message: "Computer-use plugin requires X11 development libraries",
			Details: fmt.Sprintf(`To build computer-use functionality, install the required development dependencies:

For Ubuntu/Debian:
sudo apt-get update && sudo apt-get install -y \\
libx11-dev libxtst-dev libxext-dev libxrandr-dev libxinerama-dev libxi-dev

For CentOS/RHEL/Fedora:
sudo yum install -y libX11-devel libXtst-devel libXext-devel libXrandr-devel libXinerama-devel libXi-devel

For Alpine:
apk add --no-cache \\
libx11-dev libxtst-dev libxext-dev libxrandr-dev libxinerama-dev libxi-dev

Raw error output:
%s

Note: Computer-use features will be disabled until dependencies are installed.`, output),
		}
	}

	// Check for permission issues
	if strings.Contains(output, "Permission denied") ||
		strings.Contains(output, "not executable") ||
		strings.Contains(output, "EACCES") {
		return &ComputerUseError{
			Type:    "system",
			Message: "Computer-use plugin has permission issues",
			Details: fmt.Sprintf("The plugin at %s is not executable. Please check file permissions and ensure the binary is executable.\n\nRaw error output: %s", path, output),
		}
	}

	// Check for architecture mismatch
	if strings.Contains(output, "wrong ELF class") ||
		strings.Contains(output, "architecture") ||
		strings.Contains(output, "ELF") ||
		strings.Contains(output, "exec format error") {
		return &ComputerUseError{
			Type:    "system",
			Message: "Computer-use plugin architecture mismatch",
			Details: fmt.Sprintf("The plugin was compiled for a different architecture. Please rebuild the plugin for the current system architecture.\n\nRaw error output: %s", output),
		}
	}

	// Check for missing system libraries
	if strings.Contains(output, "libc.so") ||
		strings.Contains(output, "libm.so") ||
		strings.Contains(output, "libdl.so") ||
		strings.Contains(output, "libpthread.so") {
		return &ComputerUseError{
			Type:    "system",
			Message: "Computer-use plugin requires basic system libraries",
			Details: fmt.Sprintf("The plugin is missing basic system libraries. This may indicate a corrupted binary or system issue.\n\nRaw error output: %s", output),
		}
	}

	// Check for file not found
	if strings.Contains(output, "No such file or directory") ||
		strings.Contains(output, "ENOENT") {
		return &ComputerUseError{
			Type:    "system",
			Message: "Computer-use plugin file not found",
			Details: fmt.Sprintf("The plugin file at %s could not be found or accessed.\n\nRaw error output: %s", path, output),
		}
	}

	// Check for Go runtime issues
	if strings.Contains(output, "go:") ||
		strings.Contains(output, "runtime:") ||
		strings.Contains(output, "panic:") {
		return &ComputerUseError{
			Type:    "plugin",
			Message: "Computer-use plugin has Go runtime issues",
			Details: fmt.Sprintf("The plugin encountered a Go runtime error.\n\nRaw error output: %s", output),
		}
	}

	// Generic plugin error with full details
	return &ComputerUseError{
		Type:    "plugin",
		Message: fmt.Sprintf("Computer-use plugin failed to start (exit code: %d)", exitCode),
		Details: fmt.Sprintf("Error: %v\nExit Code: %d\nOutput: %s", err, exitCode, output),
	}
}

// GetComputerUse loads (once) and returns the computer-use plugin implementation
// located at path. On any failure before success the go-plugin client is killed
// by the deferred cleanup so no subprocess is leaked.
func GetComputerUse(logger *slog.Logger, path string) (computeruse.IComputerUse, error) {
	// Reuse the already-loaded singleton if present.
	if computerUse.impl != nil {
		return computerUse.impl, nil
	}

	if _, err := os.Stat(path); os.IsNotExist(err) {
		return nil, fmt.Errorf("computer-use plugin not found at path: %s", path)
	}

	pluginName := filepath.Base(path)
	pluginBasePath := filepath.Dir(path)

	if runtime.GOOS == "windows" && strings.HasSuffix(path, ".exe") {
		pluginName = strings.TrimSuffix(pluginName, ".exe")
	}

	// Pre-flight check: detect critical issues (missing shared libraries, wrong
	// architecture, permission errors) before starting the go-plugin client.
	// When the plugin binary exits immediately, go-plugin panics due to a
	// WaitGroup race condition in its goroutine, crashing the entire daemon.
	pluginErr := detectPluginError(logger, path)
	if pluginErr != nil && pluginErr.Type != "plugin" {
		return nil, fmt.Errorf("failed to start computer-use plugin - detailed error\n[type]: %s - [error]: %s - [details]: %s", pluginErr.Type, pluginErr.Message, pluginErr.Details)
	}

	hclogger := hclog.New(&hclog.LoggerOptions{
		Name: pluginName,
		// Output: log.New().WriterLevel(log.DebugLevel),
		Output: os.Stdout,
		Level:  hclog.Debug,
	})

	pluginMap := map[string]plugin.Plugin{}
	pluginMap[pluginName] = &computeruse.ComputerUsePlugin{}

	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: ComputerUseHandshakeConfig,
		Plugins:         pluginMap,
		Cmd:             exec.Command(path),
		Logger:          hclogger,
		Managed:         true,
	})

	// Kill the plugin subprocess on any error path below.
	success := false
	defer func() {
		if !success {
			client.Kill()
		}
	}()

	logger.Info("Computer use registered", "pluginName", pluginName)

	rpcClient, err := client.Client()
	if err != nil {
		// Re-probe the binary to turn an opaque handshake failure into an
		// actionable message when possible.
		pluginErr := detectPluginError(logger, path)
		if pluginErr != nil {
			return nil, fmt.Errorf("failed to get RPC client for computer-use plugin - detailed error\n[type]: %s - [error]: %s - [details]: %s", pluginErr.Type, pluginErr.Message, pluginErr.Details)
		}
		return nil, fmt.Errorf("failed to get RPC client for computer-use plugin: %w", err)
	}

	raw, err := rpcClient.Dispense(pluginName)
	if err != nil {
		return nil, fmt.Errorf("failed to dispense computer-use plugin: %w", err)
	}

	impl, ok := raw.(computeruse.IComputerUse)
	if !ok {
		return nil, fmt.Errorf("unexpected type from computer-use plugin")
	}

	_, err = impl.Initialize()
	if err != nil {
		return nil, fmt.Errorf("failed to initialize computer-use plugin: %w", err)
	}

	success = true
	logger.Info("Computer-use plugin initialized successfully")

	computerUse.client = client
	computerUse.impl = impl
	computerUse.path = pluginBasePath

	return impl, nil
}



================================================
FILE: apps/daemon/pkg/toolbox/computeruse/recording/controller.go
================================================
// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package recording

import (
	"github.com/daytonaio/daemon/pkg/recording"
)

// RecordingController exposes HTTP handlers over the recording service.
type RecordingController struct {
	recordingService *recording.RecordingService
}

// NewRecordingController wires a controller to the given recording service.
func NewRecordingController(recordingService *recording.RecordingService) *RecordingController {
	return &RecordingController{
		recordingService: recordingService,
	}
}



================================================
FILE: apps/daemon/pkg/toolbox/computeruse/recording/download.go
================================================
// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package recording

import (
	"errors"
	"net/http"
	"os"

	"github.com/daytonaio/daemon/pkg/recording"
	"github.com/gin-gonic/gin"
)

// DownloadRecording godoc
//
// @Summary Download a recording
// @Description Download a recording by providing its ID
// @Tags computer-use
// @Produce octet-stream
// @Param id path string true "Recording ID"
// @Success 200 {file} binary
// @Failure 404 {object} map[string]string
// @Failure 500 {object} map[string]string
// @Router /computeruse/recordings/{id}/download [get]
//
// @id DownloadRecording
func (r *RecordingController) DownloadRecording(ctx *gin.Context) {
	id := ctx.Param("id")

	rec, err := r.recordingService.GetRecording(id)
	if err != nil {
		if errors.Is(err, recording.ErrRecordingNotFound) {
			ctx.JSON(http.StatusNotFound, gin.H{"error": "recording not found"})
			return
		}
		ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	// The metadata can outlive the file on disk; verify before streaming.
	if _, err := os.Stat(rec.FilePath); os.IsNotExist(err) {
		ctx.JSON(http.StatusNotFound, gin.H{"error": "file not found"})
		return
	}

	ctx.File(rec.FilePath)
}



================================================
FILE: apps/daemon/pkg/toolbox/computeruse/recording/recording.go
================================================
// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package recording import ( "errors" "net/http" "github.com/gin-gonic/gin" recordingservice "github.com/daytonaio/daemon/pkg/recording" ) // ListRecordings godoc // // @Summary List all recordings // @Description Get a list of all recordings (active and completed) // @Tags computer-use // @Produce json // @Success 200 {object} ListRecordingsResponse // @Failure 500 {object} map[string]string // @Router /computeruse/recordings [get] // // @id ListRecordings func (r *RecordingController) ListRecordings(ctx *gin.Context) { recordings, err := r.recordingService.ListRecordings() if err != nil { ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return } recordingDTOs := make([]RecordingDTO, 0, len(recordings)) for _, rec := range recordings { recordingDTOs = append(recordingDTOs, *RecordingToDTO(&rec)) } response := ListRecordingsResponse{ Recordings: recordingDTOs, } ctx.JSON(http.StatusOK, response) } // GetRecording godoc // // @Summary Get recording details // @Description Get details of a specific recording by ID // @Tags computer-use // @Produce json // @Param id path string true "Recording ID" // @Success 200 {object} RecordingDTO // @Failure 404 {object} map[string]string // @Failure 500 {object} map[string]string // @Router /computeruse/recordings/{id} [get] // // @id GetRecording func (r *RecordingController) GetRecording(ctx *gin.Context) { id := ctx.Param("id") if id == "" { ctx.JSON(http.StatusBadRequest, gin.H{"error": "id is required"}) return } recording, err := r.recordingService.GetRecording(id) if err != nil { if errors.Is(err, recordingservice.ErrRecordingNotFound) { ctx.JSON(http.StatusNotFound, gin.H{"error": "recording not found"}) return } ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return } ctx.JSON(http.StatusOK, *RecordingToDTO(recording)) } // DeleteRecording godoc // // @Summary Delete a recording // @Description Delete a recording file by ID // @Tags 
computer-use // @Param id path string true "Recording ID" // @Success 204 // @Failure 400 {object} map[string]string // @Failure 404 {object} map[string]string // @Failure 500 {object} map[string]string // @Router /computeruse/recordings/{id} [delete] // // @id DeleteRecording func (r *RecordingController) DeleteRecording(ctx *gin.Context) { id := ctx.Param("id") if id == "" { ctx.JSON(http.StatusBadRequest, gin.H{"error": "id is required"}) return } err := r.recordingService.DeleteRecording(id) if err != nil { if errors.Is(err, recordingservice.ErrRecordingNotFound) { ctx.JSON(http.StatusNotFound, gin.H{"error": "recording not found"}) return } if errors.Is(err, recordingservice.ErrRecordingStillActive) { ctx.JSON(http.StatusBadRequest, gin.H{"error": "cannot delete an active recording, stop it first"}) return } ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return } ctx.Status(http.StatusNoContent) } ================================================ FILE: apps/daemon/pkg/toolbox/computeruse/recording/start.go ================================================ // Copyright Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package recording import ( "errors" "net/http" "github.com/gin-gonic/gin" recordingservice "github.com/daytonaio/daemon/pkg/recording" ) // StartRecording godoc // // @Summary Start a new recording // @Description Start a new screen recording session // @Tags computer-use // @Accept json // @Produce json // @Param request body StartRecordingRequest false "Recording options" // @Success 201 {object} RecordingDTO // @Failure 400 {object} map[string]string // @Failure 500 {object} map[string]string // @Router /computeruse/recordings/start [post] // // @id StartRecording func (h *RecordingController) StartRecording(ctx *gin.Context) { var request StartRecordingRequest if err := ctx.ShouldBindJSON(&request); err != nil { // Allow empty body - label is optional request = StartRecordingRequest{} } recording, err := h.recordingService.StartRecording(request.Label) if err != nil { if errors.Is(err, recordingservice.ErrFFmpegNotFound) { ctx.JSON(http.StatusBadRequest, gin.H{ "error": "ffmpeg_not_found", "message": "FFmpeg must be installed and available in PATH to use screen recording", }) return } if errors.Is(err, recordingservice.ErrInvalidLabel) { ctx.JSON(http.StatusBadRequest, gin.H{ "error": "invalid_label", "message": err.Error(), }) return } ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return } ctx.JSON(http.StatusCreated, *RecordingToDTO(recording)) } ================================================ FILE: apps/daemon/pkg/toolbox/computeruse/recording/stop.go ================================================ // Copyright Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package recording import ( "errors" "net/http" "github.com/gin-gonic/gin" recordingservice "github.com/daytonaio/daemon/pkg/recording" ) // StopRecording godoc // // @Summary Stop a recording // @Description Stop an active screen recording session // @Tags computer-use // @Accept json // @Produce json // @Param request body StopRecordingRequest true "Recording ID to stop" // @Success 200 {object} RecordingDTO // @Failure 400 {object} map[string]string // @Failure 404 {object} map[string]string // @Router /computeruse/recordings/stop [post] // // @id StopRecording func (r *RecordingController) StopRecording(ctx *gin.Context) { var request StopRecordingRequest if err := ctx.ShouldBindJSON(&request); err != nil { ctx.JSON(http.StatusBadRequest, gin.H{"error": "invalid request: id is required"}) return } if request.ID == "" { ctx.JSON(http.StatusBadRequest, gin.H{"error": "id is required"}) return } recording, err := r.recordingService.StopRecording(request.ID) if err != nil { if errors.Is(err, recordingservice.ErrRecordingNotFound) { ctx.JSON(http.StatusNotFound, gin.H{"error": "recording not found"}) return } ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return } ctx.JSON(http.StatusOK, *RecordingToDTO(recording)) } ================================================ FILE: apps/daemon/pkg/toolbox/computeruse/recording/types.go ================================================ // Copyright Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package recording

import (
	"time"

	"github.com/daytonaio/daemon/pkg/recording"
)

// RecordingDTO represents a recording session (active or completed) as it is
// serialized over the toolbox HTTP API. Optional fields are nil while a
// recording is still in progress.
type RecordingDTO struct {
	ID              string     `json:"id" validate:"required"`
	FileName        string     `json:"fileName" validate:"required"`
	FilePath        string     `json:"filePath" validate:"required"`
	StartTime       time.Time  `json:"startTime" validate:"required"`
	EndTime         *time.Time `json:"endTime,omitempty"`
	Status          string     `json:"status" validate:"required"`
	DurationSeconds *float64   `json:"durationSeconds,omitempty"`
	SizeBytes       *int64     `json:"sizeBytes,omitempty"`
} // @name Recording

// StartRecordingRequest represents the request to start a new recording.
// The label is optional; a nil label lets the service pick a default.
type StartRecordingRequest struct {
	Label *string `json:"label,omitempty"`
} // @name StartRecordingRequest

// StopRecordingRequest represents the request to stop an active recording.
type StopRecordingRequest struct {
	ID string `json:"id" validate:"required"`
} // @name StopRecordingRequest

// ListRecordingsResponse represents the response containing all recordings.
type ListRecordingsResponse struct {
	Recordings []RecordingDTO `json:"recordings" validate:"required"`
} // @name ListRecordingsResponse

// RecordingToDTO copies a service-layer recording into its transport DTO.
// Pointer fields are shared, not deep-copied; callers must not mutate them.
func RecordingToDTO(r *recording.Recording) *RecordingDTO {
	return &RecordingDTO{
		ID:              r.ID,
		FileName:        r.FileName,
		FilePath:        r.FilePath,
		StartTime:       r.StartTime,
		EndTime:         r.EndTime,
		Status:          r.Status,
		DurationSeconds: r.DurationSeconds,
		SizeBytes:       r.SizeBytes,
	}
}



================================================
FILE: apps/daemon/pkg/toolbox/computeruse/rpc_client.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package computeruse

import (
	"net/rpc"
)

// ComputerUseRPCClient is the daemon-side stub that forwards every
// IComputerUse call to the plugin process over net/rpc. Each method name maps
// to the matching "Plugin.<Name>" method served by ComputerUseRPCServer.
type ComputerUseRPCClient struct {
	client *rpc.Client
}

// Type check
var _ IComputerUse = &ComputerUseRPCClient{}

// Process management methods

func (m *ComputerUseRPCClient) Initialize() (*Empty, error) {
	err := m.client.Call("Plugin.Initialize", new(any), new(Empty))
	return new(Empty), err
}

func (m *ComputerUseRPCClient) Start() (*Empty, error) {
	err := m.client.Call("Plugin.Start", new(any), new(Empty))
	return new(Empty), err
}

func (m *ComputerUseRPCClient) Stop() (*Empty, error) {
	err := m.client.Call("Plugin.Stop", new(any), new(Empty))
	return new(Empty), err
}

func (m *ComputerUseRPCClient) GetProcessStatus() (map[string]ProcessStatus, error) {
	resp := map[string]ProcessStatus{}
	err := m.client.Call("Plugin.GetProcessStatus", new(any), &resp)
	return resp, err
}

func (m *ComputerUseRPCClient) IsProcessRunning(req *ProcessRequest) (bool, error) {
	var resp bool
	err := m.client.Call("Plugin.IsProcessRunning", req, &resp)
	return resp, err
}

func (m *ComputerUseRPCClient) RestartProcess(req *ProcessRequest) (*Empty, error) {
	err := m.client.Call("Plugin.RestartProcess", req, new(Empty))
	return new(Empty), err
}

func (m *ComputerUseRPCClient) GetProcessLogs(req *ProcessRequest) (string, error) {
	var resp string
	err := m.client.Call("Plugin.GetProcessLogs", req, &resp)
	return resp, err
}

func (m *ComputerUseRPCClient) GetProcessErrors(req *ProcessRequest) (string, error) {
	var resp string
	err := m.client.Call("Plugin.GetProcessErrors", req, &resp)
	return resp, err
}

// Screenshot methods

func (m *ComputerUseRPCClient) TakeScreenshot(request *ScreenshotRequest) (*ScreenshotResponse, error) {
	var resp ScreenshotResponse
	err := m.client.Call("Plugin.TakeScreenshot", request, &resp)
	return &resp, err
}

func (m *ComputerUseRPCClient) TakeRegionScreenshot(request *RegionScreenshotRequest) (*ScreenshotResponse, error) {
	var resp ScreenshotResponse
	err := m.client.Call("Plugin.TakeRegionScreenshot", request, &resp)
	return &resp, err
}

func (m *ComputerUseRPCClient) TakeCompressedScreenshot(request *CompressedScreenshotRequest) (*ScreenshotResponse, error) {
	var resp ScreenshotResponse
	err := m.client.Call("Plugin.TakeCompressedScreenshot", request, &resp)
	return &resp, err
}

func (m *ComputerUseRPCClient) TakeCompressedRegionScreenshot(request *CompressedRegionScreenshotRequest) (*ScreenshotResponse, error) {
	var resp ScreenshotResponse
	err := m.client.Call("Plugin.TakeCompressedRegionScreenshot", request, &resp)
	return &resp, err
}

// Mouse control methods

func (m *ComputerUseRPCClient) GetMousePosition() (*MousePositionResponse, error) {
	var resp MousePositionResponse
	err := m.client.Call("Plugin.GetMousePosition", new(any), &resp)
	return &resp, err
}

func (m *ComputerUseRPCClient) MoveMouse(request *MouseMoveRequest) (*MousePositionResponse, error) {
	var resp MousePositionResponse
	err := m.client.Call("Plugin.MoveMouse", request, &resp)
	return &resp, err
}

func (m *ComputerUseRPCClient) Click(request *MouseClickRequest) (*MouseClickResponse, error) {
	var resp MouseClickResponse
	err := m.client.Call("Plugin.Click", request, &resp)
	return &resp, err
}

func (m *ComputerUseRPCClient) Drag(request *MouseDragRequest) (*MouseDragResponse, error) {
	var resp MouseDragResponse
	err := m.client.Call("Plugin.Drag", request, &resp)
	return &resp, err
}

func (m *ComputerUseRPCClient) Scroll(request *MouseScrollRequest) (*ScrollResponse, error) {
	var resp ScrollResponse
	err := m.client.Call("Plugin.Scroll", request, &resp)
	return &resp, err
}

// Keyboard control methods

func (m *ComputerUseRPCClient) TypeText(request *KeyboardTypeRequest) (*Empty, error) {
	err := m.client.Call("Plugin.TypeText", request, new(Empty))
	return new(Empty), err
}

func (m *ComputerUseRPCClient) PressKey(request *KeyboardPressRequest) (*Empty, error) {
	err := m.client.Call("Plugin.PressKey", request, new(Empty))
	return new(Empty), err
}

func (m *ComputerUseRPCClient) PressHotkey(request *KeyboardHotkeyRequest) (*Empty, error) {
	err := m.client.Call("Plugin.PressHotkey", request, new(Empty))
	return new(Empty), err
}

// Display info methods

func (m *ComputerUseRPCClient) GetDisplayInfo() (*DisplayInfoResponse, error) {
	var resp DisplayInfoResponse
	err := m.client.Call("Plugin.GetDisplayInfo", new(any), &resp)
	return &resp, err
}

func (m *ComputerUseRPCClient) GetWindows() (*WindowsResponse, error) {
	var resp WindowsResponse
	err := m.client.Call("Plugin.GetWindows", new(any), &resp)
	return &resp, err
}

// Status method

func (m *ComputerUseRPCClient) GetStatus() (*ComputerUseStatusResponse, error) {
	var resp ComputerUseStatusResponse
	err := m.client.Call("Plugin.GetStatus", new(any), &resp)
	return &resp, err
}



================================================
FILE: apps/daemon/pkg/toolbox/computeruse/rpc_server.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package computeruse

// ComputerUseRPCServer is the plugin-side adapter: it exposes the concrete
// IComputerUse implementation over net/rpc so ComputerUseRPCClient can call it.
// Each exported method matches a "Plugin.<Name>" call made by the client.
type ComputerUseRPCServer struct {
	Impl IComputerUse
}

// Process management methods

func (m *ComputerUseRPCServer) Initialize(arg any, resp *Empty) error {
	_, err := m.Impl.Initialize()
	return err
}

func (m *ComputerUseRPCServer) Start(arg any, resp *Empty) error {
	_, err := m.Impl.Start()
	return err
}

func (m *ComputerUseRPCServer) Stop(arg any, resp *Empty) error {
	_, err := m.Impl.Stop()
	return err
}

func (m *ComputerUseRPCServer) GetProcessStatus(arg any, resp *map[string]ProcessStatus) error {
	status, err := m.Impl.GetProcessStatus()
	if err != nil {
		return err
	}
	*resp = status
	return nil
}

func (m *ComputerUseRPCServer) IsProcessRunning(arg *ProcessRequest, resp *bool) error {
	isRunning, err := m.Impl.IsProcessRunning(arg)
	if err != nil {
		return err
	}
	*resp = isRunning
	return nil
}

func (m *ComputerUseRPCServer) RestartProcess(arg *ProcessRequest, resp *Empty) error {
	_, err := m.Impl.RestartProcess(arg)
	return err
}

func (m *ComputerUseRPCServer) GetProcessLogs(arg *ProcessRequest, resp *string) error {
	logs, err := m.Impl.GetProcessLogs(arg)
	if err != nil {
		return err
	}
	*resp = logs
	return nil
}

func (m *ComputerUseRPCServer) GetProcessErrors(arg *ProcessRequest, resp *string) error {
	errors, err := m.Impl.GetProcessErrors(arg)
	if err != nil {
		return err
	}
	*resp = errors
	return nil
}

// Screenshot methods

func (m *ComputerUseRPCServer) TakeScreenshot(arg *ScreenshotRequest, resp *ScreenshotResponse) error {
	response, err := m.Impl.TakeScreenshot(arg)
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}

func (m *ComputerUseRPCServer) TakeRegionScreenshot(arg *RegionScreenshotRequest, resp *ScreenshotResponse) error {
	response, err := m.Impl.TakeRegionScreenshot(arg)
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}

func (m *ComputerUseRPCServer) TakeCompressedScreenshot(arg *CompressedScreenshotRequest, resp *ScreenshotResponse) error {
	response, err := m.Impl.TakeCompressedScreenshot(arg)
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}

func (m *ComputerUseRPCServer) TakeCompressedRegionScreenshot(arg *CompressedRegionScreenshotRequest, resp *ScreenshotResponse) error {
	response, err := m.Impl.TakeCompressedRegionScreenshot(arg)
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}

// Mouse control methods

func (m *ComputerUseRPCServer) GetMousePosition(arg any, resp *MousePositionResponse) error {
	response, err := m.Impl.GetMousePosition()
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}

func (m *ComputerUseRPCServer) MoveMouse(arg *MouseMoveRequest, resp *MousePositionResponse) error {
	response, err := m.Impl.MoveMouse(arg)
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}

func (m *ComputerUseRPCServer) Click(arg *MouseClickRequest, resp *MouseClickResponse) error {
	response, err := m.Impl.Click(arg)
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}

func (m *ComputerUseRPCServer) Drag(arg *MouseDragRequest, resp *MouseDragResponse) error {
	response, err := m.Impl.Drag(arg)
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}

func (m *ComputerUseRPCServer) Scroll(arg *MouseScrollRequest, resp *ScrollResponse) error {
	response, err := m.Impl.Scroll(arg)
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}

// Keyboard control methods

func (m *ComputerUseRPCServer) TypeText(arg *KeyboardTypeRequest, resp *Empty) error {
	_, err := m.Impl.TypeText(arg)
	return err
}

func (m *ComputerUseRPCServer) PressKey(arg *KeyboardPressRequest, resp *Empty) error {
	_, err := m.Impl.PressKey(arg)
	return err
}

func (m *ComputerUseRPCServer) PressHotkey(arg *KeyboardHotkeyRequest, resp *Empty) error {
	_, err := m.Impl.PressHotkey(arg)
	return err
}

// Display info methods

func (m *ComputerUseRPCServer) GetDisplayInfo(arg any, resp *DisplayInfoResponse) error {
	response, err := m.Impl.GetDisplayInfo()
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}

func (m *ComputerUseRPCServer) GetWindows(arg any, resp *WindowsResponse) error {
	response, err := m.Impl.GetWindows()
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}

// Status method

func (m *ComputerUseRPCServer) GetStatus(arg any, resp *ComputerUseStatusResponse) error {
	response, err := m.Impl.GetStatus()
	if err != nil {
		return err
	}
	*resp = *response
	return nil
}



================================================
FILE: apps/daemon/pkg/toolbox/config/config.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package config

// TOOLBOX_API_PORT is the port the toolbox HTTP API listens on.
const TOOLBOX_API_PORT = 2280

// RECORDING_DASHBOARD_PORT is the port used by the recording dashboard.
const RECORDING_DASHBOARD_PORT = 33333



================================================
FILE: apps/daemon/pkg/toolbox/controller.go
================================================
// Copyright Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package toolbox

import (
	"net/http"
	"os"

	"github.com/daytonaio/daemon/internal"
	"github.com/gin-gonic/gin"
)

// Initialize godoc
//
// @Summary Initialize toolbox server
// @Description Set the auth token and initialize telemetry for the toolbox server
// @Tags server
// @Produce json
// @Param request body InitializeRequest true "Initialization request"
// @Success 200 {object} map[string]string
// @Router /init [post]
//
// @id Initialize
func (s *server) Initialize(otelServiceName string, entrypointLogFilePath string, organizationId, regionId *string) gin.HandlerFunc {
	return func(ctx *gin.Context) {
		var req InitializeRequest
		if err := ctx.ShouldBindJSON(&req); err != nil {
			ctx.AbortWithError(http.StatusBadRequest, err)
			return
		}
		// Store the token used to authenticate subsequent toolbox requests.
		s.authToken = req.Token
		// NOTE(review): a telemetry-init failure is reported as 400 (same as a
		// bind failure) even though the token has already been stored above.
		err := s.initTelemetry(ctx.Request.Context(), otelServiceName, entrypointLogFilePath, organizationId, regionId)
		if err != nil {
			ctx.AbortWithError(http.StatusBadRequest, err)
			return
		}
		ctx.JSON(http.StatusOK, gin.H{
			"message": "Auth token set and telemetry initialized successfully",
		})
	}
}

// GetWorkDir godoc
//
// @Summary Get working directory
// @Description Get the current working directory path. This is default directory used for running commands.
// @Tags info
// @Produce json
// @Success 200 {object} WorkDirResponse
// @Router /work-dir [get]
//
// @id GetWorkDir
func (s *server) GetWorkDir(ctx *gin.Context) {
	// Responds with the server's configured working directory.
	workDir := WorkDirResponse{
		Dir: s.WorkDir,
	}
	ctx.JSON(http.StatusOK, workDir)
}

// GetUserHomeDir godoc
//
// @Summary Get user home directory
// @Description Get the current user home directory path.
// @Tags info
// @Produce json
// @Success 200 {object} UserHomeDirResponse
// @Router /user-home-dir [get]
//
// @id GetUserHomeDir
func (s *server) GetUserHomeDir(ctx *gin.Context) {
	// Resolved from the OS for the user the daemon runs as.
	userHomeDir, err := os.UserHomeDir()
	if err != nil {
		ctx.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	userHomeDirResponse := UserHomeDirResponse{
		Dir: userHomeDir,
	}
	ctx.JSON(http.StatusOK, userHomeDirResponse)
}

// GetVersion godoc
//
// @Summary Get version
// @Description Get the current daemon version
// @Tags info
// @Produce json
// @Success 200 {object} map[string]string
// @Router /version [get]
//
// @id GetVersion
func (s *server) GetVersion(ctx *gin.Context) {
	// Version string comes from the internal package; presumably stamped at
	// build time — confirm against the build configuration.
	ctx.JSON(http.StatusOK, gin.H{
		"version": internal.Version,
	})
}

================================================ FILE: apps/daemon/pkg/toolbox/docs/docs.go ================================================
// Package docs Code generated by swaggo/swag. DO NOT EDIT
package docs

import "github.com/swaggo/swag"

// NOTE(review): generated swagger template — raw-string content below is kept
// byte-identical (whitespace inside a raw string is significant) and continues
// past this region.
const docTemplate = `{ "schemes": {{ marshal .Schemes }}, "swagger": "2.0", "info": { "description": "{{escape .Description}}", "title": "{{.Title}}", "contact": {}, "version": "{{.Version}}" }, "host": "{{.Host}}", "basePath": "{{.BasePath}}", "paths": { "/computeruse/display/info": { "get": { "description": "Get information about all available displays", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Get display information", "operationId": "GetDisplayInfo", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/DisplayInfoResponse" } } } } }, "/computeruse/display/windows": { "get": { "description": "Get information about all open windows", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Get windows information", "operationId": "GetWindows", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/WindowsResponse" } } } } }, "/computeruse/keyboard/hotkey": { "post": { "description": "Press a
hotkey combination (e.g., ctrl+c, cmd+v)", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Press hotkey", "operationId": "PressHotkey", "parameters": [ { "description": "Hotkey press request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/KeyboardHotkeyRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Empty" } } } } }, "/computeruse/keyboard/key": { "post": { "description": "Press a key with optional modifiers", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Press key", "operationId": "PressKey", "parameters": [ { "description": "Key press request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/KeyboardPressRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Empty" } } } } }, "/computeruse/keyboard/type": { "post": { "description": "Type text with optional delay between keystrokes", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Type text", "operationId": "TypeText", "parameters": [ { "description": "Text typing request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/KeyboardTypeRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Empty" } } } } }, "/computeruse/mouse/click": { "post": { "description": "Click the mouse button at the specified coordinates", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Click mouse button", "operationId": "Click", "parameters": [ { "description": "Mouse click request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/MouseClickRequest" } } ], "responses": { "200": { "description": "OK", 
"schema": { "$ref": "#/definitions/MouseClickResponse" } } } } }, "/computeruse/mouse/drag": { "post": { "description": "Drag the mouse from start to end coordinates", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Drag mouse", "operationId": "Drag", "parameters": [ { "description": "Mouse drag request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/MouseDragRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/MouseDragResponse" } } } } }, "/computeruse/mouse/move": { "post": { "description": "Move the mouse cursor to the specified coordinates", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Move mouse cursor", "operationId": "MoveMouse", "parameters": [ { "description": "Mouse move request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/MouseMoveRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/MousePositionResponse" } } } } }, "/computeruse/mouse/position": { "get": { "description": "Get the current mouse cursor position", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Get mouse position", "operationId": "GetMousePosition", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/MousePositionResponse" } } } } }, "/computeruse/mouse/scroll": { "post": { "description": "Scroll the mouse wheel at the specified coordinates", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Scroll mouse wheel", "operationId": "Scroll", "parameters": [ { "description": "Mouse scroll request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/MouseScrollRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": 
"#/definitions/ScrollResponse" } } } } }, "/computeruse/process-status": { "get": { "description": "Get the status of all computer use processes", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Get computer use process status", "operationId": "GetComputerUseStatus", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ComputerUseStatusResponse" } } } } }, "/computeruse/process/{processName}/errors": { "get": { "description": "Get errors for a specific computer use process", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Get process errors", "operationId": "GetProcessErrors", "parameters": [ { "type": "string", "description": "Process name to get errors for", "name": "processName", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ProcessErrorsResponse" } } } } }, "/computeruse/process/{processName}/logs": { "get": { "description": "Get logs for a specific computer use process", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Get process logs", "operationId": "GetProcessLogs", "parameters": [ { "type": "string", "description": "Process name to get logs for", "name": "processName", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ProcessLogsResponse" } } } } }, "/computeruse/process/{processName}/restart": { "post": { "description": "Restart a specific computer use process", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Restart specific process", "operationId": "RestartProcess", "parameters": [ { "type": "string", "description": "Process name to restart", "name": "processName", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ProcessRestartResponse" } } } } }, "/computeruse/process/{processName}/status": { "get": { "description": 
"Check if a specific computer use process is running", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Get specific process status", "operationId": "GetProcessStatus", "parameters": [ { "type": "string", "description": "Process name to check", "name": "processName", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ProcessStatusResponse" } } } } }, "/computeruse/recordings": { "get": { "description": "Get a list of all recordings (active and completed)", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "List all recordings", "operationId": "ListRecordings", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ListRecordingsResponse" } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/computeruse/recordings/start": { "post": { "description": "Start a new screen recording session", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Start a new recording", "operationId": "StartRecording", "parameters": [ { "description": "Recording options", "name": "request", "in": "body", "schema": { "$ref": "#/definitions/StartRecordingRequest" } } ], "responses": { "201": { "description": "Created", "schema": { "$ref": "#/definitions/Recording" } }, "400": { "description": "Bad Request", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/computeruse/recordings/stop": { "post": { "description": "Stop an active screen recording session", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Stop a recording", "operationId": "StopRecording", "parameters": [ { 
"description": "Recording ID to stop", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/StopRecordingRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Recording" } }, "400": { "description": "Bad Request", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "404": { "description": "Not Found", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/computeruse/recordings/{id}": { "get": { "description": "Get details of a specific recording by ID", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Get recording details", "operationId": "GetRecording", "parameters": [ { "type": "string", "description": "Recording ID", "name": "id", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Recording" } }, "404": { "description": "Not Found", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } }, "delete": { "description": "Delete a recording file by ID", "tags": [ "computer-use" ], "summary": "Delete a recording", "operationId": "DeleteRecording", "parameters": [ { "type": "string", "description": "Recording ID", "name": "id", "in": "path", "required": true } ], "responses": { "204": { "description": "No Content" }, "400": { "description": "Bad Request", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "404": { "description": "Not Found", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/computeruse/recordings/{id}/download": { "get": { "description": "Download a recording by providing its ID", 
"produces": [ "application/octet-stream" ], "tags": [ "computer-use" ], "summary": "Download a recording", "operationId": "DownloadRecording", "parameters": [ { "type": "string", "description": "Recording ID", "name": "id", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "file" } }, "404": { "description": "Not Found", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/computeruse/screenshot": { "get": { "description": "Take a screenshot of the entire screen", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Take a screenshot", "operationId": "TakeScreenshot", "parameters": [ { "type": "boolean", "description": "Whether to show cursor in screenshot", "name": "showCursor", "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ScreenshotResponse" } } } } }, "/computeruse/screenshot/compressed": { "get": { "description": "Take a compressed screenshot of the entire screen", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Take a compressed screenshot", "operationId": "TakeCompressedScreenshot", "parameters": [ { "type": "boolean", "description": "Whether to show cursor in screenshot", "name": "showCursor", "in": "query" }, { "type": "string", "description": "Image format (png or jpeg)", "name": "format", "in": "query" }, { "type": "integer", "description": "JPEG quality (1-100)", "name": "quality", "in": "query" }, { "type": "number", "description": "Scale factor (0.1-1.0)", "name": "scale", "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ScreenshotResponse" } } } } }, "/computeruse/screenshot/region": { "get": { "description": "Take a screenshot of a specific region of the screen", "produces": [ 
"application/json" ], "tags": [ "computer-use" ], "summary": "Take a region screenshot", "operationId": "TakeRegionScreenshot", "parameters": [ { "type": "integer", "description": "X coordinate of the region", "name": "x", "in": "query", "required": true }, { "type": "integer", "description": "Y coordinate of the region", "name": "y", "in": "query", "required": true }, { "type": "integer", "description": "Width of the region", "name": "width", "in": "query", "required": true }, { "type": "integer", "description": "Height of the region", "name": "height", "in": "query", "required": true }, { "type": "boolean", "description": "Whether to show cursor in screenshot", "name": "showCursor", "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ScreenshotResponse" } } } } }, "/computeruse/screenshot/region/compressed": { "get": { "description": "Take a compressed screenshot of a specific region of the screen", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Take a compressed region screenshot", "operationId": "TakeCompressedRegionScreenshot", "parameters": [ { "type": "integer", "description": "X coordinate of the region", "name": "x", "in": "query", "required": true }, { "type": "integer", "description": "Y coordinate of the region", "name": "y", "in": "query", "required": true }, { "type": "integer", "description": "Width of the region", "name": "width", "in": "query", "required": true }, { "type": "integer", "description": "Height of the region", "name": "height", "in": "query", "required": true }, { "type": "boolean", "description": "Whether to show cursor in screenshot", "name": "showCursor", "in": "query" }, { "type": "string", "description": "Image format (png or jpeg)", "name": "format", "in": "query" }, { "type": "integer", "description": "JPEG quality (1-100)", "name": "quality", "in": "query" }, { "type": "number", "description": "Scale factor (0.1-1.0)", "name": "scale", "in": "query" } 
], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ScreenshotResponse" } } } } }, "/computeruse/start": { "post": { "description": "Start all computer use processes and return their status", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Start computer use processes", "operationId": "StartComputerUse", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ComputerUseStartResponse" } } } } }, "/computeruse/status": { "get": { "description": "Get the current status of the computer use system", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Get computer use status", "operationId": "GetComputerUseSystemStatus", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ComputerUseStatusResponse" } } } } }, "/computeruse/stop": { "post": { "description": "Stop all computer use processes and return their status", "produces": [ "application/json" ], "tags": [ "computer-use" ], "summary": "Stop computer use processes", "operationId": "StopComputerUse", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ComputerUseStopResponse" } } } } }, "/files": { "get": { "description": "List files and directories in the specified path", "produces": [ "application/json" ], "tags": [ "file-system" ], "summary": "List files and directories", "operationId": "ListFiles", "parameters": [ { "type": "string", "description": "Directory path to list (defaults to working directory)", "name": "path", "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/FileInfo" } } } } }, "delete": { "description": "Delete a file or directory at the specified path", "tags": [ "file-system" ], "summary": "Delete a file or directory", "operationId": "DeleteFile", "parameters": [ { "type": "string", "description": "File or directory path to delete", "name": "path", "in": "query", 
"required": true }, { "type": "boolean", "description": "Enable recursive deletion for directories", "name": "recursive", "in": "query" } ], "responses": { "204": { "description": "No Content" } } } }, "/files/bulk-download": { "post": { "description": "Download multiple files by providing their paths", "consumes": [ "application/json" ], "produces": [ "multipart/form-data" ], "tags": [ "file-system" ], "summary": "Download multiple files", "operationId": "DownloadFiles", "parameters": [ { "description": "Paths of files to download", "name": "downloadFiles", "in": "body", "required": true, "schema": { "$ref": "#/definitions/FilesDownloadRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/gin.H" } } } } }, "/files/bulk-upload": { "post": { "description": "Upload multiple files with their destination paths", "consumes": [ "multipart/form-data" ], "tags": [ "file-system" ], "summary": "Upload multiple files", "operationId": "UploadFiles", "responses": { "200": { "description": "OK" } } } }, "/files/download": { "get": { "description": "Download a file by providing its path", "produces": [ "application/octet-stream" ], "tags": [ "file-system" ], "summary": "Download a file", "operationId": "DownloadFile", "parameters": [ { "type": "string", "description": "File path to download", "name": "path", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "file" } } } } }, "/files/find": { "get": { "description": "Search for text pattern within files in a directory", "produces": [ "application/json" ], "tags": [ "file-system" ], "summary": "Find text in files", "operationId": "FindInFiles", "parameters": [ { "type": "string", "description": "Directory path to search in", "name": "path", "in": "query", "required": true }, { "type": "string", "description": "Text pattern to search for", "name": "pattern", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", 
"schema": { "type": "array", "items": { "$ref": "#/definitions/Match" } } } } } }, "/files/folder": { "post": { "description": "Create a folder with the specified path and optional permissions", "consumes": [ "application/json" ], "tags": [ "file-system" ], "summary": "Create a folder", "operationId": "CreateFolder", "parameters": [ { "type": "string", "description": "Folder path to create", "name": "path", "in": "query", "required": true }, { "type": "string", "description": "Octal permission mode (default: 0755)", "name": "mode", "in": "query", "required": true } ], "responses": { "201": { "description": "Created" } } } }, "/files/info": { "get": { "description": "Get detailed information about a file or directory", "produces": [ "application/json" ], "tags": [ "file-system" ], "summary": "Get file information", "operationId": "GetFileInfo", "parameters": [ { "type": "string", "description": "File or directory path", "name": "path", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/FileInfo" } } } } }, "/files/move": { "post": { "description": "Move or rename a file or directory from source to destination", "tags": [ "file-system" ], "summary": "Move or rename file/directory", "operationId": "MoveFile", "parameters": [ { "type": "string", "description": "Source file or directory path", "name": "source", "in": "query", "required": true }, { "type": "string", "description": "Destination file or directory path", "name": "destination", "in": "query", "required": true } ], "responses": { "200": { "description": "OK" } } } }, "/files/permissions": { "post": { "description": "Set file permissions, ownership, and group for a file or directory", "tags": [ "file-system" ], "summary": "Set file permissions", "operationId": "SetFilePermissions", "parameters": [ { "type": "string", "description": "File or directory path", "name": "path", "in": "query", "required": true }, { "type": "string", "description": 
"Owner (username or UID)", "name": "owner", "in": "query" }, { "type": "string", "description": "Group (group name or GID)", "name": "group", "in": "query" }, { "type": "string", "description": "File mode in octal format (e.g., 0755)", "name": "mode", "in": "query" } ], "responses": { "200": { "description": "OK" } } } }, "/files/replace": { "post": { "description": "Replace text pattern with new value in multiple files", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "file-system" ], "summary": "Replace text in files", "operationId": "ReplaceInFiles", "parameters": [ { "description": "Replace request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/ReplaceRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/ReplaceResult" } } } } } }, "/files/search": { "get": { "description": "Search for files matching a specific pattern in a directory", "produces": [ "application/json" ], "tags": [ "file-system" ], "summary": "Search files by pattern", "operationId": "SearchFiles", "parameters": [ { "type": "string", "description": "Directory path to search in", "name": "path", "in": "query", "required": true }, { "type": "string", "description": "File pattern to match (e.g., *.txt, *.go)", "name": "pattern", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/SearchFilesResponse" } } } } }, "/files/upload": { "post": { "description": "Upload a file to the specified path", "consumes": [ "multipart/form-data" ], "tags": [ "file-system" ], "summary": "Upload a file", "operationId": "UploadFile", "parameters": [ { "type": "string", "description": "Destination path for the uploaded file", "name": "path", "in": "query", "required": true }, { "type": "file", "description": "File to upload", "name": "file", "in": "formData", "required": true } ], "responses": { "200": { 
"description": "OK", "schema": { "$ref": "#/definitions/gin.H" } } } } }, "/git/add": { "post": { "description": "Add files to the Git staging area", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "git" ], "summary": "Add files to Git staging", "operationId": "AddFiles", "parameters": [ { "description": "Add files request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitAddRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/git/branches": { "get": { "description": "Get a list of all branches in the Git repository", "produces": [ "application/json" ], "tags": [ "git" ], "summary": "List branches", "operationId": "ListBranches", "parameters": [ { "type": "string", "description": "Repository path", "name": "path", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ListBranchResponse" } } } }, "post": { "description": "Create a new branch in the Git repository", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "git" ], "summary": "Create a new branch", "operationId": "CreateBranch", "parameters": [ { "description": "Create branch request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitBranchRequest" } } ], "responses": { "201": { "description": "Created" } } }, "delete": { "description": "Delete a branch from the Git repository", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "git" ], "summary": "Delete a branch", "operationId": "DeleteBranch", "parameters": [ { "description": "Delete branch request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/git.GitDeleteBranchRequest" } } ], "responses": { "204": { "description": "No Content" } } } }, "/git/checkout": { "post": { "description": "Switch to a different branch or commit in the Git repository", "consumes": [ 
"application/json" ], "produces": [ "application/json" ], "tags": [ "git" ], "summary": "Checkout branch or commit", "operationId": "CheckoutBranch", "parameters": [ { "description": "Checkout request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitCheckoutRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/git/clone": { "post": { "description": "Clone a Git repository to the specified path", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "git" ], "summary": "Clone a Git repository", "operationId": "CloneRepository", "parameters": [ { "description": "Clone repository request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitCloneRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/git/commit": { "post": { "description": "Commit staged changes to the Git repository", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "git" ], "summary": "Commit changes", "operationId": "CommitChanges", "parameters": [ { "description": "Commit request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitCommitRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/GitCommitResponse" } } } } }, "/git/history": { "get": { "description": "Get the commit history of the Git repository", "produces": [ "application/json" ], "tags": [ "git" ], "summary": "Get commit history", "operationId": "GetCommitHistory", "parameters": [ { "type": "string", "description": "Repository path", "name": "path", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/GitCommitInfo" } } } } } }, "/git/pull": { "post": { "description": "Pull changes from the remote Git repository", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ 
"git" ], "summary": "Pull changes from remote", "operationId": "PullChanges", "parameters": [ { "description": "Pull request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitRepoRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/git/push": { "post": { "description": "Push local changes to the remote Git repository", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "git" ], "summary": "Push changes to remote", "operationId": "PushChanges", "parameters": [ { "description": "Push request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitRepoRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/git/status": { "get": { "description": "Get the Git status of the repository at the specified path", "produces": [ "application/json" ], "tags": [ "git" ], "summary": "Get Git status", "operationId": "GetStatus", "parameters": [ { "type": "string", "description": "Repository path", "name": "path", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/GitStatus" } } } } }, "/init": { "post": { "description": "Set the auth token and initialize telemetry for the toolbox server", "produces": [ "application/json" ], "tags": [ "server" ], "summary": "Initialize toolbox server", "operationId": "Initialize", "parameters": [ { "description": "Initialization request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/InitializeRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/lsp/completions": { "post": { "description": "Get code completion suggestions from the LSP server", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "lsp" ], "summary": "Get code completions", "operationId": "Completions", "parameters": [ { 
"description": "Completion request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/LspCompletionParams" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/CompletionList" } } } } }, "/lsp/did-close": { "post": { "description": "Notify the LSP server that a document has been closed", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "lsp" ], "summary": "Notify document closed", "operationId": "DidClose", "parameters": [ { "description": "Document request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/LspDocumentRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/lsp/did-open": { "post": { "description": "Notify the LSP server that a document has been opened", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "lsp" ], "summary": "Notify document opened", "operationId": "DidOpen", "parameters": [ { "description": "Document request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/LspDocumentRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/lsp/document-symbols": { "get": { "description": "Get symbols (functions, classes, etc.) 
from a document", "produces": [ "application/json" ], "tags": [ "lsp" ], "summary": "Get document symbols", "operationId": "DocumentSymbols", "parameters": [ { "type": "string", "description": "Language ID (e.g., python, typescript)", "name": "languageId", "in": "query", "required": true }, { "type": "string", "description": "Path to project", "name": "pathToProject", "in": "query", "required": true }, { "type": "string", "description": "Document URI", "name": "uri", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/LspSymbol" } } } } } }, "/lsp/start": { "post": { "description": "Start a Language Server Protocol server for the specified language", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "lsp" ], "summary": "Start LSP server", "operationId": "Start", "parameters": [ { "description": "LSP server request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/LspServerRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/lsp/stop": { "post": { "description": "Stop a Language Server Protocol server", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "lsp" ], "summary": "Stop LSP server", "operationId": "Stop", "parameters": [ { "description": "LSP server request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/LspServerRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/lsp/workspacesymbols": { "get": { "description": "Search for symbols across the entire workspace", "produces": [ "application/json" ], "tags": [ "lsp" ], "summary": "Get workspace symbols", "operationId": "WorkspaceSymbols", "parameters": [ { "type": "string", "description": "Search query", "name": "query", "in": "query", "required": true }, { "type": "string", "description": "Language ID (e.g., python, typescript)", "name": 
"languageId", "in": "query", "required": true }, { "type": "string", "description": "Path to project", "name": "pathToProject", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/LspSymbol" } } } } } }, "/port": { "get": { "description": "Get a list of all currently active ports", "produces": [ "application/json" ], "tags": [ "port" ], "summary": "Get active ports", "operationId": "GetPorts", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/PortList" } } } } }, "/port/{port}/in-use": { "get": { "description": "Check if a specific port is currently in use", "produces": [ "application/json" ], "tags": [ "port" ], "summary": "Check if port is in use", "operationId": "IsPortInUse", "parameters": [ { "type": "integer", "description": "Port number (3000-9999)", "name": "port", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/IsPortInUseResponse" } } } } }, "/process/execute": { "post": { "description": "Execute a shell command and return the output and exit code", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "process" ], "summary": "Execute a command", "operationId": "ExecuteCommand", "parameters": [ { "description": "Command execution request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/ExecuteRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ExecuteResponse" } } } } }, "/process/interpreter/context": { "get": { "description": "Returns information about all user-created interpreter contexts (excludes default context)", "produces": [ "application/json" ], "tags": [ "interpreter" ], "summary": "List all user-created interpreter contexts", "operationId": "ListInterpreterContexts", "responses": { "200": { "description": "OK", "schema": { "$ref": 
"#/definitions/ListContextsResponse" } } } }, "post": { "description": "Creates a new isolated interpreter context with optional working directory and language", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "interpreter" ], "summary": "Create a new interpreter context", "operationId": "CreateInterpreterContext", "parameters": [ { "description": "Context configuration", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/CreateContextRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/InterpreterContext" } }, "400": { "description": "Bad Request", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/process/interpreter/context/{id}": { "delete": { "description": "Deletes an interpreter context and shuts down its worker process", "produces": [ "application/json" ], "tags": [ "interpreter" ], "summary": "Delete an interpreter context", "operationId": "DeleteInterpreterContext", "parameters": [ { "type": "string", "description": "Context ID", "name": "id", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "400": { "description": "Bad Request", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "404": { "description": "Not Found", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/process/interpreter/execute": { "get": { "description": "Executes code in a specified context (or default context if not specified) via WebSocket streaming", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "interpreter" ], "summary": "Execute code in an interpreter context", "operationId": 
"ExecuteInterpreterCode", "responses": { "101": { "description": "Switching Protocols", "schema": { "type": "string" }, "headers": { "Connection": { "type": "string", "description": "Upgrade" }, "Upgrade": { "type": "string", "description": "websocket" } } } } } }, "/process/pty": { "get": { "description": "Get a list of all active pseudo-terminal sessions", "produces": [ "application/json" ], "tags": [ "process" ], "summary": "List all PTY sessions", "operationId": "ListPtySessions", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/PtyListResponse" } } } }, "post": { "description": "Create a new pseudo-terminal session with specified configuration", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "process" ], "summary": "Create a new PTY session", "operationId": "CreatePtySession", "parameters": [ { "description": "PTY session creation request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/PtyCreateRequest" } } ], "responses": { "201": { "description": "Created", "schema": { "$ref": "#/definitions/PtyCreateResponse" } } } } }, "/process/pty/{sessionId}": { "get": { "description": "Get detailed information about a specific pseudo-terminal session", "produces": [ "application/json" ], "tags": [ "process" ], "summary": "Get PTY session information", "operationId": "GetPtySession", "parameters": [ { "type": "string", "description": "PTY session ID", "name": "sessionId", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/PtySessionInfo" } } } }, "delete": { "description": "Delete a pseudo-terminal session and terminate its process", "produces": [ "application/json" ], "tags": [ "process" ], "summary": "Delete a PTY session", "operationId": "DeletePtySession", "parameters": [ { "type": "string", "description": "PTY session ID", "name": "sessionId", "in": "path", "required": true } ], "responses": { 
"200": { "description": "OK", "schema": { "$ref": "#/definitions/gin.H" } } } } }, "/process/pty/{sessionId}/connect": { "get": { "description": "Establish a WebSocket connection to interact with a pseudo-terminal session", "tags": [ "process" ], "summary": "Connect to PTY session via WebSocket", "operationId": "ConnectPtySession", "parameters": [ { "type": "string", "description": "PTY session ID", "name": "sessionId", "in": "path", "required": true } ], "responses": { "101": { "description": "Switching Protocols - WebSocket connection established" } } } }, "/process/pty/{sessionId}/resize": { "post": { "description": "Resize the terminal dimensions of a pseudo-terminal session", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "process" ], "summary": "Resize a PTY session", "operationId": "ResizePtySession", "parameters": [ { "type": "string", "description": "PTY session ID", "name": "sessionId", "in": "path", "required": true }, { "description": "Resize request with new dimensions", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/PtyResizeRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/PtySessionInfo" } } } } }, "/process/session": { "get": { "description": "Get a list of all active shell sessions", "produces": [ "application/json" ], "tags": [ "process" ], "summary": "List all sessions", "operationId": "ListSessions", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/Session" } } } } }, "post": { "description": "Create a new shell session for command execution", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "process" ], "summary": "Create a new session", "operationId": "CreateSession", "parameters": [ { "description": "Session creation request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/CreateSessionRequest" 
} } ], "responses": { "201": { "description": "Created" } } } }, "/process/session/entrypoint": { "get": { "description": "Get details of an entrypoint session including its commands", "produces": [ "application/json" ], "tags": [ "process" ], "summary": "Get entrypoint session details", "operationId": "GetEntrypointSession", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Session" } } } } }, "/process/session/entrypoint/logs": { "get": { "description": "Get logs for a sandbox entrypoint session. Supports both HTTP and WebSocket streaming.", "produces": [ "text/plain" ], "tags": [ "process" ], "summary": "Get entrypoint logs", "operationId": "GetEntrypointLogs", "parameters": [ { "type": "boolean", "description": "Follow logs in real-time (WebSocket only)", "name": "follow", "in": "query" } ], "responses": { "200": { "description": "Entrypoint log content", "schema": { "type": "string" } } } } }, "/process/session/{sessionId}": { "get": { "description": "Get details of a specific session including its commands", "produces": [ "application/json" ], "tags": [ "process" ], "summary": "Get session details", "operationId": "GetSession", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Session" } } } }, "delete": { "description": "Delete an existing shell session", "tags": [ "process" ], "summary": "Delete a session", "operationId": "DeleteSession", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true } ], "responses": { "204": { "description": "No Content" } } } }, "/process/session/{sessionId}/command/{commandId}": { "get": { "description": "Get details of a specific command within a session", "produces": [ "application/json" ], "tags": [ "process" ], "summary": "Get session command details", "operationId": 
"GetSessionCommand", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true }, { "type": "string", "description": "Command ID", "name": "commandId", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Command" } } } } }, "/process/session/{sessionId}/command/{commandId}/input": { "post": { "description": "Send input data to a running command in a session for interactive execution", "consumes": [ "application/json" ], "tags": [ "process" ], "summary": "Send input to command", "operationId": "SendInput", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true }, { "type": "string", "description": "Command ID", "name": "commandId", "in": "path", "required": true }, { "description": "Input send request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/SessionSendInputRequest" } } ], "responses": { "204": { "description": "No Content" } } } }, "/process/session/{sessionId}/command/{commandId}/logs": { "get": { "description": "Get logs for a specific command within a session. 
Supports both HTTP and WebSocket streaming.", "produces": [ "text/plain" ], "tags": [ "process" ], "summary": "Get session command logs", "operationId": "GetSessionCommandLogs", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true }, { "type": "string", "description": "Command ID", "name": "commandId", "in": "path", "required": true }, { "type": "boolean", "description": "Follow logs in real-time (WebSocket only)", "name": "follow", "in": "query" } ], "responses": { "200": { "description": "Log content", "schema": { "type": "string" } } } } }, "/process/session/{sessionId}/exec": { "post": { "description": "Execute a command within an existing shell session", "consumes": [ "application/json" ], "produces": [ "application/json" ], "tags": [ "process" ], "summary": "Execute command in session", "operationId": "SessionExecuteCommand", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true }, { "description": "Command execution request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/SessionExecuteRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/SessionExecuteResponse" } }, "202": { "description": "Accepted", "schema": { "$ref": "#/definitions/SessionExecuteResponse" } } } } }, "/user-home-dir": { "get": { "description": "Get the current user home directory path.", "produces": [ "application/json" ], "tags": [ "info" ], "summary": "Get user home directory", "operationId": "GetUserHomeDir", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/UserHomeDirResponse" } } } } }, "/version": { "get": { "description": "Get the current daemon version", "produces": [ "application/json" ], "tags": [ "info" ], "summary": "Get version", "operationId": "GetVersion", "responses": { "200": { "description": "OK", "schema": { "type": "object", 
"additionalProperties": { "type": "string" } } } } } }, "/work-dir": { "get": { "description": "Get the current working directory path. This is the default directory used for running commands.", "produces": [ "application/json" ], "tags": [ "info" ], "summary": "Get working directory", "operationId": "GetWorkDir", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/WorkDirResponse" } } } } } }, "definitions": { "Command": { "type": "object", "required": [ "command", "id" ], "properties": { "command": { "type": "string" }, "exitCode": { "type": "integer" }, "id": { "type": "string" } } }, "CompletionContext": { "type": "object", "required": [ "triggerKind" ], "properties": { "triggerCharacter": { "type": "string" }, "triggerKind": { "type": "integer" } } }, "CompletionItem": { "type": "object", "required": [ "label" ], "properties": { "detail": { "type": "string" }, "documentation": {}, "filterText": { "type": "string" }, "insertText": { "type": "string" }, "kind": { "type": "integer" }, "label": { "type": "string" }, "sortText": { "type": "string" } } }, "CompletionList": { "type": "object", "required": [ "isIncomplete", "items" ], "properties": { "isIncomplete": { "type": "boolean" }, "items": { "type": "array", "items": { "$ref": "#/definitions/CompletionItem" } } } }, "ComputerUseStartResponse": { "type": "object", "properties": { "message": { "type": "string" }, "status": { "type": "object", "additionalProperties": { "$ref": "#/definitions/ProcessStatus" } } } }, "ComputerUseStatusResponse": { "type": "object", "properties": { "status": { "type": "string" } } }, "ComputerUseStopResponse": { "type": "object", "properties": { "message": { "type": "string" }, "status": { "type": "object", "additionalProperties": { "$ref": "#/definitions/ProcessStatus" } } } }, "CreateContextRequest": { "type": "object", "properties": { "cwd": { "type": "string" }, "language": { "type": "string" } } }, "CreateSessionRequest": { "type": "object", "required": 
[ "sessionId" ], "properties": { "sessionId": { "type": "string" } } }, "DisplayInfo": { "type": "object", "properties": { "height": { "type": "integer" }, "id": { "type": "integer" }, "isActive": { "type": "boolean" }, "width": { "type": "integer" }, "x": { "type": "integer" }, "y": { "type": "integer" } } }, "DisplayInfoResponse": { "type": "object", "properties": { "displays": { "type": "array", "items": { "$ref": "#/definitions/DisplayInfo" } } } }, "Empty": { "type": "object" }, "ExecuteRequest": { "type": "object", "required": [ "command" ], "properties": { "command": { "type": "string" }, "cwd": { "description": "Current working directory", "type": "string" }, "timeout": { "description": "Timeout in seconds, defaults to 10 seconds", "type": "integer" } } }, "ExecuteResponse": { "type": "object", "required": [ "result" ], "properties": { "exitCode": { "type": "integer" }, "result": { "type": "string" } } }, "FileInfo": { "type": "object", "required": [ "group", "isDir", "modTime", "mode", "name", "owner", "permissions", "size" ], "properties": { "group": { "type": "string" }, "isDir": { "type": "boolean" }, "modTime": { "type": "string" }, "mode": { "type": "string" }, "name": { "type": "string" }, "owner": { "type": "string" }, "permissions": { "type": "string" }, "size": { "type": "integer" } } }, "FileStatus": { "type": "object", "required": [ "extra", "name", "staging", "worktree" ], "properties": { "extra": { "type": "string" }, "name": { "type": "string" }, "staging": { "$ref": "#/definitions/Status" }, "worktree": { "$ref": "#/definitions/Status" } } }, "FilesDownloadRequest": { "type": "object", "required": [ "paths" ], "properties": { "paths": { "type": "array", "items": { "type": "string" } } } }, "GitAddRequest": { "type": "object", "required": [ "files", "path" ], "properties": { "files": { "description": "files to add (use . 
for all files)", "type": "array", "items": { "type": "string" } }, "path": { "type": "string" } } }, "GitBranchRequest": { "type": "object", "required": [ "name", "path" ], "properties": { "name": { "type": "string" }, "path": { "type": "string" } } }, "GitCheckoutRequest": { "type": "object", "required": [ "branch", "path" ], "properties": { "branch": { "type": "string" }, "path": { "type": "string" } } }, "GitCloneRequest": { "type": "object", "required": [ "path", "url" ], "properties": { "branch": { "type": "string" }, "commit_id": { "type": "string" }, "password": { "type": "string" }, "path": { "type": "string" }, "url": { "type": "string" }, "username": { "type": "string" } } }, "GitCommitInfo": { "type": "object", "required": [ "author", "email", "hash", "message", "timestamp" ], "properties": { "author": { "type": "string" }, "email": { "type": "string" }, "hash": { "type": "string" }, "message": { "type": "string" }, "timestamp": { "type": "string" } } }, "GitCommitRequest": { "type": "object", "required": [ "author", "email", "message", "path" ], "properties": { "allow_empty": { "type": "boolean" }, "author": { "type": "string" }, "email": { "type": "string" }, "message": { "type": "string" }, "path": { "type": "string" } } }, "GitCommitResponse": { "type": "object", "required": [ "hash" ], "properties": { "hash": { "type": "string" } } }, "GitRepoRequest": { "type": "object", "required": [ "path" ], "properties": { "password": { "type": "string" }, "path": { "type": "string" }, "username": { "type": "string" } } }, "GitStatus": { "type": "object", "required": [ "currentBranch", "fileStatus" ], "properties": { "ahead": { "type": "integer" }, "behind": { "type": "integer" }, "branchPublished": { "type": "boolean" }, "currentBranch": { "type": "string" }, "fileStatus": { "type": "array", "items": { "$ref": "#/definitions/FileStatus" } } } }, "InitializeRequest": { "type": "object", "required": [ "token" ], "properties": { "token": { "type": "string" } } }, 
"InterpreterContext": { "type": "object", "required": [ "active", "createdAt", "cwd", "id", "language" ], "properties": { "active": { "type": "boolean" }, "createdAt": { "type": "string" }, "cwd": { "type": "string" }, "id": { "type": "string" }, "language": { "type": "string" } } }, "IsPortInUseResponse": { "type": "object", "properties": { "isInUse": { "type": "boolean" } } }, "KeyboardHotkeyRequest": { "type": "object", "properties": { "keys": { "description": "e.g., \"ctrl+c\", \"cmd+v\"", "type": "string" } } }, "KeyboardPressRequest": { "type": "object", "properties": { "key": { "type": "string" }, "modifiers": { "description": "ctrl, alt, shift, cmd", "type": "array", "items": { "type": "string" } } } }, "KeyboardTypeRequest": { "type": "object", "properties": { "delay": { "description": "milliseconds between keystrokes", "type": "integer" }, "text": { "type": "string" } } }, "ListBranchResponse": { "type": "object", "required": [ "branches" ], "properties": { "branches": { "type": "array", "items": { "type": "string" } } } }, "ListContextsResponse": { "type": "object", "required": [ "contexts" ], "properties": { "contexts": { "type": "array", "items": { "$ref": "#/definitions/InterpreterContext" } } } }, "ListRecordingsResponse": { "type": "object", "required": [ "recordings" ], "properties": { "recordings": { "type": "array", "items": { "$ref": "#/definitions/Recording" } } } }, "LspCompletionParams": { "type": "object", "required": [ "languageId", "pathToProject", "position", "uri" ], "properties": { "context": { "$ref": "#/definitions/CompletionContext" }, "languageId": { "type": "string" }, "pathToProject": { "type": "string" }, "position": { "$ref": "#/definitions/LspPosition" }, "uri": { "type": "string" } } }, "LspDocumentRequest": { "type": "object", "required": [ "languageId", "pathToProject", "uri" ], "properties": { "languageId": { "type": "string" }, "pathToProject": { "type": "string" }, "uri": { "type": "string" } } }, "LspLocation": { "type": 
"object", "required": [ "range", "uri" ], "properties": { "range": { "$ref": "#/definitions/LspRange" }, "uri": { "type": "string" } } }, "LspPosition": { "type": "object", "required": [ "character", "line" ], "properties": { "character": { "type": "integer" }, "line": { "type": "integer" } } }, "LspRange": { "type": "object", "required": [ "end", "start" ], "properties": { "end": { "$ref": "#/definitions/LspPosition" }, "start": { "$ref": "#/definitions/LspPosition" } } }, "LspServerRequest": { "type": "object", "required": [ "languageId", "pathToProject" ], "properties": { "languageId": { "type": "string" }, "pathToProject": { "type": "string" } } }, "LspSymbol": { "type": "object", "required": [ "kind", "location", "name" ], "properties": { "kind": { "type": "integer" }, "location": { "$ref": "#/definitions/LspLocation" }, "name": { "type": "string" } } }, "Match": { "type": "object", "required": [ "content", "file", "line" ], "properties": { "content": { "type": "string" }, "file": { "type": "string" }, "line": { "type": "integer" } } }, "MouseClickRequest": { "type": "object", "properties": { "button": { "description": "left, right, middle", "type": "string" }, "double": { "type": "boolean" }, "x": { "type": "integer" }, "y": { "type": "integer" } } }, "MouseClickResponse": { "type": "object", "properties": { "x": { "type": "integer" }, "y": { "type": "integer" } } }, "MouseDragRequest": { "type": "object", "properties": { "button": { "type": "string" }, "endX": { "type": "integer" }, "endY": { "type": "integer" }, "startX": { "type": "integer" }, "startY": { "type": "integer" } } }, "MouseDragResponse": { "type": "object", "properties": { "x": { "type": "integer" }, "y": { "type": "integer" } } }, "MouseMoveRequest": { "type": "object", "properties": { "x": { "type": "integer" }, "y": { "type": "integer" } } }, "MousePositionResponse": { "type": "object", "properties": { "x": { "type": "integer" }, "y": { "type": "integer" } } }, "MouseScrollRequest": { 
"type": "object", "properties": { "amount": { "type": "integer" }, "direction": { "description": "up, down", "type": "string" }, "x": { "type": "integer" }, "y": { "type": "integer" } } }, "PortList": { "type": "object", "properties": { "ports": { "type": "array", "items": { "type": "integer" } } } }, "Position": { "type": "object", "properties": { "x": { "type": "integer" }, "y": { "type": "integer" } } }, "ProcessErrorsResponse": { "type": "object", "properties": { "errors": { "type": "string" }, "processName": { "type": "string" } } }, "ProcessLogsResponse": { "type": "object", "properties": { "logs": { "type": "string" }, "processName": { "type": "string" } } }, "ProcessRestartResponse": { "type": "object", "properties": { "message": { "type": "string" }, "processName": { "type": "string" } } }, "ProcessStatus": { "type": "object", "properties": { "autoRestart": { "type": "boolean" }, "pid": { "type": "integer" }, "priority": { "type": "integer" }, "running": { "type": "boolean" } } }, "ProcessStatusResponse": { "type": "object", "properties": { "processName": { "type": "string" }, "running": { "type": "boolean" } } }, "PtyCreateRequest": { "type": "object", "properties": { "cols": { "type": "integer" }, "cwd": { "type": "string" }, "envs": { "type": "object", "additionalProperties": { "type": "string" } }, "id": { "type": "string" }, "lazyStart": { "description": "Don't start PTY until first client connects", "type": "boolean" }, "rows": { "type": "integer" } } }, "PtyCreateResponse": { "type": "object", "required": [ "sessionId" ], "properties": { "sessionId": { "type": "string" } } }, "PtyListResponse": { "type": "object", "required": [ "sessions" ], "properties": { "sessions": { "type": "array", "items": { "$ref": "#/definitions/PtySessionInfo" } } } }, "PtyResizeRequest": { "type": "object", "required": [ "cols", "rows" ], "properties": { "cols": { "type": "integer", "maximum": 1000, "minimum": 1 }, "rows": { "type": "integer", "maximum": 1000, "minimum": 
1 } } }, "PtySessionInfo": { "type": "object", "required": [ "active", "cols", "createdAt", "cwd", "envs", "id", "lazyStart", "rows" ], "properties": { "active": { "type": "boolean" }, "cols": { "type": "integer" }, "createdAt": { "type": "string" }, "cwd": { "type": "string" }, "envs": { "type": "object", "additionalProperties": { "type": "string" } }, "id": { "type": "string" }, "lazyStart": { "description": "Whether this session uses lazy start", "type": "boolean" }, "rows": { "type": "integer" } } }, "Recording": { "type": "object", "required": [ "fileName", "filePath", "id", "startTime", "status" ], "properties": { "durationSeconds": { "type": "number" }, "endTime": { "type": "string" }, "fileName": { "type": "string" }, "filePath": { "type": "string" }, "id": { "type": "string" }, "sizeBytes": { "type": "integer" }, "startTime": { "type": "string" }, "status": { "type": "string" } } }, "ReplaceRequest": { "type": "object", "required": [ "files", "newValue", "pattern" ], "properties": { "files": { "type": "array", "items": { "type": "string" } }, "newValue": { "type": "string" }, "pattern": { "type": "string" } } }, "ReplaceResult": { "type": "object", "properties": { "error": { "type": "string" }, "file": { "type": "string" }, "success": { "type": "boolean" } } }, "ScreenshotResponse": { "type": "object", "properties": { "cursorPosition": { "$ref": "#/definitions/Position" }, "screenshot": { "type": "string" }, "sizeBytes": { "type": "integer" } } }, "ScrollResponse": { "type": "object", "properties": { "success": { "type": "boolean" } } }, "SearchFilesResponse": { "type": "object", "required": [ "files" ], "properties": { "files": { "type": "array", "items": { "type": "string" } } } }, "Session": { "type": "object", "required": [ "commands", "sessionId" ], "properties": { "commands": { "type": "array", "items": { "$ref": "#/definitions/Command" } }, "sessionId": { "type": "string" } } }, "SessionExecuteRequest": { "type": "object", "required": [ "command" ], 
"properties": { "async": { "type": "boolean" }, "command": { "type": "string" }, "runAsync": { "type": "boolean" }, "suppressInputEcho": { "type": "boolean" } } }, "SessionExecuteResponse": { "type": "object", "required": [ "cmdId" ], "properties": { "cmdId": { "type": "string" }, "exitCode": { "type": "integer" }, "output": { "type": "string" }, "stderr": { "type": "string" }, "stdout": { "type": "string" } } }, "SessionSendInputRequest": { "type": "object", "required": [ "data" ], "properties": { "data": { "type": "string" } } }, "StartRecordingRequest": { "type": "object", "properties": { "label": { "type": "string" } } }, "Status": { "type": "string", "enum": [ "Unmodified", "Untracked", "Modified", "Added", "Deleted", "Renamed", "Copied", "Updated but unmerged" ], "x-enum-varnames": [ "Unmodified", "Untracked", "Modified", "Added", "Deleted", "Renamed", "Copied", "UpdatedButUnmerged" ] }, "StopRecordingRequest": { "type": "object", "required": [ "id" ], "properties": { "id": { "type": "string" } } }, "UserHomeDirResponse": { "type": "object", "required": [ "dir" ], "properties": { "dir": { "type": "string" } } }, "WindowInfo": { "type": "object", "properties": { "height": { "type": "integer" }, "id": { "type": "integer" }, "isActive": { "type": "boolean" }, "title": { "type": "string" }, "width": { "type": "integer" }, "x": { "type": "integer" }, "y": { "type": "integer" } } }, "WindowsResponse": { "type": "object", "properties": { "windows": { "type": "array", "items": { "$ref": "#/definitions/WindowInfo" } } } }, "WorkDirResponse": { "type": "object", "required": [ "dir" ], "properties": { "dir": { "type": "string" } } }, "gin.H": { "type": "object", "additionalProperties": {} }, "git.GitDeleteBranchRequest": { "type": "object", "required": [ "name", "path" ], "properties": { "name": { "type": "string" }, "path": { "type": "string" } } } } }` // SwaggerInfo holds exported Swagger Info so clients can modify it var SwaggerInfo = &swag.Spec{ Version: 
"v0.0.0-dev", Host: "", BasePath: "", Schemes: []string{}, Title: "Daytona Toolbox API", Description: "Daytona Toolbox API", InfoInstanceName: "swagger", SwaggerTemplate: docTemplate, LeftDelim: "{{", RightDelim: "}}", } func init() { swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) } ================================================ FILE: apps/daemon/pkg/toolbox/docs/swagger.json ================================================ { "swagger": "2.0", "info": { "description": "Daytona Toolbox API", "title": "Daytona Toolbox API", "contact": {}, "version": "v0.0.0-dev" }, "paths": { "/computeruse/display/info": { "get": { "description": "Get information about all available displays", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Get display information", "operationId": "GetDisplayInfo", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/DisplayInfoResponse" } } } } }, "/computeruse/display/windows": { "get": { "description": "Get information about all open windows", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Get windows information", "operationId": "GetWindows", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/WindowsResponse" } } } } }, "/computeruse/keyboard/hotkey": { "post": { "description": "Press a hotkey combination (e.g., ctrl+c, cmd+v)", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["computer-use"], "summary": "Press hotkey", "operationId": "PressHotkey", "parameters": [ { "description": "Hotkey press request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/KeyboardHotkeyRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Empty" } } } } }, "/computeruse/keyboard/key": { "post": { "description": "Press a key with optional modifiers", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["computer-use"], 
"summary": "Press key", "operationId": "PressKey", "parameters": [ { "description": "Key press request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/KeyboardPressRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Empty" } } } } }, "/computeruse/keyboard/type": { "post": { "description": "Type text with optional delay between keystrokes", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["computer-use"], "summary": "Type text", "operationId": "TypeText", "parameters": [ { "description": "Text typing request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/KeyboardTypeRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Empty" } } } } }, "/computeruse/mouse/click": { "post": { "description": "Click the mouse button at the specified coordinates", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["computer-use"], "summary": "Click mouse button", "operationId": "Click", "parameters": [ { "description": "Mouse click request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/MouseClickRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/MouseClickResponse" } } } } }, "/computeruse/mouse/drag": { "post": { "description": "Drag the mouse from start to end coordinates", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["computer-use"], "summary": "Drag mouse", "operationId": "Drag", "parameters": [ { "description": "Mouse drag request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/MouseDragRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/MouseDragResponse" } } } } }, "/computeruse/mouse/move": { "post": { "description": "Move the mouse cursor to the specified 
coordinates", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["computer-use"], "summary": "Move mouse cursor", "operationId": "MoveMouse", "parameters": [ { "description": "Mouse move request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/MouseMoveRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/MousePositionResponse" } } } } }, "/computeruse/mouse/position": { "get": { "description": "Get the current mouse cursor position", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Get mouse position", "operationId": "GetMousePosition", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/MousePositionResponse" } } } } }, "/computeruse/mouse/scroll": { "post": { "description": "Scroll the mouse wheel at the specified coordinates", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["computer-use"], "summary": "Scroll mouse wheel", "operationId": "Scroll", "parameters": [ { "description": "Mouse scroll request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/MouseScrollRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ScrollResponse" } } } } }, "/computeruse/process-status": { "get": { "description": "Get the status of all computer use processes", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Get computer use process status", "operationId": "GetComputerUseStatus", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ComputerUseStatusResponse" } } } } }, "/computeruse/process/{processName}/errors": { "get": { "description": "Get errors for a specific computer use process", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Get process errors", "operationId": "GetProcessErrors", "parameters": [ { "type": "string", "description": 
"Process name to get errors for", "name": "processName", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ProcessErrorsResponse" } } } } }, "/computeruse/process/{processName}/logs": { "get": { "description": "Get logs for a specific computer use process", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Get process logs", "operationId": "GetProcessLogs", "parameters": [ { "type": "string", "description": "Process name to get logs for", "name": "processName", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ProcessLogsResponse" } } } } }, "/computeruse/process/{processName}/restart": { "post": { "description": "Restart a specific computer use process", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Restart specific process", "operationId": "RestartProcess", "parameters": [ { "type": "string", "description": "Process name to restart", "name": "processName", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ProcessRestartResponse" } } } } }, "/computeruse/process/{processName}/status": { "get": { "description": "Check if a specific computer use process is running", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Get specific process status", "operationId": "GetProcessStatus", "parameters": [ { "type": "string", "description": "Process name to check", "name": "processName", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ProcessStatusResponse" } } } } }, "/computeruse/recordings": { "get": { "description": "Get a list of all recordings (active and completed)", "produces": ["application/json"], "tags": ["computer-use"], "summary": "List all recordings", "operationId": "ListRecordings", "responses": { "200": { "description": "OK", 
"schema": { "$ref": "#/definitions/ListRecordingsResponse" } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/computeruse/recordings/start": { "post": { "description": "Start a new screen recording session", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["computer-use"], "summary": "Start a new recording", "operationId": "StartRecording", "parameters": [ { "description": "Recording options", "name": "request", "in": "body", "schema": { "$ref": "#/definitions/StartRecordingRequest" } } ], "responses": { "201": { "description": "Created", "schema": { "$ref": "#/definitions/Recording" } }, "400": { "description": "Bad Request", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/computeruse/recordings/stop": { "post": { "description": "Stop an active screen recording session", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["computer-use"], "summary": "Stop a recording", "operationId": "StopRecording", "parameters": [ { "description": "Recording ID to stop", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/StopRecordingRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Recording" } }, "400": { "description": "Bad Request", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "404": { "description": "Not Found", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/computeruse/recordings/{id}": { "get": { "description": "Get details of a specific recording by ID", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Get recording details", "operationId": "GetRecording", "parameters": [ { "type": 
"string", "description": "Recording ID", "name": "id", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Recording" } }, "404": { "description": "Not Found", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } }, "delete": { "description": "Delete a recording file by ID", "tags": ["computer-use"], "summary": "Delete a recording", "operationId": "DeleteRecording", "parameters": [ { "type": "string", "description": "Recording ID", "name": "id", "in": "path", "required": true } ], "responses": { "204": { "description": "No Content" }, "400": { "description": "Bad Request", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "404": { "description": "Not Found", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/computeruse/recordings/{id}/download": { "get": { "description": "Download a recording by providing its ID", "produces": ["application/octet-stream"], "tags": ["computer-use"], "summary": "Download a recording", "operationId": "DownloadRecording", "parameters": [ { "type": "string", "description": "Recording ID", "name": "id", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "file" } }, "404": { "description": "Not Found", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/computeruse/screenshot": { "get": { "description": "Take a screenshot of the entire screen", "produces": ["application/json"], "tags": ["computer-use"], "summary": 
"Take a screenshot", "operationId": "TakeScreenshot", "parameters": [ { "type": "boolean", "description": "Whether to show cursor in screenshot", "name": "showCursor", "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ScreenshotResponse" } } } } }, "/computeruse/screenshot/compressed": { "get": { "description": "Take a compressed screenshot of the entire screen", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Take a compressed screenshot", "operationId": "TakeCompressedScreenshot", "parameters": [ { "type": "boolean", "description": "Whether to show cursor in screenshot", "name": "showCursor", "in": "query" }, { "type": "string", "description": "Image format (png or jpeg)", "name": "format", "in": "query" }, { "type": "integer", "description": "JPEG quality (1-100)", "name": "quality", "in": "query" }, { "type": "number", "description": "Scale factor (0.1-1.0)", "name": "scale", "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ScreenshotResponse" } } } } }, "/computeruse/screenshot/region": { "get": { "description": "Take a screenshot of a specific region of the screen", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Take a region screenshot", "operationId": "TakeRegionScreenshot", "parameters": [ { "type": "integer", "description": "X coordinate of the region", "name": "x", "in": "query", "required": true }, { "type": "integer", "description": "Y coordinate of the region", "name": "y", "in": "query", "required": true }, { "type": "integer", "description": "Width of the region", "name": "width", "in": "query", "required": true }, { "type": "integer", "description": "Height of the region", "name": "height", "in": "query", "required": true }, { "type": "boolean", "description": "Whether to show cursor in screenshot", "name": "showCursor", "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { "$ref": 
"#/definitions/ScreenshotResponse" } } } } }, "/computeruse/screenshot/region/compressed": { "get": { "description": "Take a compressed screenshot of a specific region of the screen", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Take a compressed region screenshot", "operationId": "TakeCompressedRegionScreenshot", "parameters": [ { "type": "integer", "description": "X coordinate of the region", "name": "x", "in": "query", "required": true }, { "type": "integer", "description": "Y coordinate of the region", "name": "y", "in": "query", "required": true }, { "type": "integer", "description": "Width of the region", "name": "width", "in": "query", "required": true }, { "type": "integer", "description": "Height of the region", "name": "height", "in": "query", "required": true }, { "type": "boolean", "description": "Whether to show cursor in screenshot", "name": "showCursor", "in": "query" }, { "type": "string", "description": "Image format (png or jpeg)", "name": "format", "in": "query" }, { "type": "integer", "description": "JPEG quality (1-100)", "name": "quality", "in": "query" }, { "type": "number", "description": "Scale factor (0.1-1.0)", "name": "scale", "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ScreenshotResponse" } } } } }, "/computeruse/start": { "post": { "description": "Start all computer use processes and return their status", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Start computer use processes", "operationId": "StartComputerUse", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ComputerUseStartResponse" } } } } }, "/computeruse/status": { "get": { "description": "Get the current status of the computer use system", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Get computer use status", "operationId": "GetComputerUseSystemStatus", "responses": { "200": { "description": "OK", "schema": { 
"$ref": "#/definitions/ComputerUseStatusResponse" } } } } }, "/computeruse/stop": { "post": { "description": "Stop all computer use processes and return their status", "produces": ["application/json"], "tags": ["computer-use"], "summary": "Stop computer use processes", "operationId": "StopComputerUse", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ComputerUseStopResponse" } } } } }, "/files": { "get": { "description": "List files and directories in the specified path", "produces": ["application/json"], "tags": ["file-system"], "summary": "List files and directories", "operationId": "ListFiles", "parameters": [ { "type": "string", "description": "Directory path to list (defaults to working directory)", "name": "path", "in": "query" } ], "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/FileInfo" } } } } }, "delete": { "description": "Delete a file or directory at the specified path", "tags": ["file-system"], "summary": "Delete a file or directory", "operationId": "DeleteFile", "parameters": [ { "type": "string", "description": "File or directory path to delete", "name": "path", "in": "query", "required": true }, { "type": "boolean", "description": "Enable recursive deletion for directories", "name": "recursive", "in": "query" } ], "responses": { "204": { "description": "No Content" } } } }, "/files/bulk-download": { "post": { "description": "Download multiple files by providing their paths", "consumes": ["application/json"], "produces": ["multipart/form-data"], "tags": ["file-system"], "summary": "Download multiple files", "operationId": "DownloadFiles", "parameters": [ { "description": "Paths of files to download", "name": "downloadFiles", "in": "body", "required": true, "schema": { "$ref": "#/definitions/FilesDownloadRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/gin.H" } } } } }, "/files/bulk-upload": { "post": { 
"description": "Upload multiple files with their destination paths", "consumes": ["multipart/form-data"], "tags": ["file-system"], "summary": "Upload multiple files", "operationId": "UploadFiles", "responses": { "200": { "description": "OK" } } } }, "/files/download": { "get": { "description": "Download a file by providing its path", "produces": ["application/octet-stream"], "tags": ["file-system"], "summary": "Download a file", "operationId": "DownloadFile", "parameters": [ { "type": "string", "description": "File path to download", "name": "path", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "file" } } } } }, "/files/find": { "get": { "description": "Search for text pattern within files in a directory", "produces": ["application/json"], "tags": ["file-system"], "summary": "Find text in files", "operationId": "FindInFiles", "parameters": [ { "type": "string", "description": "Directory path to search in", "name": "path", "in": "query", "required": true }, { "type": "string", "description": "Text pattern to search for", "name": "pattern", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/Match" } } } } } }, "/files/folder": { "post": { "description": "Create a folder with the specified path and optional permissions", "consumes": ["application/json"], "tags": ["file-system"], "summary": "Create a folder", "operationId": "CreateFolder", "parameters": [ { "type": "string", "description": "Folder path to create", "name": "path", "in": "query", "required": true }, { "type": "string", "description": "Octal permission mode (default: 0755)", "name": "mode", "in": "query", "required": true } ], "responses": { "201": { "description": "Created" } } } }, "/files/info": { "get": { "description": "Get detailed information about a file or directory", "produces": ["application/json"], "tags": ["file-system"], "summary": "Get file 
information", "operationId": "GetFileInfo", "parameters": [ { "type": "string", "description": "File or directory path", "name": "path", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/FileInfo" } } } } }, "/files/move": { "post": { "description": "Move or rename a file or directory from source to destination", "tags": ["file-system"], "summary": "Move or rename file/directory", "operationId": "MoveFile", "parameters": [ { "type": "string", "description": "Source file or directory path", "name": "source", "in": "query", "required": true }, { "type": "string", "description": "Destination file or directory path", "name": "destination", "in": "query", "required": true } ], "responses": { "200": { "description": "OK" } } } }, "/files/permissions": { "post": { "description": "Set file permissions, ownership, and group for a file or directory", "tags": ["file-system"], "summary": "Set file permissions", "operationId": "SetFilePermissions", "parameters": [ { "type": "string", "description": "File or directory path", "name": "path", "in": "query", "required": true }, { "type": "string", "description": "Owner (username or UID)", "name": "owner", "in": "query" }, { "type": "string", "description": "Group (group name or GID)", "name": "group", "in": "query" }, { "type": "string", "description": "File mode in octal format (e.g., 0755)", "name": "mode", "in": "query" } ], "responses": { "200": { "description": "OK" } } } }, "/files/replace": { "post": { "description": "Replace text pattern with new value in multiple files", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["file-system"], "summary": "Replace text in files", "operationId": "ReplaceInFiles", "parameters": [ { "description": "Replace request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/ReplaceRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "type": 
"array", "items": { "$ref": "#/definitions/ReplaceResult" } } } } } }, "/files/search": { "get": { "description": "Search for files matching a specific pattern in a directory", "produces": ["application/json"], "tags": ["file-system"], "summary": "Search files by pattern", "operationId": "SearchFiles", "parameters": [ { "type": "string", "description": "Directory path to search in", "name": "path", "in": "query", "required": true }, { "type": "string", "description": "File pattern to match (e.g., *.txt, *.go)", "name": "pattern", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/SearchFilesResponse" } } } } }, "/files/upload": { "post": { "description": "Upload a file to the specified path", "consumes": ["multipart/form-data"], "tags": ["file-system"], "summary": "Upload a file", "operationId": "UploadFile", "parameters": [ { "type": "string", "description": "Destination path for the uploaded file", "name": "path", "in": "query", "required": true }, { "type": "file", "description": "File to upload", "name": "file", "in": "formData", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/gin.H" } } } } }, "/git/add": { "post": { "description": "Add files to the Git staging area", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["git"], "summary": "Add files to Git staging", "operationId": "AddFiles", "parameters": [ { "description": "Add files request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitAddRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/git/branches": { "get": { "description": "Get a list of all branches in the Git repository", "produces": ["application/json"], "tags": ["git"], "summary": "List branches", "operationId": "ListBranches", "parameters": [ { "type": "string", "description": "Repository path", "name": "path", "in": "query", "required": 
true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ListBranchResponse" } } } }, "post": { "description": "Create a new branch in the Git repository", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["git"], "summary": "Create a new branch", "operationId": "CreateBranch", "parameters": [ { "description": "Create branch request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitBranchRequest" } } ], "responses": { "201": { "description": "Created" } } }, "delete": { "description": "Delete a branch from the Git repository", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["git"], "summary": "Delete a branch", "operationId": "DeleteBranch", "parameters": [ { "description": "Delete branch request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/git.GitDeleteBranchRequest" } } ], "responses": { "204": { "description": "No Content" } } } }, "/git/checkout": { "post": { "description": "Switch to a different branch or commit in the Git repository", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["git"], "summary": "Checkout branch or commit", "operationId": "CheckoutBranch", "parameters": [ { "description": "Checkout request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitCheckoutRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/git/clone": { "post": { "description": "Clone a Git repository to the specified path", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["git"], "summary": "Clone a Git repository", "operationId": "CloneRepository", "parameters": [ { "description": "Clone repository request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitCloneRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/git/commit": { "post": 
{ "description": "Commit staged changes to the Git repository", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["git"], "summary": "Commit changes", "operationId": "CommitChanges", "parameters": [ { "description": "Commit request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitCommitRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/GitCommitResponse" } } } } }, "/git/history": { "get": { "description": "Get the commit history of the Git repository", "produces": ["application/json"], "tags": ["git"], "summary": "Get commit history", "operationId": "GetCommitHistory", "parameters": [ { "type": "string", "description": "Repository path", "name": "path", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/GitCommitInfo" } } } } } }, "/git/pull": { "post": { "description": "Pull changes from the remote Git repository", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["git"], "summary": "Pull changes from remote", "operationId": "PullChanges", "parameters": [ { "description": "Pull request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitRepoRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/git/push": { "post": { "description": "Push local changes to the remote Git repository", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["git"], "summary": "Push changes to remote", "operationId": "PushChanges", "parameters": [ { "description": "Push request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/GitRepoRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/git/status": { "get": { "description": "Get the Git status of the repository at the specified path", "produces": ["application/json"], 
"tags": ["git"], "summary": "Get Git status", "operationId": "GetStatus", "parameters": [ { "type": "string", "description": "Repository path", "name": "path", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/GitStatus" } } } } }, "/init": { "post": { "description": "Set the auth token and initialize telemetry for the toolbox server", "produces": ["application/json"], "tags": ["server"], "summary": "Initialize toolbox server", "operationId": "Initialize", "parameters": [ { "description": "Initialization request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/InitializeRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/lsp/completions": { "post": { "description": "Get code completion suggestions from the LSP server", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["lsp"], "summary": "Get code completions", "operationId": "Completions", "parameters": [ { "description": "Completion request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/LspCompletionParams" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/CompletionList" } } } } }, "/lsp/did-close": { "post": { "description": "Notify the LSP server that a document has been closed", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["lsp"], "summary": "Notify document closed", "operationId": "DidClose", "parameters": [ { "description": "Document request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/LspDocumentRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/lsp/did-open": { "post": { "description": "Notify the LSP server that a document has been opened", "consumes": ["application/json"], "produces": ["application/json"], 
"tags": ["lsp"], "summary": "Notify document opened", "operationId": "DidOpen", "parameters": [ { "description": "Document request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/LspDocumentRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/lsp/document-symbols": { "get": { "description": "Get symbols (functions, classes, etc.) from a document", "produces": ["application/json"], "tags": ["lsp"], "summary": "Get document symbols", "operationId": "DocumentSymbols", "parameters": [ { "type": "string", "description": "Language ID (e.g., python, typescript)", "name": "languageId", "in": "query", "required": true }, { "type": "string", "description": "Path to project", "name": "pathToProject", "in": "query", "required": true }, { "type": "string", "description": "Document URI", "name": "uri", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/LspSymbol" } } } } } }, "/lsp/start": { "post": { "description": "Start a Language Server Protocol server for the specified language", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["lsp"], "summary": "Start LSP server", "operationId": "Start", "parameters": [ { "description": "LSP server request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/LspServerRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/lsp/stop": { "post": { "description": "Stop a Language Server Protocol server", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["lsp"], "summary": "Stop LSP server", "operationId": "Stop", "parameters": [ { "description": "LSP server request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/LspServerRequest" } } ], "responses": { "200": { "description": "OK" } } } }, "/lsp/workspacesymbols": { "get": { "description": "Search for 
symbols across the entire workspace", "produces": ["application/json"], "tags": ["lsp"], "summary": "Get workspace symbols", "operationId": "WorkspaceSymbols", "parameters": [ { "type": "string", "description": "Search query", "name": "query", "in": "query", "required": true }, { "type": "string", "description": "Language ID (e.g., python, typescript)", "name": "languageId", "in": "query", "required": true }, { "type": "string", "description": "Path to project", "name": "pathToProject", "in": "query", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/LspSymbol" } } } } } }, "/port": { "get": { "description": "Get a list of all currently active ports", "produces": ["application/json"], "tags": ["port"], "summary": "Get active ports", "operationId": "GetPorts", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/PortList" } } } } }, "/port/{port}/in-use": { "get": { "description": "Check if a specific port is currently in use", "produces": ["application/json"], "tags": ["port"], "summary": "Check if port is in use", "operationId": "IsPortInUse", "parameters": [ { "type": "integer", "description": "Port number (3000-9999)", "name": "port", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/IsPortInUseResponse" } } } } }, "/process/execute": { "post": { "description": "Execute a shell command and return the output and exit code", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["process"], "summary": "Execute a command", "operationId": "ExecuteCommand", "parameters": [ { "description": "Command execution request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/ExecuteRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ExecuteResponse" } } } } }, "/process/interpreter/context": { "get": { 
"description": "Returns information about all user-created interpreter contexts (excludes default context)", "produces": ["application/json"], "tags": ["interpreter"], "summary": "List all user-created interpreter contexts", "operationId": "ListInterpreterContexts", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/ListContextsResponse" } } } }, "post": { "description": "Creates a new isolated interpreter context with optional working directory and language", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["interpreter"], "summary": "Create a new interpreter context", "operationId": "CreateInterpreterContext", "parameters": [ { "description": "Context configuration", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/CreateContextRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/InterpreterContext" } }, "400": { "description": "Bad Request", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "500": { "description": "Internal Server Error", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, "/process/interpreter/context/{id}": { "delete": { "description": "Deletes an interpreter context and shuts down its worker process", "produces": ["application/json"], "tags": ["interpreter"], "summary": "Delete an interpreter context", "operationId": "DeleteInterpreterContext", "parameters": [ { "type": "string", "description": "Context ID", "name": "id", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "400": { "description": "Bad Request", "schema": { "type": "object", "additionalProperties": { "type": "string" } } }, "404": { "description": "Not Found", "schema": { "type": "object", "additionalProperties": { "type": "string" } } } } } }, 
"/process/interpreter/execute": { "get": { "description": "Executes code in a specified context (or default context if not specified) via WebSocket streaming", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["interpreter"], "summary": "Execute code in an interpreter context", "operationId": "ExecuteInterpreterCode", "responses": { "101": { "description": "Switching Protocols", "schema": { "type": "string" }, "headers": { "Connection": { "type": "string", "description": "Upgrade" }, "Upgrade": { "type": "string", "description": "websocket" } } } } } }, "/process/pty": { "get": { "description": "Get a list of all active pseudo-terminal sessions", "produces": ["application/json"], "tags": ["process"], "summary": "List all PTY sessions", "operationId": "ListPtySessions", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/PtyListResponse" } } } }, "post": { "description": "Create a new pseudo-terminal session with specified configuration", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["process"], "summary": "Create a new PTY session", "operationId": "CreatePtySession", "parameters": [ { "description": "PTY session creation request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/PtyCreateRequest" } } ], "responses": { "201": { "description": "Created", "schema": { "$ref": "#/definitions/PtyCreateResponse" } } } } }, "/process/pty/{sessionId}": { "get": { "description": "Get detailed information about a specific pseudo-terminal session", "produces": ["application/json"], "tags": ["process"], "summary": "Get PTY session information", "operationId": "GetPtySession", "parameters": [ { "type": "string", "description": "PTY session ID", "name": "sessionId", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/PtySessionInfo" } } } }, "delete": { "description": "Delete a pseudo-terminal 
session and terminate its process", "produces": ["application/json"], "tags": ["process"], "summary": "Delete a PTY session", "operationId": "DeletePtySession", "parameters": [ { "type": "string", "description": "PTY session ID", "name": "sessionId", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/gin.H" } } } } }, "/process/pty/{sessionId}/connect": { "get": { "description": "Establish a WebSocket connection to interact with a pseudo-terminal session", "tags": ["process"], "summary": "Connect to PTY session via WebSocket", "operationId": "ConnectPtySession", "parameters": [ { "type": "string", "description": "PTY session ID", "name": "sessionId", "in": "path", "required": true } ], "responses": { "101": { "description": "Switching Protocols - WebSocket connection established" } } } }, "/process/pty/{sessionId}/resize": { "post": { "description": "Resize the terminal dimensions of a pseudo-terminal session", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["process"], "summary": "Resize a PTY session", "operationId": "ResizePtySession", "parameters": [ { "type": "string", "description": "PTY session ID", "name": "sessionId", "in": "path", "required": true }, { "description": "Resize request with new dimensions", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/PtyResizeRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/PtySessionInfo" } } } } }, "/process/session": { "get": { "description": "Get a list of all active shell sessions", "produces": ["application/json"], "tags": ["process"], "summary": "List all sessions", "operationId": "ListSessions", "responses": { "200": { "description": "OK", "schema": { "type": "array", "items": { "$ref": "#/definitions/Session" } } } } }, "post": { "description": "Create a new shell session for command execution", "consumes": ["application/json"], 
"produces": ["application/json"], "tags": ["process"], "summary": "Create a new session", "operationId": "CreateSession", "parameters": [ { "description": "Session creation request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/CreateSessionRequest" } } ], "responses": { "201": { "description": "Created" } } } }, "/process/session/entrypoint": { "get": { "description": "Get details of an entrypoint session including its commands", "produces": ["application/json"], "tags": ["process"], "summary": "Get entrypoint session details", "operationId": "GetEntrypointSession", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Session" } } } } }, "/process/session/entrypoint/logs": { "get": { "description": "Get logs for a sandbox entrypoint session. Supports both HTTP and WebSocket streaming.", "produces": ["text/plain"], "tags": ["process"], "summary": "Get entrypoint logs", "operationId": "GetEntrypointLogs", "parameters": [ { "type": "boolean", "description": "Follow logs in real-time (WebSocket only)", "name": "follow", "in": "query" } ], "responses": { "200": { "description": "Entrypoint log content", "schema": { "type": "string" } } } } }, "/process/session/{sessionId}": { "get": { "description": "Get details of a specific session including its commands", "produces": ["application/json"], "tags": ["process"], "summary": "Get session details", "operationId": "GetSession", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Session" } } } }, "delete": { "description": "Delete an existing shell session", "tags": ["process"], "summary": "Delete a session", "operationId": "DeleteSession", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true } ], "responses": { "204": { "description": "No 
Content" } } } }, "/process/session/{sessionId}/command/{commandId}": { "get": { "description": "Get details of a specific command within a session", "produces": ["application/json"], "tags": ["process"], "summary": "Get session command details", "operationId": "GetSessionCommand", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true }, { "type": "string", "description": "Command ID", "name": "commandId", "in": "path", "required": true } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/Command" } } } } }, "/process/session/{sessionId}/command/{commandId}/input": { "post": { "description": "Send input data to a running command in a session for interactive execution", "consumes": ["application/json"], "tags": ["process"], "summary": "Send input to command", "operationId": "SendInput", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true }, { "type": "string", "description": "Command ID", "name": "commandId", "in": "path", "required": true }, { "description": "Input send request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/SessionSendInputRequest" } } ], "responses": { "204": { "description": "No Content" } } } }, "/process/session/{sessionId}/command/{commandId}/logs": { "get": { "description": "Get logs for a specific command within a session. 
Supports both HTTP and WebSocket streaming.", "produces": ["text/plain"], "tags": ["process"], "summary": "Get session command logs", "operationId": "GetSessionCommandLogs", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true }, { "type": "string", "description": "Command ID", "name": "commandId", "in": "path", "required": true }, { "type": "boolean", "description": "Follow logs in real-time (WebSocket only)", "name": "follow", "in": "query" } ], "responses": { "200": { "description": "Log content", "schema": { "type": "string" } } } } }, "/process/session/{sessionId}/exec": { "post": { "description": "Execute a command within an existing shell session", "consumes": ["application/json"], "produces": ["application/json"], "tags": ["process"], "summary": "Execute command in session", "operationId": "SessionExecuteCommand", "parameters": [ { "type": "string", "description": "Session ID", "name": "sessionId", "in": "path", "required": true }, { "description": "Command execution request", "name": "request", "in": "body", "required": true, "schema": { "$ref": "#/definitions/SessionExecuteRequest" } } ], "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/SessionExecuteResponse" } }, "202": { "description": "Accepted", "schema": { "$ref": "#/definitions/SessionExecuteResponse" } } } } }, "/user-home-dir": { "get": { "description": "Get the current user home directory path.", "produces": ["application/json"], "tags": ["info"], "summary": "Get user home directory", "operationId": "GetUserHomeDir", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/UserHomeDirResponse" } } } } }, "/version": { "get": { "description": "Get the current daemon version", "produces": ["application/json"], "tags": ["info"], "summary": "Get version", "operationId": "GetVersion", "responses": { "200": { "description": "OK", "schema": { "type": "object", "additionalProperties": { 
"type": "string" } } } } } }, "/work-dir": { "get": { "description": "Get the current working directory path. This is default directory used for running commands.", "produces": ["application/json"], "tags": ["info"], "summary": "Get working directory", "operationId": "GetWorkDir", "responses": { "200": { "description": "OK", "schema": { "$ref": "#/definitions/WorkDirResponse" } } } } } }, "definitions": { "Command": { "type": "object", "required": ["command", "id"], "properties": { "command": { "type": "string" }, "exitCode": { "type": "integer" }, "id": { "type": "string" } } }, "CompletionContext": { "type": "object", "required": ["triggerKind"], "properties": { "triggerCharacter": { "type": "string" }, "triggerKind": { "type": "integer" } } }, "CompletionItem": { "type": "object", "required": ["label"], "properties": { "detail": { "type": "string" }, "documentation": {}, "filterText": { "type": "string" }, "insertText": { "type": "string" }, "kind": { "type": "integer" }, "label": { "type": "string" }, "sortText": { "type": "string" } } }, "CompletionList": { "type": "object", "required": ["isIncomplete", "items"], "properties": { "isIncomplete": { "type": "boolean" }, "items": { "type": "array", "items": { "$ref": "#/definitions/CompletionItem" } } } }, "ComputerUseStartResponse": { "type": "object", "properties": { "message": { "type": "string" }, "status": { "type": "object", "additionalProperties": { "$ref": "#/definitions/ProcessStatus" } } } }, "ComputerUseStatusResponse": { "type": "object", "properties": { "status": { "type": "string" } } }, "ComputerUseStopResponse": { "type": "object", "properties": { "message": { "type": "string" }, "status": { "type": "object", "additionalProperties": { "$ref": "#/definitions/ProcessStatus" } } } }, "CreateContextRequest": { "type": "object", "properties": { "cwd": { "type": "string" }, "language": { "type": "string" } } }, "CreateSessionRequest": { "type": "object", "required": ["sessionId"], "properties": { 
"sessionId": { "type": "string" } } }, "DisplayInfo": { "type": "object", "properties": { "height": { "type": "integer" }, "id": { "type": "integer" }, "isActive": { "type": "boolean" }, "width": { "type": "integer" }, "x": { "type": "integer" }, "y": { "type": "integer" } } }, "DisplayInfoResponse": { "type": "object", "properties": { "displays": { "type": "array", "items": { "$ref": "#/definitions/DisplayInfo" } } } }, "Empty": { "type": "object" }, "ExecuteRequest": { "type": "object", "required": ["command"], "properties": { "command": { "type": "string" }, "cwd": { "description": "Current working directory", "type": "string" }, "timeout": { "description": "Timeout in seconds, defaults to 10 seconds", "type": "integer" } } }, "ExecuteResponse": { "type": "object", "required": ["result"], "properties": { "exitCode": { "type": "integer" }, "result": { "type": "string" } } }, "FileInfo": { "type": "object", "required": ["group", "isDir", "modTime", "mode", "name", "owner", "permissions", "size"], "properties": { "group": { "type": "string" }, "isDir": { "type": "boolean" }, "modTime": { "type": "string" }, "mode": { "type": "string" }, "name": { "type": "string" }, "owner": { "type": "string" }, "permissions": { "type": "string" }, "size": { "type": "integer" } } }, "FileStatus": { "type": "object", "required": ["extra", "name", "staging", "worktree"], "properties": { "extra": { "type": "string" }, "name": { "type": "string" }, "staging": { "$ref": "#/definitions/Status" }, "worktree": { "$ref": "#/definitions/Status" } } }, "FilesDownloadRequest": { "type": "object", "required": ["paths"], "properties": { "paths": { "type": "array", "items": { "type": "string" } } } }, "GitAddRequest": { "type": "object", "required": ["files", "path"], "properties": { "files": { "description": "files to add (use . 
for all files)", "type": "array", "items": { "type": "string" } }, "path": { "type": "string" } } }, "GitBranchRequest": { "type": "object", "required": ["name", "path"], "properties": { "name": { "type": "string" }, "path": { "type": "string" } } }, "GitCheckoutRequest": { "type": "object", "required": ["branch", "path"], "properties": { "branch": { "type": "string" }, "path": { "type": "string" } } }, "GitCloneRequest": { "type": "object", "required": ["path", "url"], "properties": { "branch": { "type": "string" }, "commit_id": { "type": "string" }, "password": { "type": "string" }, "path": { "type": "string" }, "url": { "type": "string" }, "username": { "type": "string" } } }, "GitCommitInfo": { "type": "object", "required": ["author", "email", "hash", "message", "timestamp"], "properties": { "author": { "type": "string" }, "email": { "type": "string" }, "hash": { "type": "string" }, "message": { "type": "string" }, "timestamp": { "type": "string" } } }, "GitCommitRequest": { "type": "object", "required": ["author", "email", "message", "path"], "properties": { "allow_empty": { "type": "boolean" }, "author": { "type": "string" }, "email": { "type": "string" }, "message": { "type": "string" }, "path": { "type": "string" } } }, "GitCommitResponse": { "type": "object", "required": ["hash"], "properties": { "hash": { "type": "string" } } }, "GitRepoRequest": { "type": "object", "required": ["path"], "properties": { "password": { "type": "string" }, "path": { "type": "string" }, "username": { "type": "string" } } }, "GitStatus": { "type": "object", "required": ["currentBranch", "fileStatus"], "properties": { "ahead": { "type": "integer" }, "behind": { "type": "integer" }, "branchPublished": { "type": "boolean" }, "currentBranch": { "type": "string" }, "fileStatus": { "type": "array", "items": { "$ref": "#/definitions/FileStatus" } } } }, "InitializeRequest": { "type": "object", "required": ["token"], "properties": { "token": { "type": "string" } } }, 
"InterpreterContext": { "type": "object", "required": ["active", "createdAt", "cwd", "id", "language"], "properties": { "active": { "type": "boolean" }, "createdAt": { "type": "string" }, "cwd": { "type": "string" }, "id": { "type": "string" }, "language": { "type": "string" } } }, "IsPortInUseResponse": { "type": "object", "properties": { "isInUse": { "type": "boolean" } } }, "KeyboardHotkeyRequest": { "type": "object", "properties": { "keys": { "description": "e.g., \"ctrl+c\", \"cmd+v\"", "type": "string" } } }, "KeyboardPressRequest": { "type": "object", "properties": { "key": { "type": "string" }, "modifiers": { "description": "ctrl, alt, shift, cmd", "type": "array", "items": { "type": "string" } } } }, "KeyboardTypeRequest": { "type": "object", "properties": { "delay": { "description": "milliseconds between keystrokes", "type": "integer" }, "text": { "type": "string" } } }, "ListBranchResponse": { "type": "object", "required": ["branches"], "properties": { "branches": { "type": "array", "items": { "type": "string" } } } }, "ListContextsResponse": { "type": "object", "required": ["contexts"], "properties": { "contexts": { "type": "array", "items": { "$ref": "#/definitions/InterpreterContext" } } } }, "ListRecordingsResponse": { "type": "object", "required": ["recordings"], "properties": { "recordings": { "type": "array", "items": { "$ref": "#/definitions/Recording" } } } }, "LspCompletionParams": { "type": "object", "required": ["languageId", "pathToProject", "position", "uri"], "properties": { "context": { "$ref": "#/definitions/CompletionContext" }, "languageId": { "type": "string" }, "pathToProject": { "type": "string" }, "position": { "$ref": "#/definitions/LspPosition" }, "uri": { "type": "string" } } }, "LspDocumentRequest": { "type": "object", "required": ["languageId", "pathToProject", "uri"], "properties": { "languageId": { "type": "string" }, "pathToProject": { "type": "string" }, "uri": { "type": "string" } } }, "LspLocation": { "type": "object", 
"required": ["range", "uri"], "properties": { "range": { "$ref": "#/definitions/LspRange" }, "uri": { "type": "string" } } }, "LspPosition": { "type": "object", "required": ["character", "line"], "properties": { "character": { "type": "integer" }, "line": { "type": "integer" } } }, "LspRange": { "type": "object", "required": ["end", "start"], "properties": { "end": { "$ref": "#/definitions/LspPosition" }, "start": { "$ref": "#/definitions/LspPosition" } } }, "LspServerRequest": { "type": "object", "required": ["languageId", "pathToProject"], "properties": { "languageId": { "type": "string" }, "pathToProject": { "type": "string" } } }, "LspSymbol": { "type": "object", "required": ["kind", "location", "name"], "properties": { "kind": { "type": "integer" }, "location": { "$ref": "#/definitions/LspLocation" }, "name": { "type": "string" } } }, "Match": { "type": "object", "required": ["content", "file", "line"], "properties": { "content": { "type": "string" }, "file": { "type": "string" }, "line": { "type": "integer" } } }, "MouseClickRequest": { "type": "object", "properties": { "button": { "description": "left, right, middle", "type": "string" }, "double": { "type": "boolean" }, "x": { "type": "integer" }, "y": { "type": "integer" } } }, "MouseClickResponse": { "type": "object", "properties": { "x": { "type": "integer" }, "y": { "type": "integer" } } }, "MouseDragRequest": { "type": "object", "properties": { "button": { "type": "string" }, "endX": { "type": "integer" }, "endY": { "type": "integer" }, "startX": { "type": "integer" }, "startY": { "type": "integer" } } }, "MouseDragResponse": { "type": "object", "properties": { "x": { "type": "integer" }, "y": { "type": "integer" } } }, "MouseMoveRequest": { "type": "object", "properties": { "x": { "type": "integer" }, "y": { "type": "integer" } } }, "MousePositionResponse": { "type": "object", "properties": { "x": { "type": "integer" }, "y": { "type": "integer" } } }, "MouseScrollRequest": { "type": "object", 
"properties": { "amount": { "type": "integer" }, "direction": { "description": "up, down", "type": "string" }, "x": { "type": "integer" }, "y": { "type": "integer" } } }, "PortList": { "type": "object", "properties": { "ports": { "type": "array", "items": { "type": "integer" } } } }, "Position": { "type": "object", "properties": { "x": { "type": "integer" }, "y": { "type": "integer" } } }, "ProcessErrorsResponse": { "type": "object", "properties": { "errors": { "type": "string" }, "processName": { "type": "string" } } }, "ProcessLogsResponse": { "type": "object", "properties": { "logs": { "type": "string" }, "processName": { "type": "string" } } }, "ProcessRestartResponse": { "type": "object", "properties": { "message": { "type": "string" }, "processName": { "type": "string" } } }, "ProcessStatus": { "type": "object", "properties": { "autoRestart": { "type": "boolean" }, "pid": { "type": "integer" }, "priority": { "type": "integer" }, "running": { "type": "boolean" } } }, "ProcessStatusResponse": { "type": "object", "properties": { "processName": { "type": "string" }, "running": { "type": "boolean" } } }, "PtyCreateRequest": { "type": "object", "properties": { "cols": { "type": "integer" }, "cwd": { "type": "string" }, "envs": { "type": "object", "additionalProperties": { "type": "string" } }, "id": { "type": "string" }, "lazyStart": { "description": "Don't start PTY until first client connects", "type": "boolean" }, "rows": { "type": "integer" } } }, "PtyCreateResponse": { "type": "object", "required": ["sessionId"], "properties": { "sessionId": { "type": "string" } } }, "PtyListResponse": { "type": "object", "required": ["sessions"], "properties": { "sessions": { "type": "array", "items": { "$ref": "#/definitions/PtySessionInfo" } } } }, "PtyResizeRequest": { "type": "object", "required": ["cols", "rows"], "properties": { "cols": { "type": "integer", "maximum": 1000, "minimum": 1 }, "rows": { "type": "integer", "maximum": 1000, "minimum": 1 } } }, 
"PtySessionInfo": { "type": "object", "required": ["active", "cols", "createdAt", "cwd", "envs", "id", "lazyStart", "rows"], "properties": { "active": { "type": "boolean" }, "cols": { "type": "integer" }, "createdAt": { "type": "string" }, "cwd": { "type": "string" }, "envs": { "type": "object", "additionalProperties": { "type": "string" } }, "id": { "type": "string" }, "lazyStart": { "description": "Whether this session uses lazy start", "type": "boolean" }, "rows": { "type": "integer" } } }, "Recording": { "type": "object", "required": ["fileName", "filePath", "id", "startTime", "status"], "properties": { "durationSeconds": { "type": "number" }, "endTime": { "type": "string" }, "fileName": { "type": "string" }, "filePath": { "type": "string" }, "id": { "type": "string" }, "sizeBytes": { "type": "integer" }, "startTime": { "type": "string" }, "status": { "type": "string" } } }, "ReplaceRequest": { "type": "object", "required": ["files", "newValue", "pattern"], "properties": { "files": { "type": "array", "items": { "type": "string" } }, "newValue": { "type": "string" }, "pattern": { "type": "string" } } }, "ReplaceResult": { "type": "object", "properties": { "error": { "type": "string" }, "file": { "type": "string" }, "success": { "type": "boolean" } } }, "ScreenshotResponse": { "type": "object", "properties": { "cursorPosition": { "$ref": "#/definitions/Position" }, "screenshot": { "type": "string" }, "sizeBytes": { "type": "integer" } } }, "ScrollResponse": { "type": "object", "properties": { "success": { "type": "boolean" } } }, "SearchFilesResponse": { "type": "object", "required": ["files"], "properties": { "files": { "type": "array", "items": { "type": "string" } } } }, "Session": { "type": "object", "required": ["commands", "sessionId"], "properties": { "commands": { "type": "array", "items": { "$ref": "#/definitions/Command" } }, "sessionId": { "type": "string" } } }, "SessionExecuteRequest": { "type": "object", "required": ["command"], "properties": { 
"async": { "type": "boolean" }, "command": { "type": "string" }, "runAsync": { "type": "boolean" }, "suppressInputEcho": { "type": "boolean" } } }, "SessionExecuteResponse": { "type": "object", "required": ["cmdId"], "properties": { "cmdId": { "type": "string" }, "exitCode": { "type": "integer" }, "output": { "type": "string" }, "stderr": { "type": "string" }, "stdout": { "type": "string" } } }, "SessionSendInputRequest": { "type": "object", "required": ["data"], "properties": { "data": { "type": "string" } } }, "StartRecordingRequest": { "type": "object", "properties": { "label": { "type": "string" } } }, "Status": { "type": "string", "enum": ["Unmodified", "Untracked", "Modified", "Added", "Deleted", "Renamed", "Copied", "Updated but unmerged"], "x-enum-varnames": [ "Unmodified", "Untracked", "Modified", "Added", "Deleted", "Renamed", "Copied", "UpdatedButUnmerged" ] }, "StopRecordingRequest": { "type": "object", "required": ["id"], "properties": { "id": { "type": "string" } } }, "UserHomeDirResponse": { "type": "object", "required": ["dir"], "properties": { "dir": { "type": "string" } } }, "WindowInfo": { "type": "object", "properties": { "height": { "type": "integer" }, "id": { "type": "integer" }, "isActive": { "type": "boolean" }, "title": { "type": "string" }, "width": { "type": "integer" }, "x": { "type": "integer" }, "y": { "type": "integer" } } }, "WindowsResponse": { "type": "object", "properties": { "windows": { "type": "array", "items": { "$ref": "#/definitions/WindowInfo" } } } }, "WorkDirResponse": { "type": "object", "required": ["dir"], "properties": { "dir": { "type": "string" } } }, "gin.H": { "type": "object", "additionalProperties": {} }, "git.GitDeleteBranchRequest": { "type": "object", "required": ["name", "path"], "properties": { "name": { "type": "string" }, "path": { "type": "string" } } } } } ================================================ FILE: apps/daemon/pkg/toolbox/docs/swagger.yaml ================================================ 
definitions: Command: properties: command: type: string exitCode: type: integer id: type: string required: - command - id type: object CompletionContext: properties: triggerCharacter: type: string triggerKind: type: integer required: - triggerKind type: object CompletionItem: properties: detail: type: string documentation: {} filterText: type: string insertText: type: string kind: type: integer label: type: string sortText: type: string required: - label type: object CompletionList: properties: isIncomplete: type: boolean items: items: $ref: '#/definitions/CompletionItem' type: array required: - isIncomplete - items type: object ComputerUseStartResponse: properties: message: type: string status: additionalProperties: $ref: '#/definitions/ProcessStatus' type: object type: object ComputerUseStatusResponse: properties: status: type: string type: object ComputerUseStopResponse: properties: message: type: string status: additionalProperties: $ref: '#/definitions/ProcessStatus' type: object type: object CreateContextRequest: properties: cwd: type: string language: type: string type: object CreateSessionRequest: properties: sessionId: type: string required: - sessionId type: object DisplayInfo: properties: height: type: integer id: type: integer isActive: type: boolean width: type: integer x: type: integer 'y': type: integer type: object DisplayInfoResponse: properties: displays: items: $ref: '#/definitions/DisplayInfo' type: array type: object Empty: type: object ExecuteRequest: properties: command: type: string cwd: description: Current working directory type: string timeout: description: Timeout in seconds, defaults to 10 seconds type: integer required: - command type: object ExecuteResponse: properties: exitCode: type: integer result: type: string required: - result type: object FileInfo: properties: group: type: string isDir: type: boolean modTime: type: string mode: type: string name: type: string owner: type: string permissions: type: string size: type: integer 
required: - group - isDir - modTime - mode - name - owner - permissions - size type: object FileStatus: properties: extra: type: string name: type: string staging: $ref: '#/definitions/Status' worktree: $ref: '#/definitions/Status' required: - extra - name - staging - worktree type: object FilesDownloadRequest: properties: paths: items: type: string type: array required: - paths type: object GitAddRequest: properties: files: description: files to add (use . for all files) items: type: string type: array path: type: string required: - files - path type: object GitBranchRequest: properties: name: type: string path: type: string required: - name - path type: object GitCheckoutRequest: properties: branch: type: string path: type: string required: - branch - path type: object GitCloneRequest: properties: branch: type: string commit_id: type: string password: type: string path: type: string url: type: string username: type: string required: - path - url type: object GitCommitInfo: properties: author: type: string email: type: string hash: type: string message: type: string timestamp: type: string required: - author - email - hash - message - timestamp type: object GitCommitRequest: properties: allow_empty: type: boolean author: type: string email: type: string message: type: string path: type: string required: - author - email - message - path type: object GitCommitResponse: properties: hash: type: string required: - hash type: object GitRepoRequest: properties: password: type: string path: type: string username: type: string required: - path type: object GitStatus: properties: ahead: type: integer behind: type: integer branchPublished: type: boolean currentBranch: type: string fileStatus: items: $ref: '#/definitions/FileStatus' type: array required: - currentBranch - fileStatus type: object InitializeRequest: properties: token: type: string required: - token type: object InterpreterContext: properties: active: type: boolean createdAt: type: string cwd: type: string id: 
type: string language: type: string required: - active - createdAt - cwd - id - language type: object IsPortInUseResponse: properties: isInUse: type: boolean type: object KeyboardHotkeyRequest: properties: keys: description: e.g., "ctrl+c", "cmd+v" type: string type: object KeyboardPressRequest: properties: key: type: string modifiers: description: ctrl, alt, shift, cmd items: type: string type: array type: object KeyboardTypeRequest: properties: delay: description: milliseconds between keystrokes type: integer text: type: string type: object ListBranchResponse: properties: branches: items: type: string type: array required: - branches type: object ListContextsResponse: properties: contexts: items: $ref: '#/definitions/InterpreterContext' type: array required: - contexts type: object ListRecordingsResponse: properties: recordings: items: $ref: '#/definitions/Recording' type: array required: - recordings type: object LspCompletionParams: properties: context: $ref: '#/definitions/CompletionContext' languageId: type: string pathToProject: type: string position: $ref: '#/definitions/LspPosition' uri: type: string required: - languageId - pathToProject - position - uri type: object LspDocumentRequest: properties: languageId: type: string pathToProject: type: string uri: type: string required: - languageId - pathToProject - uri type: object LspLocation: properties: range: $ref: '#/definitions/LspRange' uri: type: string required: - range - uri type: object LspPosition: properties: character: type: integer line: type: integer required: - character - line type: object LspRange: properties: end: $ref: '#/definitions/LspPosition' start: $ref: '#/definitions/LspPosition' required: - end - start type: object LspServerRequest: properties: languageId: type: string pathToProject: type: string required: - languageId - pathToProject type: object LspSymbol: properties: kind: type: integer location: $ref: '#/definitions/LspLocation' name: type: string required: - kind - location - 
name type: object Match: properties: content: type: string file: type: string line: type: integer required: - content - file - line type: object MouseClickRequest: properties: button: description: left, right, middle type: string double: type: boolean x: type: integer 'y': type: integer type: object MouseClickResponse: properties: x: type: integer 'y': type: integer type: object MouseDragRequest: properties: button: type: string endX: type: integer endY: type: integer startX: type: integer startY: type: integer type: object MouseDragResponse: properties: x: type: integer 'y': type: integer type: object MouseMoveRequest: properties: x: type: integer 'y': type: integer type: object MousePositionResponse: properties: x: type: integer 'y': type: integer type: object MouseScrollRequest: properties: amount: type: integer direction: description: up, down type: string x: type: integer 'y': type: integer type: object PortList: properties: ports: items: type: integer type: array type: object Position: properties: x: type: integer 'y': type: integer type: object ProcessErrorsResponse: properties: errors: type: string processName: type: string type: object ProcessLogsResponse: properties: logs: type: string processName: type: string type: object ProcessRestartResponse: properties: message: type: string processName: type: string type: object ProcessStatus: properties: autoRestart: type: boolean pid: type: integer priority: type: integer running: type: boolean type: object ProcessStatusResponse: properties: processName: type: string running: type: boolean type: object PtyCreateRequest: properties: cols: type: integer cwd: type: string envs: additionalProperties: type: string type: object id: type: string lazyStart: description: Don't start PTY until first client connects type: boolean rows: type: integer type: object PtyCreateResponse: properties: sessionId: type: string required: - sessionId type: object PtyListResponse: properties: sessions: items: $ref: 
'#/definitions/PtySessionInfo' type: array required: - sessions type: object PtyResizeRequest: properties: cols: maximum: 1000 minimum: 1 type: integer rows: maximum: 1000 minimum: 1 type: integer required: - cols - rows type: object PtySessionInfo: properties: active: type: boolean cols: type: integer createdAt: type: string cwd: type: string envs: additionalProperties: type: string type: object id: type: string lazyStart: description: Whether this session uses lazy start type: boolean rows: type: integer required: - active - cols - createdAt - cwd - envs - id - lazyStart - rows type: object Recording: properties: durationSeconds: type: number endTime: type: string fileName: type: string filePath: type: string id: type: string sizeBytes: type: integer startTime: type: string status: type: string required: - fileName - filePath - id - startTime - status type: object ReplaceRequest: properties: files: items: type: string type: array newValue: type: string pattern: type: string required: - files - newValue - pattern type: object ReplaceResult: properties: error: type: string file: type: string success: type: boolean type: object ScreenshotResponse: properties: cursorPosition: $ref: '#/definitions/Position' screenshot: type: string sizeBytes: type: integer type: object ScrollResponse: properties: success: type: boolean type: object SearchFilesResponse: properties: files: items: type: string type: array required: - files type: object Session: properties: commands: items: $ref: '#/definitions/Command' type: array sessionId: type: string required: - commands - sessionId type: object SessionExecuteRequest: properties: async: type: boolean command: type: string runAsync: type: boolean suppressInputEcho: type: boolean required: - command type: object SessionExecuteResponse: properties: cmdId: type: string exitCode: type: integer output: type: string stderr: type: string stdout: type: string required: - cmdId type: object SessionSendInputRequest: properties: data: type: 
string required: - data type: object StartRecordingRequest: properties: label: type: string type: object Status: enum: - Unmodified - Untracked - Modified - Added - Deleted - Renamed - Copied - Updated but unmerged type: string x-enum-varnames: - Unmodified - Untracked - Modified - Added - Deleted - Renamed - Copied - UpdatedButUnmerged StopRecordingRequest: properties: id: type: string required: - id type: object UserHomeDirResponse: properties: dir: type: string required: - dir type: object WindowInfo: properties: height: type: integer id: type: integer isActive: type: boolean title: type: string width: type: integer x: type: integer 'y': type: integer type: object WindowsResponse: properties: windows: items: $ref: '#/definitions/WindowInfo' type: array type: object WorkDirResponse: properties: dir: type: string required: - dir type: object gin.H: additionalProperties: {} type: object git.GitDeleteBranchRequest: properties: name: type: string path: type: string required: - name - path type: object info: contact: {} description: Daytona Toolbox API title: Daytona Toolbox API version: v0.0.0-dev paths: /computeruse/display/info: get: description: Get information about all available displays operationId: GetDisplayInfo produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/DisplayInfoResponse' summary: Get display information tags: - computer-use /computeruse/display/windows: get: description: Get information about all open windows operationId: GetWindows produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/WindowsResponse' summary: Get windows information tags: - computer-use /computeruse/keyboard/hotkey: post: consumes: - application/json description: Press a hotkey combination (e.g., ctrl+c, cmd+v) operationId: PressHotkey parameters: - description: Hotkey press request in: body name: request required: true schema: $ref: '#/definitions/KeyboardHotkeyRequest' produces: - application/json 
responses: '200': description: OK schema: $ref: '#/definitions/Empty' summary: Press hotkey tags: - computer-use /computeruse/keyboard/key: post: consumes: - application/json description: Press a key with optional modifiers operationId: PressKey parameters: - description: Key press request in: body name: request required: true schema: $ref: '#/definitions/KeyboardPressRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/Empty' summary: Press key tags: - computer-use /computeruse/keyboard/type: post: consumes: - application/json description: Type text with optional delay between keystrokes operationId: TypeText parameters: - description: Text typing request in: body name: request required: true schema: $ref: '#/definitions/KeyboardTypeRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/Empty' summary: Type text tags: - computer-use /computeruse/mouse/click: post: consumes: - application/json description: Click the mouse button at the specified coordinates operationId: Click parameters: - description: Mouse click request in: body name: request required: true schema: $ref: '#/definitions/MouseClickRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/MouseClickResponse' summary: Click mouse button tags: - computer-use /computeruse/mouse/drag: post: consumes: - application/json description: Drag the mouse from start to end coordinates operationId: Drag parameters: - description: Mouse drag request in: body name: request required: true schema: $ref: '#/definitions/MouseDragRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/MouseDragResponse' summary: Drag mouse tags: - computer-use /computeruse/mouse/move: post: consumes: - application/json description: Move the mouse cursor to the specified coordinates operationId: MoveMouse parameters: - description: Mouse move request in: body 
name: request required: true schema: $ref: '#/definitions/MouseMoveRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/MousePositionResponse' summary: Move mouse cursor tags: - computer-use /computeruse/mouse/position: get: description: Get the current mouse cursor position operationId: GetMousePosition produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/MousePositionResponse' summary: Get mouse position tags: - computer-use /computeruse/mouse/scroll: post: consumes: - application/json description: Scroll the mouse wheel at the specified coordinates operationId: Scroll parameters: - description: Mouse scroll request in: body name: request required: true schema: $ref: '#/definitions/MouseScrollRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ScrollResponse' summary: Scroll mouse wheel tags: - computer-use /computeruse/process-status: get: description: Get the status of all computer use processes operationId: GetComputerUseStatus produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ComputerUseStatusResponse' summary: Get computer use process status tags: - computer-use /computeruse/process/{processName}/errors: get: description: Get errors for a specific computer use process operationId: GetProcessErrors parameters: - description: Process name to get errors for in: path name: processName required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ProcessErrorsResponse' summary: Get process errors tags: - computer-use /computeruse/process/{processName}/logs: get: description: Get logs for a specific computer use process operationId: GetProcessLogs parameters: - description: Process name to get logs for in: path name: processName required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: 
'#/definitions/ProcessLogsResponse' summary: Get process logs tags: - computer-use /computeruse/process/{processName}/restart: post: description: Restart a specific computer use process operationId: RestartProcess parameters: - description: Process name to restart in: path name: processName required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ProcessRestartResponse' summary: Restart specific process tags: - computer-use /computeruse/process/{processName}/status: get: description: Check if a specific computer use process is running operationId: GetProcessStatus parameters: - description: Process name to check in: path name: processName required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ProcessStatusResponse' summary: Get specific process status tags: - computer-use /computeruse/recordings: get: description: Get a list of all recordings (active and completed) operationId: ListRecordings produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ListRecordingsResponse' '500': description: Internal Server Error schema: additionalProperties: type: string type: object summary: List all recordings tags: - computer-use /computeruse/recordings/{id}: delete: description: Delete a recording file by ID operationId: DeleteRecording parameters: - description: Recording ID in: path name: id required: true type: string responses: '204': description: No Content '400': description: Bad Request schema: additionalProperties: type: string type: object '404': description: Not Found schema: additionalProperties: type: string type: object '500': description: Internal Server Error schema: additionalProperties: type: string type: object summary: Delete a recording tags: - computer-use get: description: Get details of a specific recording by ID operationId: GetRecording parameters: - description: Recording ID in: path name: 
id required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/Recording' '404': description: Not Found schema: additionalProperties: type: string type: object '500': description: Internal Server Error schema: additionalProperties: type: string type: object summary: Get recording details tags: - computer-use /computeruse/recordings/{id}/download: get: description: Download a recording by providing its ID operationId: DownloadRecording parameters: - description: Recording ID in: path name: id required: true type: string produces: - application/octet-stream responses: '200': description: OK schema: type: file '404': description: Not Found schema: additionalProperties: type: string type: object '500': description: Internal Server Error schema: additionalProperties: type: string type: object summary: Download a recording tags: - computer-use /computeruse/recordings/start: post: consumes: - application/json description: Start a new screen recording session operationId: StartRecording parameters: - description: Recording options in: body name: request schema: $ref: '#/definitions/StartRecordingRequest' produces: - application/json responses: '201': description: Created schema: $ref: '#/definitions/Recording' '400': description: Bad Request schema: additionalProperties: type: string type: object '500': description: Internal Server Error schema: additionalProperties: type: string type: object summary: Start a new recording tags: - computer-use /computeruse/recordings/stop: post: consumes: - application/json description: Stop an active screen recording session operationId: StopRecording parameters: - description: Recording ID to stop in: body name: request required: true schema: $ref: '#/definitions/StopRecordingRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/Recording' '400': description: Bad Request schema: additionalProperties: type: string type: object '404': 
description: Not Found schema: additionalProperties: type: string type: object summary: Stop a recording tags: - computer-use /computeruse/screenshot: get: description: Take a screenshot of the entire screen operationId: TakeScreenshot parameters: - description: Whether to show cursor in screenshot in: query name: showCursor type: boolean produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ScreenshotResponse' summary: Take a screenshot tags: - computer-use /computeruse/screenshot/compressed: get: description: Take a compressed screenshot of the entire screen operationId: TakeCompressedScreenshot parameters: - description: Whether to show cursor in screenshot in: query name: showCursor type: boolean - description: Image format (png or jpeg) in: query name: format type: string - description: JPEG quality (1-100) in: query name: quality type: integer - description: Scale factor (0.1-1.0) in: query name: scale type: number produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ScreenshotResponse' summary: Take a compressed screenshot tags: - computer-use /computeruse/screenshot/region: get: description: Take a screenshot of a specific region of the screen operationId: TakeRegionScreenshot parameters: - description: X coordinate of the region in: query name: x required: true type: integer - description: Y coordinate of the region in: query name: 'y' required: true type: integer - description: Width of the region in: query name: width required: true type: integer - description: Height of the region in: query name: height required: true type: integer - description: Whether to show cursor in screenshot in: query name: showCursor type: boolean produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ScreenshotResponse' summary: Take a region screenshot tags: - computer-use /computeruse/screenshot/region/compressed: get: description: Take a compressed screenshot of 
a specific region of the screen operationId: TakeCompressedRegionScreenshot parameters: - description: X coordinate of the region in: query name: x required: true type: integer - description: Y coordinate of the region in: query name: 'y' required: true type: integer - description: Width of the region in: query name: width required: true type: integer - description: Height of the region in: query name: height required: true type: integer - description: Whether to show cursor in screenshot in: query name: showCursor type: boolean - description: Image format (png or jpeg) in: query name: format type: string - description: JPEG quality (1-100) in: query name: quality type: integer - description: Scale factor (0.1-1.0) in: query name: scale type: number produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ScreenshotResponse' summary: Take a compressed region screenshot tags: - computer-use /computeruse/start: post: description: Start all computer use processes and return their status operationId: StartComputerUse produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ComputerUseStartResponse' summary: Start computer use processes tags: - computer-use /computeruse/status: get: description: Get the current status of the computer use system operationId: GetComputerUseSystemStatus produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ComputerUseStatusResponse' summary: Get computer use status tags: - computer-use /computeruse/stop: post: description: Stop all computer use processes and return their status operationId: StopComputerUse produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ComputerUseStopResponse' summary: Stop computer use processes tags: - computer-use /files: delete: description: Delete a file or directory at the specified path operationId: DeleteFile parameters: - description: File or directory path to 
delete in: query name: path required: true type: string - description: Enable recursive deletion for directories in: query name: recursive type: boolean responses: '204': description: No Content summary: Delete a file or directory tags: - file-system get: description: List files and directories in the specified path operationId: ListFiles parameters: - description: Directory path to list (defaults to working directory) in: query name: path type: string produces: - application/json responses: '200': description: OK schema: items: $ref: '#/definitions/FileInfo' type: array summary: List files and directories tags: - file-system /files/bulk-download: post: consumes: - application/json description: Download multiple files by providing their paths operationId: DownloadFiles parameters: - description: Paths of files to download in: body name: downloadFiles required: true schema: $ref: '#/definitions/FilesDownloadRequest' produces: - multipart/form-data responses: '200': description: OK schema: $ref: '#/definitions/gin.H' summary: Download multiple files tags: - file-system /files/bulk-upload: post: consumes: - multipart/form-data description: Upload multiple files with their destination paths operationId: UploadFiles responses: '200': description: OK summary: Upload multiple files tags: - file-system /files/download: get: description: Download a file by providing its path operationId: DownloadFile parameters: - description: File path to download in: query name: path required: true type: string produces: - application/octet-stream responses: '200': description: OK schema: type: file summary: Download a file tags: - file-system /files/find: get: description: Search for text pattern within files in a directory operationId: FindInFiles parameters: - description: Directory path to search in in: query name: path required: true type: string - description: Text pattern to search for in: query name: pattern required: true type: string produces: - application/json responses: 
'200': description: OK schema: items: $ref: '#/definitions/Match' type: array summary: Find text in files tags: - file-system /files/folder: post: consumes: - application/json description: Create a folder with the specified path and optional permissions operationId: CreateFolder parameters: - description: Folder path to create in: query name: path required: true type: string - description: 'Octal permission mode (default: 0755)' in: query name: mode required: true type: string responses: '201': description: Created summary: Create a folder tags: - file-system /files/info: get: description: Get detailed information about a file or directory operationId: GetFileInfo parameters: - description: File or directory path in: query name: path required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/FileInfo' summary: Get file information tags: - file-system /files/move: post: description: Move or rename a file or directory from source to destination operationId: MoveFile parameters: - description: Source file or directory path in: query name: source required: true type: string - description: Destination file or directory path in: query name: destination required: true type: string responses: '200': description: OK summary: Move or rename file/directory tags: - file-system /files/permissions: post: description: Set file permissions, ownership, and group for a file or directory operationId: SetFilePermissions parameters: - description: File or directory path in: query name: path required: true type: string - description: Owner (username or UID) in: query name: owner type: string - description: Group (group name or GID) in: query name: group type: string - description: File mode in octal format (e.g., 0755) in: query name: mode type: string responses: '200': description: OK summary: Set file permissions tags: - file-system /files/replace: post: consumes: - application/json description: Replace text pattern with new 
value in multiple files operationId: ReplaceInFiles parameters: - description: Replace request in: body name: request required: true schema: $ref: '#/definitions/ReplaceRequest' produces: - application/json responses: '200': description: OK schema: items: $ref: '#/definitions/ReplaceResult' type: array summary: Replace text in files tags: - file-system /files/search: get: description: Search for files matching a specific pattern in a directory operationId: SearchFiles parameters: - description: Directory path to search in in: query name: path required: true type: string - description: File pattern to match (e.g., *.txt, *.go) in: query name: pattern required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/SearchFilesResponse' summary: Search files by pattern tags: - file-system /files/upload: post: consumes: - multipart/form-data description: Upload a file to the specified path operationId: UploadFile parameters: - description: Destination path for the uploaded file in: query name: path required: true type: string - description: File to upload in: formData name: file required: true type: file responses: '200': description: OK schema: $ref: '#/definitions/gin.H' summary: Upload a file tags: - file-system /git/add: post: consumes: - application/json description: Add files to the Git staging area operationId: AddFiles parameters: - description: Add files request in: body name: request required: true schema: $ref: '#/definitions/GitAddRequest' produces: - application/json responses: '200': description: OK summary: Add files to Git staging tags: - git /git/branches: delete: consumes: - application/json description: Delete a branch from the Git repository operationId: DeleteBranch parameters: - description: Delete branch request in: body name: request required: true schema: $ref: '#/definitions/git.GitDeleteBranchRequest' produces: - application/json responses: '204': description: No Content summary: Delete a 
branch tags: - git get: description: Get a list of all branches in the Git repository operationId: ListBranches parameters: - description: Repository path in: query name: path required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ListBranchResponse' summary: List branches tags: - git post: consumes: - application/json description: Create a new branch in the Git repository operationId: CreateBranch parameters: - description: Create branch request in: body name: request required: true schema: $ref: '#/definitions/GitBranchRequest' produces: - application/json responses: '201': description: Created summary: Create a new branch tags: - git /git/checkout: post: consumes: - application/json description: Switch to a different branch or commit in the Git repository operationId: CheckoutBranch parameters: - description: Checkout request in: body name: request required: true schema: $ref: '#/definitions/GitCheckoutRequest' produces: - application/json responses: '200': description: OK summary: Checkout branch or commit tags: - git /git/clone: post: consumes: - application/json description: Clone a Git repository to the specified path operationId: CloneRepository parameters: - description: Clone repository request in: body name: request required: true schema: $ref: '#/definitions/GitCloneRequest' produces: - application/json responses: '200': description: OK summary: Clone a Git repository tags: - git /git/commit: post: consumes: - application/json description: Commit staged changes to the Git repository operationId: CommitChanges parameters: - description: Commit request in: body name: request required: true schema: $ref: '#/definitions/GitCommitRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/GitCommitResponse' summary: Commit changes tags: - git /git/history: get: description: Get the commit history of the Git repository operationId: GetCommitHistory 
parameters: - description: Repository path in: query name: path required: true type: string produces: - application/json responses: '200': description: OK schema: items: $ref: '#/definitions/GitCommitInfo' type: array summary: Get commit history tags: - git /git/pull: post: consumes: - application/json description: Pull changes from the remote Git repository operationId: PullChanges parameters: - description: Pull request in: body name: request required: true schema: $ref: '#/definitions/GitRepoRequest' produces: - application/json responses: '200': description: OK summary: Pull changes from remote tags: - git /git/push: post: consumes: - application/json description: Push local changes to the remote Git repository operationId: PushChanges parameters: - description: Push request in: body name: request required: true schema: $ref: '#/definitions/GitRepoRequest' produces: - application/json responses: '200': description: OK summary: Push changes to remote tags: - git /git/status: get: description: Get the Git status of the repository at the specified path operationId: GetStatus parameters: - description: Repository path in: query name: path required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/GitStatus' summary: Get Git status tags: - git /init: post: description: Set the auth token and initialize telemetry for the toolbox server operationId: Initialize parameters: - description: Initialization request in: body name: request required: true schema: $ref: '#/definitions/InitializeRequest' produces: - application/json responses: '200': description: OK schema: additionalProperties: type: string type: object summary: Initialize toolbox server tags: - server /lsp/completions: post: consumes: - application/json description: Get code completion suggestions from the LSP server operationId: Completions parameters: - description: Completion request in: body name: request required: true schema: $ref: 
'#/definitions/LspCompletionParams' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/CompletionList' summary: Get code completions tags: - lsp /lsp/did-close: post: consumes: - application/json description: Notify the LSP server that a document has been closed operationId: DidClose parameters: - description: Document request in: body name: request required: true schema: $ref: '#/definitions/LspDocumentRequest' produces: - application/json responses: '200': description: OK summary: Notify document closed tags: - lsp /lsp/did-open: post: consumes: - application/json description: Notify the LSP server that a document has been opened operationId: DidOpen parameters: - description: Document request in: body name: request required: true schema: $ref: '#/definitions/LspDocumentRequest' produces: - application/json responses: '200': description: OK summary: Notify document opened tags: - lsp /lsp/document-symbols: get: description: Get symbols (functions, classes, etc.) 
from a document operationId: DocumentSymbols parameters: - description: Language ID (e.g., python, typescript) in: query name: languageId required: true type: string - description: Path to project in: query name: pathToProject required: true type: string - description: Document URI in: query name: uri required: true type: string produces: - application/json responses: '200': description: OK schema: items: $ref: '#/definitions/LspSymbol' type: array summary: Get document symbols tags: - lsp /lsp/start: post: consumes: - application/json description: Start a Language Server Protocol server for the specified language operationId: Start parameters: - description: LSP server request in: body name: request required: true schema: $ref: '#/definitions/LspServerRequest' produces: - application/json responses: '200': description: OK summary: Start LSP server tags: - lsp /lsp/stop: post: consumes: - application/json description: Stop a Language Server Protocol server operationId: Stop parameters: - description: LSP server request in: body name: request required: true schema: $ref: '#/definitions/LspServerRequest' produces: - application/json responses: '200': description: OK summary: Stop LSP server tags: - lsp /lsp/workspacesymbols: get: description: Search for symbols across the entire workspace operationId: WorkspaceSymbols parameters: - description: Search query in: query name: query required: true type: string - description: Language ID (e.g., python, typescript) in: query name: languageId required: true type: string - description: Path to project in: query name: pathToProject required: true type: string produces: - application/json responses: '200': description: OK schema: items: $ref: '#/definitions/LspSymbol' type: array summary: Get workspace symbols tags: - lsp /port: get: description: Get a list of all currently active ports operationId: GetPorts produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/PortList' summary: Get 
active ports tags: - port /port/{port}/in-use: get: description: Check if a specific port is currently in use operationId: IsPortInUse parameters: - description: Port number (3000-9999) in: path name: port required: true type: integer produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/IsPortInUseResponse' summary: Check if port is in use tags: - port /process/execute: post: consumes: - application/json description: Execute a shell command and return the output and exit code operationId: ExecuteCommand parameters: - description: Command execution request in: body name: request required: true schema: $ref: '#/definitions/ExecuteRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ExecuteResponse' summary: Execute a command tags: - process /process/interpreter/context: get: description: Returns information about all user-created interpreter contexts (excludes default context) operationId: ListInterpreterContexts produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/ListContextsResponse' summary: List all user-created interpreter contexts tags: - interpreter post: consumes: - application/json description: Creates a new isolated interpreter context with optional working directory and language operationId: CreateInterpreterContext parameters: - description: Context configuration in: body name: request required: true schema: $ref: '#/definitions/CreateContextRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/InterpreterContext' '400': description: Bad Request schema: additionalProperties: type: string type: object '500': description: Internal Server Error schema: additionalProperties: type: string type: object summary: Create a new interpreter context tags: - interpreter /process/interpreter/context/{id}: delete: description: Deletes an interpreter context and shuts down its worker process 
operationId: DeleteInterpreterContext parameters: - description: Context ID in: path name: id required: true type: string produces: - application/json responses: '200': description: OK schema: additionalProperties: type: string type: object '400': description: Bad Request schema: additionalProperties: type: string type: object '404': description: Not Found schema: additionalProperties: type: string type: object summary: Delete an interpreter context tags: - interpreter /process/interpreter/execute: get: consumes: - application/json description: Executes code in a specified context (or default context if not specified) via WebSocket streaming operationId: ExecuteInterpreterCode produces: - application/json responses: '101': description: Switching Protocols headers: Connection: description: Upgrade type: string Upgrade: description: websocket type: string schema: type: string summary: Execute code in an interpreter context tags: - interpreter /process/pty: get: description: Get a list of all active pseudo-terminal sessions operationId: ListPtySessions produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/PtyListResponse' summary: List all PTY sessions tags: - process post: consumes: - application/json description: Create a new pseudo-terminal session with specified configuration operationId: CreatePtySession parameters: - description: PTY session creation request in: body name: request required: true schema: $ref: '#/definitions/PtyCreateRequest' produces: - application/json responses: '201': description: Created schema: $ref: '#/definitions/PtyCreateResponse' summary: Create a new PTY session tags: - process /process/pty/{sessionId}: delete: description: Delete a pseudo-terminal session and terminate its process operationId: DeletePtySession parameters: - description: PTY session ID in: path name: sessionId required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: 
'#/definitions/gin.H' summary: Delete a PTY session tags: - process get: description: Get detailed information about a specific pseudo-terminal session operationId: GetPtySession parameters: - description: PTY session ID in: path name: sessionId required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/PtySessionInfo' summary: Get PTY session information tags: - process /process/pty/{sessionId}/connect: get: description: Establish a WebSocket connection to interact with a pseudo-terminal session operationId: ConnectPtySession parameters: - description: PTY session ID in: path name: sessionId required: true type: string responses: '101': description: Switching Protocols - WebSocket connection established summary: Connect to PTY session via WebSocket tags: - process /process/pty/{sessionId}/resize: post: consumes: - application/json description: Resize the terminal dimensions of a pseudo-terminal session operationId: ResizePtySession parameters: - description: PTY session ID in: path name: sessionId required: true type: string - description: Resize request with new dimensions in: body name: request required: true schema: $ref: '#/definitions/PtyResizeRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/PtySessionInfo' summary: Resize a PTY session tags: - process /process/session: get: description: Get a list of all active shell sessions operationId: ListSessions produces: - application/json responses: '200': description: OK schema: items: $ref: '#/definitions/Session' type: array summary: List all sessions tags: - process post: consumes: - application/json description: Create a new shell session for command execution operationId: CreateSession parameters: - description: Session creation request in: body name: request required: true schema: $ref: '#/definitions/CreateSessionRequest' produces: - application/json responses: '201': description: Created summary: 
Create a new session tags: - process /process/session/{sessionId}: delete: description: Delete an existing shell session operationId: DeleteSession parameters: - description: Session ID in: path name: sessionId required: true type: string responses: '204': description: No Content summary: Delete a session tags: - process get: description: Get details of a specific session including its commands operationId: GetSession parameters: - description: Session ID in: path name: sessionId required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/Session' summary: Get session details tags: - process /process/session/{sessionId}/command/{commandId}: get: description: Get details of a specific command within a session operationId: GetSessionCommand parameters: - description: Session ID in: path name: sessionId required: true type: string - description: Command ID in: path name: commandId required: true type: string produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/Command' summary: Get session command details tags: - process /process/session/{sessionId}/command/{commandId}/input: post: consumes: - application/json description: Send input data to a running command in a session for interactive execution operationId: SendInput parameters: - description: Session ID in: path name: sessionId required: true type: string - description: Command ID in: path name: commandId required: true type: string - description: Input send request in: body name: request required: true schema: $ref: '#/definitions/SessionSendInputRequest' responses: '204': description: No Content summary: Send input to command tags: - process /process/session/{sessionId}/command/{commandId}/logs: get: description: Get logs for a specific command within a session. Supports both HTTP and WebSocket streaming. 
operationId: GetSessionCommandLogs parameters: - description: Session ID in: path name: sessionId required: true type: string - description: Command ID in: path name: commandId required: true type: string - description: Follow logs in real-time (WebSocket only) in: query name: follow type: boolean produces: - text/plain responses: '200': description: Log content schema: type: string summary: Get session command logs tags: - process /process/session/{sessionId}/exec: post: consumes: - application/json description: Execute a command within an existing shell session operationId: SessionExecuteCommand parameters: - description: Session ID in: path name: sessionId required: true type: string - description: Command execution request in: body name: request required: true schema: $ref: '#/definitions/SessionExecuteRequest' produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/SessionExecuteResponse' '202': description: Accepted schema: $ref: '#/definitions/SessionExecuteResponse' summary: Execute command in session tags: - process /process/session/entrypoint: get: description: Get details of an entrypoint session including its commands operationId: GetEntrypointSession produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/Session' summary: Get entrypoint session details tags: - process /process/session/entrypoint/logs: get: description: Get logs for a sandbox entrypoint session. Supports both HTTP and WebSocket streaming. operationId: GetEntrypointLogs parameters: - description: Follow logs in real-time (WebSocket only) in: query name: follow type: boolean produces: - text/plain responses: '200': description: Entrypoint log content schema: type: string summary: Get entrypoint logs tags: - process /user-home-dir: get: description: Get the current user home directory path. 
operationId: GetUserHomeDir produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/UserHomeDirResponse' summary: Get user home directory tags: - info /version: get: description: Get the current daemon version operationId: GetVersion produces: - application/json responses: '200': description: OK schema: additionalProperties: type: string type: object summary: Get version tags: - info /work-dir: get: description: Get the current working directory path. This is default directory used for running commands. operationId: GetWorkDir produces: - application/json responses: '200': description: OK schema: $ref: '#/definitions/WorkDirResponse' summary: Get working directory tags: - info swagger: '2.0' ================================================ FILE: apps/daemon/pkg/toolbox/fs/create_folder.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package fs import ( "errors" "net/http" "os" "strconv" "github.com/gin-gonic/gin" ) // CreateFolder godoc // // @Summary Create a folder // @Description Create a folder with the specified path and optional permissions // @Tags file-system // @Accept json // @Param path query string true "Folder path to create" // @Param mode query string true "Octal permission mode (default: 0755)" // @Success 201 // @Router /files/folder [post] // // @id CreateFolder func CreateFolder(c *gin.Context) { path := c.Query("path") if path == "" { c.AbortWithError(http.StatusBadRequest, errors.New("path is required")) return } // Get the permission mode from query params, default to 0755 mode := c.Query("mode") var perm os.FileMode = 0755 if mode != "" { modeNum, err := strconv.ParseUint(mode, 8, 32) if err != nil { c.AbortWithError(http.StatusBadRequest, errors.New("invalid mode format")) return } perm = os.FileMode(modeNum) } if err := os.MkdirAll(path, perm); err != nil { c.AbortWithError(http.StatusBadRequest, err) return } 
c.Status(http.StatusCreated) } ================================================ FILE: apps/daemon/pkg/toolbox/fs/delete_file.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package fs import ( "errors" "net/http" "os" "github.com/gin-gonic/gin" ) // DeleteFile godoc // // @Summary Delete a file or directory // @Description Delete a file or directory at the specified path // @Tags file-system // @Param path query string true "File or directory path to delete" // @Param recursive query boolean false "Enable recursive deletion for directories" // @Success 204 // @Router /files [delete] // // @id DeleteFile func DeleteFile(c *gin.Context) { path := c.Query("path") if path == "" { c.AbortWithError(http.StatusBadRequest, errors.New("path is required")) return } // Check if recursive deletion is requested recursive := c.Query("recursive") == "true" // Get file info to check if it's a directory info, err := os.Stat(path) if err != nil { if os.IsNotExist(err) { c.AbortWithError(http.StatusNotFound, err) return } if os.IsPermission(err) { c.AbortWithError(http.StatusForbidden, err) return } c.AbortWithError(http.StatusBadRequest, err) return } // If it's a directory and recursive flag is not set, return error if info.IsDir() && !recursive { c.AbortWithError(http.StatusBadRequest, errors.New("cannot delete directory without recursive flag")) return } var deleteErr error if recursive { deleteErr = os.RemoveAll(path) } else { deleteErr = os.Remove(path) } if deleteErr != nil { c.AbortWithError(http.StatusBadRequest, deleteErr) return } c.Status(http.StatusNoContent) } ================================================ FILE: apps/daemon/pkg/toolbox/fs/download_file.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package fs import ( "errors" "fmt" "net/http" "os" "path/filepath" "github.com/gin-gonic/gin" ) // DownloadFile godoc // // @Summary Download a file // @Description Download a file by providing its path // @Tags file-system // @Produce octet-stream // @Param path query string true "File path to download" // @Success 200 {file} binary // @Router /files/download [get] // // @id DownloadFile func DownloadFile(c *gin.Context) { requestedPath := c.Query("path") if requestedPath == "" { c.AbortWithError(http.StatusBadRequest, errors.New("path is required")) return } absPath, err := filepath.Abs(requestedPath) if err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid path: %w", err)) return } fileInfo, err := os.Stat(absPath) if err != nil { if os.IsNotExist(err) { c.AbortWithError(http.StatusNotFound, err) return } if os.IsPermission(err) { c.AbortWithError(http.StatusForbidden, err) return } c.AbortWithError(http.StatusBadRequest, err) return } if fileInfo.IsDir() { c.AbortWithError(http.StatusBadRequest, errors.New("path must be a file")) return } c.Header("Content-Description", "File Transfer") c.Header("Content-Type", "application/octet-stream") c.Header("Content-Disposition", "attachment; filename="+filepath.Base(absPath)) c.Header("Content-Transfer-Encoding", "binary") c.Header("Expires", "0") c.Header("Cache-Control", "must-revalidate") c.Header("Pragma", "public") c.File(absPath) } ================================================ FILE: apps/daemon/pkg/toolbox/fs/download_files.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package fs import ( "context" "fmt" "io" "mime" "mime/multipart" "net/http" "net/textproto" "os" "path/filepath" "github.com/gin-gonic/gin" ) // Wraps an io.Writer and aborts writes if the context is canceled. 
type ctxWriter struct { ctx context.Context w io.Writer } func (cw *ctxWriter) Write(p []byte) (int, error) { select { case <-cw.ctx.Done(): return 0, cw.ctx.Err() default: } return cw.w.Write(p) } // DownloadFiles godoc // // @Summary Download multiple files // @Description Download multiple files by providing their paths // @Tags file-system // @Accept json // @Produce multipart/form-data // @Param downloadFiles body FilesDownloadRequest true "Paths of files to download" // @Success 200 {object} gin.H // @Router /files/bulk-download [post] // // @id DownloadFiles func DownloadFiles(c *gin.Context) { var req FilesDownloadRequest if err := c.BindJSON(&req); err != nil || len(req.Paths) == 0 { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{ "error": "request body must be {\"paths\": [ ... ]} and non-empty", }) return } const boundary = "DAYTONA-FILE-BOUNDARY" c.Status(http.StatusOK) c.Header("Content-Type", fmt.Sprintf("multipart/form-data; boundary=%s", boundary)) mw := multipart.NewWriter(c.Writer) if err := mw.SetBoundary(boundary); err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{ "error": "failed to set multipart boundary", }) return } defer mw.Close() // ensure final boundary is written for _, path := range req.Paths { if !fileExists(path) { writeErrorPart(c, mw, path, fmt.Sprintf("file not found or invalid: %s", path)) continue } f, err := os.Open(path) if err != nil { writeErrorPart(c, mw, path, fmt.Sprintf("error opening file: %v", err)) continue } if err := writeFilePart(c.Request.Context(), mw, path, f); err != nil { writeErrorPart(c, mw, path, fmt.Sprintf("error streaming file: %v", err)) } f.Close() } } // Streams a file part using io.Copy and respects context cancellation. 
func writeFilePart(ctx context.Context, mw *multipart.Writer, path string, r io.Reader) error { ctype := mime.TypeByExtension(filepath.Ext(path)) if ctype == "" { ctype = "application/octet-stream" } hdr := textproto.MIMEHeader{} hdr.Set("Content-Type", ctype) hdr.Set("Content-Disposition", fmt.Sprintf(`form-data; name="%s"; filename="%s"`, "file", path), ) part, err := mw.CreatePart(hdr) if err != nil { return err } // Wrap part with context-aware writer cw := &ctxWriter{ctx: ctx, w: part} _, err = io.Copy(cw, r) return err } // Writes an error message as a multipart part. func writeErrorPart(ctx *gin.Context, mw *multipart.Writer, path, text string) { hdr := textproto.MIMEHeader{} hdr.Set("Content-Type", "text/plain; charset=utf-8") hdr.Set("Content-Disposition", fmt.Sprintf(`form-data; name="%s"; filename="%s"`, "error", path), ) if part, err := mw.CreatePart(hdr); err == nil { _, err := io.WriteString(part, text) if err != nil { ctx.AbortWithError(http.StatusInternalServerError, err) } } } func fileExists(path string) bool { info, err := os.Stat(path) return err == nil && !info.IsDir() } ================================================ FILE: apps/daemon/pkg/toolbox/fs/find_in_files.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package fs import ( "bufio" "errors" "net/http" "os" "path/filepath" "strings" "github.com/gin-gonic/gin" ) // FindInFiles godoc // // @Summary Find text in files // @Description Search for text pattern within files in a directory // @Tags file-system // @Produce json // @Param path query string true "Directory path to search in" // @Param pattern query string true "Text pattern to search for" // @Success 200 {array} Match // @Router /files/find [get] // // @id FindInFiles func FindInFiles(c *gin.Context) { path := c.Query("path") pattern := c.Query("pattern") if path == "" || pattern == "" { c.AbortWithError(http.StatusBadRequest, errors.New("path and pattern are required")) return } var matches = make([]Match, 0) err := filepath.Walk(path, func(filePath string, info os.FileInfo, err error) error { if err != nil { return filepath.SkipDir } if !info.Mode().IsRegular() { return nil } file, err := os.Open(filePath) if err != nil { return nil } defer file.Close() buf := make([]byte, 512) n, err := file.Read(buf) if err != nil { return nil } for i := 0; i < n; i++ { // skip binary files if buf[i] == 0 { return nil } } _, err = file.Seek(0, 0) if err != nil { return nil } scanner := bufio.NewScanner(file) lineNum := 1 for scanner.Scan() { if strings.Contains(scanner.Text(), pattern) { matches = append(matches, Match{ File: filePath, Line: lineNum, Content: scanner.Text(), }) } lineNum++ } return nil }) if err != nil { c.AbortWithError(http.StatusBadRequest, err) return } c.JSON(http.StatusOK, matches) } ================================================ FILE: apps/daemon/pkg/toolbox/fs/get_file_info.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package fs import ( "errors" "fmt" "net/http" "os" "strconv" "syscall" "github.com/gin-gonic/gin" ) // GetFileInfo godoc // // @Summary Get file information // @Description Get detailed information about a file or directory // @Tags file-system // @Produce json // @Param path query string true "File or directory path" // @Success 200 {object} FileInfo // @Router /files/info [get] // // @id GetFileInfo func GetFileInfo(c *gin.Context) { path := c.Query("path") if path == "" { c.AbortWithError(http.StatusBadRequest, errors.New("path is required")) return } info, err := getFileInfo(path) if err != nil { if os.IsNotExist(err) { c.AbortWithError(http.StatusNotFound, err) return } if os.IsPermission(err) { c.AbortWithError(http.StatusForbidden, err) return } c.AbortWithError(http.StatusBadRequest, err) return } c.JSON(http.StatusOK, info) } func getFileInfo(path string) (FileInfo, error) { info, err := os.Stat(path) if err != nil { return FileInfo{}, err } stat := info.Sys().(*syscall.Stat_t) return FileInfo{ Name: info.Name(), Size: info.Size(), Mode: info.Mode().String(), ModTime: info.ModTime().String(), IsDir: info.IsDir(), Owner: strconv.FormatUint(uint64(stat.Uid), 10), Group: strconv.FormatUint(uint64(stat.Gid), 10), Permissions: fmt.Sprintf("%04o", info.Mode().Perm()), }, nil } ================================================ FILE: apps/daemon/pkg/toolbox/fs/list_files.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package fs import ( "net/http" "os" "path/filepath" "github.com/gin-gonic/gin" ) // ListFiles godoc // // @Summary List files and directories // @Description List files and directories in the specified path // @Tags file-system // @Produce json // @Param path query string false "Directory path to list (defaults to working directory)" // @Success 200 {array} FileInfo // @Router /files [get] // // @id ListFiles func ListFiles(c *gin.Context) { path := c.Query("path") if path == "" { path = "." } files, err := os.ReadDir(path) if err != nil { if os.IsNotExist(err) { c.AbortWithError(http.StatusNotFound, err) return } if os.IsPermission(err) { c.AbortWithError(http.StatusForbidden, err) return } c.AbortWithError(http.StatusBadRequest, err) return } var fileInfos = make([]FileInfo, 0) for _, file := range files { info, err := getFileInfo(filepath.Join(path, file.Name())) if err != nil { continue } fileInfos = append(fileInfos, info) } c.JSON(http.StatusOK, fileInfos) } ================================================ FILE: apps/daemon/pkg/toolbox/fs/move_file.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package fs import ( "bufio" "errors" "fmt" "net/http" "os" "path/filepath" "github.com/gin-gonic/gin" ) // MoveFile godoc // // @Summary Move or rename file/directory // @Description Move or rename a file or directory from source to destination // @Tags file-system // @Param source query string true "Source file or directory path" // @Param destination query string true "Destination file or directory path" // @Success 200 // @Router /files/move [post] // // @id MoveFile func MoveFile(c *gin.Context) { sourcePath := c.Query("source") destPath := c.Query("destination") if sourcePath == "" || destPath == "" { c.AbortWithError(http.StatusBadRequest, errors.New("source and destination paths are required")) return } // Get absolute paths absSourcePath, err := filepath.Abs(sourcePath) if err != nil { c.AbortWithError(http.StatusBadRequest, errors.New("invalid source path")) return } absDestPath, err := filepath.Abs(destPath) if err != nil { c.AbortWithError(http.StatusBadRequest, errors.New("invalid destination path")) return } // Check if source exists sourceInfo, err := os.Stat(absSourcePath) if err != nil { if os.IsNotExist(err) { c.AbortWithError(http.StatusNotFound, err) return } if os.IsPermission(err) { c.AbortWithError(http.StatusForbidden, err) return } c.AbortWithError(http.StatusBadRequest, err) return } // Check if destination parent directory exists destDir := filepath.Dir(absDestPath) _, err = os.Stat(destDir) if err != nil { if os.IsNotExist(err) { c.AbortWithError(http.StatusNotFound, err) return } if os.IsPermission(err) { c.AbortWithError(http.StatusForbidden, err) return } c.AbortWithError(http.StatusBadRequest, err) return } // Check if destination already exists if _, err := os.Stat(absDestPath); err == nil { c.AbortWithError(http.StatusConflict, errors.New("destination already exists")) return } // Perform the move operation err = os.Rename(absSourcePath, absDestPath) if err != nil { // If rename fails (e.g., 
across different devices), try copy and delete if err := copyFile(absSourcePath, absDestPath, sourceInfo); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("failed to move file: %w", err)) return } // If copy successful, delete the source if err := os.RemoveAll(absSourcePath); err != nil { // If delete fails, inform that the file was copied but not deleted c.JSON(http.StatusOK, gin.H{ "message": "file copied successfully but source could not be deleted", "error": fmt.Sprintf("failed to delete source: %v", err), }) return } } c.Status(http.StatusOK) } func copyFile(src, dst string, srcInfo os.FileInfo) error { if srcInfo.IsDir() { return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { if err != nil { return err } // Create relative path relPath, err := filepath.Rel(src, path) if err != nil { return err } targetPath := filepath.Join(dst, relPath) if info.IsDir() { return os.MkdirAll(targetPath, info.Mode()) } // Copy the file return copyFileContents(path, targetPath, info.Mode()) }) } return copyFileContents(src, dst, srcInfo.Mode()) } func copyFileContents(src, dst string, mode os.FileMode) error { in, err := os.Open(src) if err != nil { return err } defer in.Close() out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode) if err != nil { return err } defer out.Close() _, err = bufio.NewReader(in).WriteTo(out) return err } ================================================ FILE: apps/daemon/pkg/toolbox/fs/replace_in_files.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package fs import ( "fmt" "net/http" "os" "strings" "github.com/gin-gonic/gin" ) // ReplaceInFiles godoc // // @Summary Replace text in files // @Description Replace text pattern with new value in multiple files // @Tags file-system // @Accept json // @Produce json // @Param request body ReplaceRequest true "Replace request" // @Success 200 {array} ReplaceResult // @Router /files/replace [post] // // @id ReplaceInFiles func ReplaceInFiles(c *gin.Context) { var req ReplaceRequest if err := c.ShouldBindJSON(&req); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } results := make([]ReplaceResult, 0, len(req.Files)) for _, filePath := range req.Files { content, err := os.ReadFile(filePath) if err != nil { results = append(results, ReplaceResult{ File: filePath, Success: false, Error: err.Error(), }) continue } newValue := "" if req.NewValue != nil { newValue = *req.NewValue } newContent := strings.ReplaceAll(string(content), req.Pattern, newValue) err = os.WriteFile(filePath, []byte(newContent), 0644) if err != nil { results = append(results, ReplaceResult{ File: filePath, Success: false, Error: err.Error(), }) continue } results = append(results, ReplaceResult{ File: filePath, Success: true, }) } c.JSON(http.StatusOK, results) } ================================================ FILE: apps/daemon/pkg/toolbox/fs/search_files.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package fs import ( "errors" "net/http" "os" "path/filepath" "github.com/gin-gonic/gin" ) // SearchFiles godoc // // @Summary Search files by pattern // @Description Search for files matching a specific pattern in a directory // @Tags file-system // @Produce json // @Param path query string true "Directory path to search in" // @Param pattern query string true "File pattern to match (e.g., *.txt, *.go)" // @Success 200 {object} SearchFilesResponse // @Router /files/search [get] // // @id SearchFiles func SearchFiles(c *gin.Context) { path := c.Query("path") pattern := c.Query("pattern") if path == "" || pattern == "" { c.AbortWithError(http.StatusBadRequest, errors.New("path and pattern are required")) return } var matches []string err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { if err != nil { return filepath.SkipDir } if matched, _ := filepath.Match(pattern, info.Name()); matched { matches = append(matches, path) } return nil }) if err != nil { c.AbortWithError(http.StatusBadRequest, err) return } c.JSON(http.StatusOK, SearchFilesResponse{ Files: matches, }) } ================================================ FILE: apps/daemon/pkg/toolbox/fs/set_file_permissions.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package fs import ( "errors" "fmt" "net/http" "os" "os/user" "path/filepath" "strconv" "github.com/gin-gonic/gin" ) // SetFilePermissions godoc // // @Summary Set file permissions // @Description Set file permissions, ownership, and group for a file or directory // @Tags file-system // @Param path query string true "File or directory path" // @Param owner query string false "Owner (username or UID)" // @Param group query string false "Group (group name or GID)" // @Param mode query string false "File mode in octal format (e.g., 0755)" // @Success 200 // @Router /files/permissions [post] // // @id SetFilePermissions func SetFilePermissions(c *gin.Context) { path := c.Query("path") ownerParam := c.Query("owner") groupParam := c.Query("group") mode := c.Query("mode") if path == "" { c.AbortWithError(http.StatusBadRequest, errors.New("path is required")) return } // convert to absolute path and check existence absPath, err := filepath.Abs(path) if err != nil { c.AbortWithError(http.StatusBadRequest, errors.New("invalid path")) return } _, err = os.Stat(absPath) if err != nil { if os.IsNotExist(err) { c.AbortWithError(http.StatusNotFound, err) return } if os.IsPermission(err) { c.AbortWithError(http.StatusForbidden, err) return } c.AbortWithError(http.StatusBadRequest, err) return } // handle mode change if mode != "" { modeNum, err := strconv.ParseUint(mode, 8, 32) if err != nil { c.AbortWithError(http.StatusBadRequest, errors.New("invalid mode format")) return } if err := os.Chmod(absPath, os.FileMode(modeNum)); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("failed to change mode: %w", err)) return } } // handle ownership change if ownerParam != "" || groupParam != "" { uid := -1 gid := -1 // resolve owner if ownerParam != "" { // first try as numeric UID if uidNum, err := strconv.Atoi(ownerParam); err == nil { uid = uidNum } else { // try as username if u, err := user.Lookup(ownerParam); err == nil { if uid, err 
= strconv.Atoi(u.Uid); err != nil { c.AbortWithError(http.StatusBadRequest, errors.New("invalid user ID")) return } } else { c.AbortWithError(http.StatusBadRequest, errors.New("user not found")) return } } } // resolve group if groupParam != "" { // first try as numeric GID if gidNum, err := strconv.Atoi(groupParam); err == nil { gid = gidNum } else { // try as group name if g, err := user.LookupGroup(groupParam); err == nil { if gid, err = strconv.Atoi(g.Gid); err != nil { c.AbortWithError(http.StatusBadRequest, errors.New("invalid group ID")) return } } else { c.AbortWithError(http.StatusBadRequest, errors.New("group not found")) return } } } if err := os.Chown(absPath, uid, gid); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("failed to change ownership: %w", err)) return } } c.Status(http.StatusOK) } ================================================ FILE: apps/daemon/pkg/toolbox/fs/types.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package fs

// FileInfo describes a single filesystem entry as returned by the file
// listing/info endpoints.
type FileInfo struct {
	Name        string `json:"name" validate:"required"`
	Size        int64  `json:"size" validate:"required"`
	Mode        string `json:"mode" validate:"required"`
	ModTime     string `json:"modTime" validate:"required"`
	IsDir       bool   `json:"isDir" validate:"required"`
	Owner       string `json:"owner" validate:"required"`
	Group       string `json:"group" validate:"required"`
	Permissions string `json:"permissions" validate:"required"`
} // @name FileInfo

// ReplaceRequest is the payload for ReplaceInFiles: replace every occurrence
// of Pattern with NewValue in each listed file. NewValue is a pointer so an
// absent value (delete the pattern) is distinguishable from "".
type ReplaceRequest struct {
	Files    []string `json:"files" validate:"required"`
	Pattern  string   `json:"pattern" validate:"required"`
	NewValue *string  `json:"newValue" validate:"required"`
} // @name ReplaceRequest

// ReplaceResult reports the per-file outcome of a replace operation.
type ReplaceResult struct {
	File    string `json:"file"`
	Success bool   `json:"success"`
	Error   string `json:"error,omitempty"`
} // @name ReplaceResult

// Match is a single text-search hit: file, 1-based(?) line number and the
// matching line content. NOTE(review): line-number base not visible here —
// confirm against the producer of this type.
type Match struct {
	File    string `json:"file" validate:"required"`
	Line    int    `json:"line" validate:"required"`
	Content string `json:"content" validate:"required"`
} // @name Match

// SearchFilesResponse wraps the list of paths matched by SearchFiles.
type SearchFilesResponse struct {
	Files []string `json:"files" validate:"required"`
} // @name SearchFilesResponse

// FilesDownloadRequest lists the paths requested for bulk download.
type FilesDownloadRequest struct {
	Paths []string `json:"paths" validate:"required"`
} // @name FilesDownloadRequest

================================================
FILE: apps/daemon/pkg/toolbox/fs/upload_file.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package fs import ( "errors" "net/http" "github.com/gin-gonic/gin" ) // UploadFile godoc // // @Summary Upload a file // @Description Upload a file to the specified path // @Tags file-system // @Accept multipart/form-data // @Param path query string true "Destination path for the uploaded file" // @Param file formData file true "File to upload" // @Success 200 {object} gin.H // @Router /files/upload [post] // // @id UploadFile func UploadFile(c *gin.Context) { path := c.Query("path") if path == "" { c.AbortWithError(http.StatusBadRequest, errors.New("path is required")) return } file, err := c.FormFile("file") if err != nil { c.AbortWithError(http.StatusBadRequest, err) return } if err := c.SaveUploadedFile(file, path); err != nil { c.AbortWithError(http.StatusBadRequest, err) return } c.Status(http.StatusOK) } ================================================ FILE: apps/daemon/pkg/toolbox/fs/upload_files.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package fs import ( "fmt" "io" "net/http" "os" "path/filepath" "strings" "github.com/gin-gonic/gin" ) // UploadFiles godoc // // @Summary Upload multiple files // @Description Upload multiple files with their destination paths // @Tags file-system // @Accept multipart/form-data // @Success 200 // @Router /files/bulk-upload [post] // // @id UploadFiles func UploadFiles(c *gin.Context) { reader, err := c.Request.MultipartReader() if err != nil { c.JSON(http.StatusBadRequest, gin.H{"errors": []string{"invalid multipart form"}}) return } dests := make(map[string]string) var errs []string for { part, err := reader.NextPart() if err == io.EOF { break } if err != nil { errs = append(errs, fmt.Sprintf("reading part: %v", err)) continue } name := part.FormName() if strings.HasSuffix(name, ".path") { data, err := io.ReadAll(part) if err != nil { idx := extractIndex(name) errs = append(errs, fmt.Sprintf("path[%s]: %v", idx, err)) continue } idx := extractIndex(name) dests[idx] = string(data) continue } if strings.HasSuffix(name, ".file") { idx := extractIndex(name) dest, ok := dests[idx] if !ok { errs = append(errs, fmt.Sprintf("file[%s]: missing .path metadata", idx)) continue } if d := filepath.Dir(dest); d != "" { if err := os.MkdirAll(d, 0o755); err != nil { errs = append(errs, fmt.Sprintf("%s: mkdir %s: %v", dest, d, err)) continue } } f, err := os.Create(dest) if err != nil { errs = append(errs, fmt.Sprintf("%s: create: %v", dest, err)) continue } if _, err := io.Copy(f, part); err != nil { errs = append(errs, fmt.Sprintf("%s: write: %v", dest, err)) } f.Close() continue } } if len(errs) > 0 { c.JSON(http.StatusBadRequest, gin.H{"errors": errs}) return } c.Status(http.StatusOK) } func extractIndex(fieldName string) string { s := strings.TrimPrefix(fieldName, "files[") return strings.TrimSuffix(strings.TrimSuffix(s, "].path"), "].file") } ================================================ FILE: apps/daemon/pkg/toolbox/git/add.go 
================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "net/http" "github.com/daytonaio/daemon/pkg/git" "github.com/gin-gonic/gin" ) // AddFiles godoc // // @Summary Add files to Git staging // @Description Add files to the Git staging area // @Tags git // @Accept json // @Produce json // @Param request body GitAddRequest true "Add files request" // @Success 200 // @Router /git/add [post] // // @id AddFiles func AddFiles(c *gin.Context) { var req GitAddRequest if err := c.ShouldBindJSON(&req); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } gitService := git.Service{ WorkDir: req.Path, } if err := gitService.Add(req.Files); err != nil { c.AbortWithError(http.StatusBadRequest, err) return } c.Status(http.StatusOK) } ================================================ FILE: apps/daemon/pkg/toolbox/git/checkout.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "net/http" "github.com/daytonaio/daemon/pkg/git" "github.com/gin-gonic/gin" ) // CheckoutBranch godoc // // @Summary Checkout branch or commit // @Description Switch to a different branch or commit in the Git repository // @Tags git // @Accept json // @Produce json // @Param request body GitCheckoutRequest true "Checkout request" // @Success 200 // @Router /git/checkout [post] // // @id CheckoutBranch func CheckoutBranch(c *gin.Context) { var req GitCheckoutRequest if err := c.ShouldBindJSON(&req); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } gitService := git.Service{ WorkDir: req.Path, } if err := gitService.Checkout(req.Branch); err != nil { c.AbortWithError(http.StatusBadRequest, err) return } c.Status(http.StatusOK) } ================================================ FILE: apps/daemon/pkg/toolbox/git/clone_repository.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "net/http" "github.com/daytonaio/daemon/pkg/git" "github.com/daytonaio/daemon/pkg/gitprovider" "github.com/gin-gonic/gin" go_git_http "github.com/go-git/go-git/v5/plumbing/transport/http" ) // CloneRepository godoc // // @Summary Clone a Git repository // @Description Clone a Git repository to the specified path // @Tags git // @Accept json // @Produce json // @Param request body GitCloneRequest true "Clone repository request" // @Success 200 // @Router /git/clone [post] // // @id CloneRepository func CloneRepository(c *gin.Context) { var req GitCloneRequest if err := c.ShouldBindJSON(&req); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } branch := "" if req.Branch != nil { branch = *req.Branch } repo := gitprovider.GitRepository{ Url: req.URL, Branch: branch, } if req.CommitID != nil { repo.Target = gitprovider.CloneTargetCommit repo.Sha = *req.CommitID } gitService := git.Service{ WorkDir: req.Path, } var auth *go_git_http.BasicAuth // Set authentication if provided if req.Username != nil && req.Password != nil { auth = &go_git_http.BasicAuth{ Username: *req.Username, Password: *req.Password, } } err := gitService.CloneRepository(&repo, auth) if err != nil { c.AbortWithError(http.StatusBadRequest, err) return } c.Status(http.StatusOK) } ================================================ FILE: apps/daemon/pkg/toolbox/git/commit.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "net/http" "time" "github.com/daytonaio/daemon/pkg/git" "github.com/gin-gonic/gin" go_git "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/plumbing/object" ) // CommitChanges godoc // // @Summary Commit changes // @Description Commit staged changes to the Git repository // @Tags git // @Accept json // @Produce json // @Param request body GitCommitRequest true "Commit request" // @Success 200 {object} GitCommitResponse // @Router /git/commit [post] // // @id CommitChanges func CommitChanges(c *gin.Context) { var req GitCommitRequest if err := c.ShouldBindJSON(&req); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } gitService := git.Service{ WorkDir: req.Path, } commitSha, err := gitService.Commit(req.Message, &go_git.CommitOptions{ Author: &object.Signature{ Name: req.Author, Email: req.Email, When: time.Now(), }, AllowEmptyCommits: req.AllowEmpty, }) if err != nil { c.AbortWithError(http.StatusBadRequest, err) return } c.JSON(http.StatusOK, GitCommitResponse{ Hash: commitSha, }) } ================================================ FILE: apps/daemon/pkg/toolbox/git/create_branch.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "net/http" "github.com/daytonaio/daemon/pkg/git" "github.com/gin-gonic/gin" ) // CreateBranch godoc // // @Summary Create a new branch // @Description Create a new branch in the Git repository // @Tags git // @Accept json // @Produce json // @Param request body GitBranchRequest true "Create branch request" // @Success 201 // @Router /git/branches [post] // // @id CreateBranch func CreateBranch(c *gin.Context) { var req GitBranchRequest if err := c.ShouldBindJSON(&req); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } gitService := git.Service{ WorkDir: req.Path, } if err := gitService.CreateBranch(req.Name); err != nil { c.AbortWithError(http.StatusBadRequest, err) return } c.Status(http.StatusCreated) } ================================================ FILE: apps/daemon/pkg/toolbox/git/delete_branch.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "net/http" "github.com/daytonaio/daemon/pkg/git" "github.com/gin-gonic/gin" ) // DeleteBranch godoc // // @Summary Delete a branch // @Description Delete a branch from the Git repository // @Tags git // @Accept json // @Produce json // @Param request body GitDeleteBranchRequest true "Delete branch request" // @Success 204 // @Router /git/branches [delete] // // @id DeleteBranch func DeleteBranch(c *gin.Context) { var req GitDeleteBranchRequest if err := c.ShouldBindJSON(&req); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } gitService := git.Service{ WorkDir: req.Path, } if err := gitService.DeleteBranch(req.Name); err != nil { c.AbortWithError(http.StatusBadRequest, err) return } c.Status(http.StatusNoContent) } ================================================ FILE: apps/daemon/pkg/toolbox/git/history.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package git import ( "errors" "net/http" "github.com/daytonaio/daemon/pkg/git" "github.com/gin-gonic/gin" ) // GetCommitHistory godoc // // @Summary Get commit history // @Description Get the commit history of the Git repository // @Tags git // @Produce json // @Param path query string true "Repository path" // @Success 200 {array} git.GitCommitInfo // @Router /git/history [get] // // @id GetCommitHistory func GetCommitHistory(c *gin.Context) { path := c.Query("path") if path == "" { c.AbortWithError(http.StatusBadRequest, errors.New("path is required")) return } gitService := git.Service{ WorkDir: path, } log, err := gitService.Log() if err != nil { c.AbortWithError(http.StatusBadRequest, err) return } c.JSON(http.StatusOK, log) } ================================================ FILE: apps/daemon/pkg/toolbox/git/list_branches.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import ( "errors" "net/http" "github.com/daytonaio/daemon/pkg/git" "github.com/gin-gonic/gin" ) // ListBranches godoc // // @Summary List branches // @Description Get a list of all branches in the Git repository // @Tags git // @Produce json // @Param path query string true "Repository path" // @Success 200 {object} ListBranchResponse // @Router /git/branches [get] // // @id ListBranches func ListBranches(c *gin.Context) { path := c.Query("path") if path == "" { c.AbortWithError(http.StatusBadRequest, errors.New("path is required")) return } gitService := git.Service{ WorkDir: path, } branchList, err := gitService.ListBranches() if err != nil { c.AbortWithError(http.StatusBadRequest, err) return } c.JSON(http.StatusOK, ListBranchResponse{ Branches: branchList, }) } ================================================ FILE: apps/daemon/pkg/toolbox/git/pull.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "net/http" "github.com/daytonaio/daemon/pkg/git" "github.com/gin-gonic/gin" go_git "github.com/go-git/go-git/v5" go_git_http "github.com/go-git/go-git/v5/plumbing/transport/http" ) // PullChanges godoc // // @Summary Pull changes from remote // @Description Pull changes from the remote Git repository // @Tags git // @Accept json // @Produce json // @Param request body GitRepoRequest true "Pull request" // @Success 200 // @Router /git/pull [post] // // @id PullChanges func PullChanges(c *gin.Context) { var req GitRepoRequest if err := c.ShouldBindJSON(&req); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } var auth *go_git_http.BasicAuth if req.Username != nil && req.Password != nil { auth = &go_git_http.BasicAuth{ Username: *req.Username, Password: *req.Password, } } gitService := git.Service{ WorkDir: req.Path, } err := gitService.Pull(auth) if err != nil && err != go_git.NoErrAlreadyUpToDate { c.AbortWithError(http.StatusBadRequest, err) return } c.Status(http.StatusOK) } ================================================ FILE: apps/daemon/pkg/toolbox/git/push.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package git import ( "fmt" "net/http" "github.com/daytonaio/daemon/pkg/git" "github.com/gin-gonic/gin" go_git "github.com/go-git/go-git/v5" go_git_http "github.com/go-git/go-git/v5/plumbing/transport/http" ) // PushChanges godoc // // @Summary Push changes to remote // @Description Push local changes to the remote Git repository // @Tags git // @Accept json // @Produce json // @Param request body GitRepoRequest true "Push request" // @Success 200 // @Router /git/push [post] // // @id PushChanges func PushChanges(c *gin.Context) { var req GitRepoRequest if err := c.ShouldBindJSON(&req); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } var auth *go_git_http.BasicAuth if req.Username != nil && req.Password != nil { auth = &go_git_http.BasicAuth{ Username: *req.Username, Password: *req.Password, } } gitService := git.Service{ WorkDir: req.Path, } err := gitService.Push(auth) if err != nil && err != go_git.NoErrAlreadyUpToDate { c.AbortWithError(http.StatusBadRequest, err) return } c.Status(http.StatusOK) } ================================================ FILE: apps/daemon/pkg/toolbox/git/status.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package git

import (
	"errors"
	"net/http"

	"github.com/daytonaio/daemon/pkg/git"
	"github.com/gin-gonic/gin"
)

// GetStatus godoc
//
// @Summary Get Git status
// @Description Get the Git status of the repository at the specified path
// @Tags git
// @Produce json
// @Param path query string true "Repository path"
// @Success 200 {object} git.GitStatus
// @Router /git/status [get]
//
// @id GetStatus
//
// GetStatus reports the working-tree status of the repository at `path`.
func GetStatus(c *gin.Context) {
	path := c.Query("path")
	if path == "" {
		c.AbortWithError(http.StatusBadRequest, errors.New("path is required"))
		return
	}

	gitService := git.Service{
		WorkDir: path,
	}

	status, err := gitService.GetGitStatus()
	if err != nil {
		c.AbortWithError(http.StatusBadRequest, err)
		return
	}

	c.JSON(http.StatusOK, status)
}

================================================
FILE: apps/daemon/pkg/toolbox/git/types.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package git

// GitAddRequest is the payload for staging files.
type GitAddRequest struct {
	Path string `json:"path" validate:"required"`
	// files to add (use . for all files)
	Files []string `json:"files" validate:"required"`
} // @name GitAddRequest

// GitCloneRequest describes a clone: URL/path are required; credentials,
// branch and commit are optional pointers so "absent" is distinguishable.
type GitCloneRequest struct {
	URL      string  `json:"url" validate:"required"`
	Path     string  `json:"path" validate:"required"`
	Username *string `json:"username,omitempty" validate:"optional"`
	Password *string `json:"password,omitempty" validate:"optional"`
	Branch   *string `json:"branch,omitempty" validate:"optional"`
	CommitID *string `json:"commit_id,omitempty" validate:"optional"`
} // @name GitCloneRequest

// GitCommitRequest carries the commit message and author identity.
type GitCommitRequest struct {
	Path       string `json:"path" validate:"required"`
	Message    string `json:"message" validate:"required"`
	Author     string `json:"author" validate:"required"`
	Email      string `json:"email" validate:"required"`
	AllowEmpty bool   `json:"allow_empty,omitempty"`
} // @name GitCommitRequest

// GitCommitResponse returns the hash of the created commit.
type GitCommitResponse struct {
	Hash string `json:"hash" validate:"required"`
} // @name GitCommitResponse

// GitBranchRequest names a branch to create.
type GitBranchRequest struct {
	Path string `json:"path" validate:"required"`
	Name string `json:"name" validate:"required"`
} // @name GitBranchRequest

// GitDeleteBranchRequest names a branch to delete.
// NOTE(review): this type was missing the swagger @name annotation its
// sibling types all carry; added for consistency.
type GitDeleteBranchRequest struct {
	Path string `json:"path" validate:"required"`
	Name string `json:"name" validate:"required"`
} // @name GitDeleteBranchRequest

// ListBranchResponse wraps the branch names returned by ListBranches.
type ListBranchResponse struct {
	Branches []string `json:"branches" validate:"required"`
} // @name ListBranchResponse

// GitRepoRequest is the shared payload for pull/push with optional
// HTTP basic-auth credentials.
type GitRepoRequest struct {
	Path     string  `json:"path" validate:"required"`
	Username *string `json:"username,omitempty" validate:"optional"`
	Password *string `json:"password,omitempty" validate:"optional"`
} // @name GitRepoRequest

// GitCheckoutRequest names the branch (or commit) to check out.
type GitCheckoutRequest struct {
	Path   string `json:"path" validate:"required"`
	Branch string `json:"branch" validate:"required"`
} // @name GitCheckoutRequest

================================================
FILE: apps/daemon/pkg/toolbox/lsp/client.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

// LSP client plumbing: JSON-RPC message types mirroring the Language Server
// Protocol, a stdio transport for spawned language servers, and typed
// wrappers for the requests/notifications this daemon uses.
package lsp

import (
	"context"
	"fmt"
	"io"
	"os"
	"os/exec"
	"strings"

	"github.com/sourcegraph/jsonrpc2"
)

// Client wraps a JSON-RPC 2.0 connection to a language server.
type Client struct {
	conn *jsonrpc2.Conn
}

// InitializeParams is the LSP "initialize" request payload.
type InitializeParams struct {
	ProcessID    int                `json:"processId"`
	ClientInfo   ClientInfo         `json:"clientInfo"`
	RootURI      string             `json:"rootUri"`
	Capabilities ClientCapabilities `json:"capabilities"`
}

// ClientInfo identifies this client to the server.
type ClientInfo struct {
	Name    string `json:"name"`
	Version string `json:"version"`
}

// ClientCapabilities advertises which LSP features this client supports.
type ClientCapabilities struct {
	TextDocument TextDocumentClientCapabilities `json:"textDocument"`
	Workspace    WorkspaceClientCapabilities    `json:"workspace"`
}

type TextDocumentClientCapabilities struct {
	Completion     CompletionClientCapabilities     `json:"completion"`
	DocumentSymbol DocumentSymbolClientCapabilities `json:"documentSymbol"`
}

type CompletionClientCapabilities struct {
	DynamicRegistration bool                       `json:"dynamicRegistration"`
	CompletionItem      CompletionItemCapabilities `json:"completionItem"`
	ContextSupport      bool                       `json:"contextSupport"`
}

type CompletionItemCapabilities struct {
	SnippetSupport          bool     `json:"snippetSupport"`
	CommitCharactersSupport bool     `json:"commitCharactersSupport"`
	DocumentationFormat     []string `json:"documentationFormat"`
	DeprecatedSupport       bool     `json:"deprecatedSupport"`
	PreselectSupport        bool     `json:"preselectSupport"`
}

type DocumentSymbolClientCapabilities struct {
	DynamicRegistration bool           `json:"dynamicRegistration"`
	SymbolKind          SymbolKindInfo `json:"symbolKind"`
}

type SymbolKindInfo struct {
	ValueSet []int `json:"valueSet"`
}

type WorkspaceClientCapabilities struct {
	Symbol WorkspaceSymbolClientCapabilities `json:"symbol"`
}

type WorkspaceSymbolClientCapabilities struct {
	DynamicRegistration bool `json:"dynamicRegistration"`
}

// StdioStream adapts a spawned server process's stdin/stdout into the
// io.ReadWriteCloser shape jsonrpc2 expects.
type StdioStream struct {
	cmd *exec.Cmd
	in  io.WriteCloser
	out io.ReadCloser
}

type Range struct {
	Start LspPosition `json:"start" validate:"required"`
	End   LspPosition `json:"end" validate:"required"`
} // @name Range

type TextDocumentIdentifier struct {
	URI string `json:"uri" validate:"required"`
}

type VersionedTextDocumentIdentifier struct {
	URI     string `json:"uri" validate:"required"`
	Version int    `json:"version" validate:"required"`
} // @name VersionedTextDocumentIdentifier

type CompletionParams struct {
	TextDocument TextDocumentIdentifier `json:"textDocument" validate:"required"`
	Position     LspPosition            `json:"position" validate:"required"`
	Context      *CompletionContext     `json:"context,omitempty" validate:"optional"`
} // @name CompletionParams

type CompletionContext struct {
	TriggerKind      int     `json:"triggerKind" validate:"required"`
	TriggerCharacter *string `json:"triggerCharacter,omitempty" validate:"optional"`
} // @name CompletionContext

type CompletionItem struct {
	Label         string      `json:"label" validate:"required"`
	Kind          *int        `json:"kind,omitempty" validate:"optional"`
	Detail        *string     `json:"detail,omitempty" validate:"optional"`
	Documentation interface{} `json:"documentation,omitempty" validate:"optional"`
	SortText      *string     `json:"sortText,omitempty" validate:"optional"`
	FilterText    *string     `json:"filterText,omitempty" validate:"optional"`
	InsertText    *string     `json:"insertText,omitempty" validate:"optional"`
} // @name CompletionItem

type CompletionList struct {
	IsIncomplete bool             `json:"isIncomplete" validate:"required"`
	Items        []CompletionItem `json:"items" validate:"required"`
} // @name CompletionList

type LspSymbol struct {
	Kind     int         `json:"kind" validate:"required"`
	Location LspLocation `json:"location" validate:"required"`
	Name     string      `json:"name" validate:"required"`
} // @name LspSymbol

type LspLocation struct {
	Range LspRange `json:"range" validate:"required"`
	URI   string   `json:"uri" validate:"required"`
} // @name LspLocation

type LspRange struct {
	End   LspPosition `json:"end" validate:"required"`
	Start LspPosition `json:"start" validate:"required"`
} // @name LspRange

type LspPosition struct {
	Character int `json:"character" validate:"required"`
	Line      int `json:"line" validate:"required"`
} // @name LspPosition

type WorkspaceSymbolParams struct {
	Query string `json:"query" validate:"required"`
} // @name WorkspaceSymbolParams

// Read reads from the language server's stdout.
func (s *StdioStream) Read(p []byte) (n int, err error) {
	return s.out.Read(p)
}

// Write writes to the language server's stdin.
func (s *StdioStream) Write(p []byte) (n int, err error) {
	return s.in.Write(p)
}

// Close closes both pipe ends; the stdin error, if any, wins.
func (s *StdioStream) Close() error {
	if err := s.in.Close(); err != nil {
		return err
	}
	return s.out.Close()
}

// NewStdioStream wires up stdin/stdout pipes for a not-yet-started command.
// The caller is responsible for starting cmd.
func NewStdioStream(cmd *exec.Cmd) (*StdioStream, error) {
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, err
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	return &StdioStream{
		cmd: cmd,
		in:  stdin,
		out: stdout,
	}, nil
}

// NotifyDidClose sends the textDocument/didClose notification for uri.
func (c *Client) NotifyDidClose(ctx context.Context, uri string) error {
	params := map[string]interface{}{
		"textDocument": map[string]interface{}{
			"uri": uri,
		},
	}
	return c.conn.Notify(ctx, "textDocument/didClose", params)
}

// GetWorkspaceSymbols issues workspace/symbol and returns the matches.
func (c *Client) GetWorkspaceSymbols(ctx context.Context, query string) ([]LspSymbol, error) {
	params := map[string]interface{}{
		"query": query,
	}
	var symbols []LspSymbol
	err := c.conn.Call(ctx, "workspace/symbol", params, &symbols)
	return symbols, err
}

// GetCompletion issues textDocument/completion.
// NOTE(review): the parameter named `context` shadows the context package
// inside this function; only `ctx` is usable for cancellation here.
func (c *Client) GetCompletion(ctx context.Context, uri string, position LspPosition, context *CompletionContext) (*CompletionList, error) {
	params := CompletionParams{
		TextDocument: TextDocumentIdentifier{
			URI: uri,
		},
		Position: position,
		Context:  context,
	}

	var result interface{}
	if err := c.conn.Call(ctx, "textDocument/completion", params, &result); err != nil {
		return nil, err
	}

	// Handle both possible response types: CompletionList or []CompletionItem
	var completionList CompletionList

	switch v := result.(type) {
	case map[string]interface{}:
		// It's a CompletionList
		if items, ok := v["items"].([]interface{}); ok {
			completionItems := make([]CompletionItem, 0, len(items))
			for _, item := range items {
				if itemMap, ok := item.(map[string]interface{}); ok {
					completionItems = append(completionItems, parseCompletionItem(itemMap))
				}
			}
			completionList.Items = completionItems
			// NOTE(review): unchecked assertion — panics if the server omits
			// "isIncomplete" or sends a non-bool; confirm servers in use
			// always include it.
			completionList.IsIncomplete = v["isIncomplete"].(bool)
		}
	case []interface{}:
		// It's an array of CompletionItems
		completionItems := make([]CompletionItem, 0, len(v))
		for _, item := range v {
			if itemMap, ok := item.(map[string]interface{}); ok {
				completionItems = append(completionItems, parseCompletionItem(itemMap))
			}
		}
		completionList.Items = completionItems
	}

	return &completionList, nil
}

// DidOpen reads the file behind a file:// URI from local disk and sends
// textDocument/didOpen with its full content (version 1).
func (c *Client) DidOpen(ctx context.Context, uri string, languageId string) error {
	path := strings.TrimPrefix(uri, "file://")
	content, err := os.ReadFile(path)
	if err != nil {
		return fmt.Errorf("failed to read file: %w", err)
	}

	params := map[string]interface{}{
		"textDocument": map[string]interface{}{
			"uri":        uri,
			"languageId": languageId,
			"version":    1,
			"text":       string(content),
		},
	}

	return c.conn.Notify(ctx, "textDocument/didOpen", params)
}

// GetDocumentSymbols issues textDocument/documentSymbol for uri.
func (c *Client) GetDocumentSymbols(ctx context.Context, uri string) ([]LspSymbol, error) {
	params := map[string]interface{}{
		"textDocument": map[string]interface{}{
			"uri": uri,
		},
	}
	var symbols []LspSymbol
	err := c.conn.Call(ctx, "textDocument/documentSymbol", params, &symbols)
	return symbols, err
}

// parseCompletionItem converts one decoded-JSON completion item into a
// typed CompletionItem; optional fields are only set when present with the
// expected JSON type.
// NOTE(review): the "label" assertion below is unchecked and panics if the
// server omits it or sends a non-string; the LSP spec requires label, but a
// defensive `, ok` would be safer.
func parseCompletionItem(item map[string]interface{}) CompletionItem {
	ci := CompletionItem{
		Label: item["label"].(string),
	}
	// JSON numbers decode as float64; convert to the int the type expects.
	if kind, ok := item["kind"].(float64); ok {
		k := int(kind)
		ci.Kind = &k
	}
	if detail, ok := item["detail"].(string); ok {
		ci.Detail = &detail
	}
	if sortText, ok := item["sortText"].(string); ok {
		ci.SortText = &sortText
	}
	if filterText, ok := item["filterText"].(string); ok {
		ci.FilterText = &filterText
	}
	if insertText, ok := item["insertText"].(string); ok {
		ci.InsertText = &insertText
	}
	// Documentation may be a string or a MarkupContent object; pass through.
	ci.Documentation = item["documentation"]
	return ci
}

// Initialize performs the LSP handshake: the "initialize" request followed
// by the "initialized" notification.
func (c *Client) Initialize(ctx context.Context, params InitializeParams) error {
	var result interface{}
	if err := c.conn.Call(ctx, "initialize", params, &result); err != nil {
		return err
	}
	return c.conn.Notify(ctx, "initialized", nil)
}

// Shutdown tells the server to shut down.
// NOTE(review): the LSP spec defines "shutdown" as a request (expects a
// response), but it is sent here as a notification — confirm the servers in
// use tolerate this.
func (c *Client) Shutdown(ctx context.Context) error {
	return c.conn.Notify(ctx, "shutdown", nil)
}

================================================
FILE: apps/daemon/pkg/toolbox/lsp/lsp.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package lsp

import (
	"errors"
	"fmt"
	"log/slog"
	"net/http"

	"github.com/gin-gonic/gin"
)

// Start godoc
//
// @Summary Start LSP server
// @Description Start a Language Server Protocol server for the specified language
// @Tags lsp
// @Accept json
// @Produce json
// @Param request body LspServerRequest true "LSP server request"
// @Success 200
// @Router /lsp/start [post]
//
// @id Start
//
// Start returns a handler that launches (or reuses, per the service's own
// bookkeeping) an LSP server for the requested language/project pair.
func Start(logger *slog.Logger) gin.HandlerFunc {
	return func(c *gin.Context) {
		var req LspServerRequest
		if err := c.ShouldBindJSON(&req); err != nil {
			c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err))
			return
		}

		service := GetLSPService(logger)

		err := service.Start(req.LanguageId, req.PathToProject)
		if err != nil {
			// Log the real cause; return a generic message to the client.
			logger.Error("error starting LSP server", "error", err)
			c.AbortWithError(http.StatusInternalServerError, errors.New("error starting LSP server"))
			return
		}

		c.Status(http.StatusOK)
	}
}

// Stop godoc
//
// @Summary Stop LSP server
// @Description Stop a Language Server Protocol server
// @Tags lsp
// @Accept json
// @Produce json
// @Param request body LspServerRequest true "LSP server request"
// @Success 200
// @Router /lsp/stop [post]
//
// @id Stop
//
// Stop returns a handler that shuts down the LSP server for the requested
// language/project pair.
func Stop(logger *slog.Logger) gin.HandlerFunc {
	return func(c *gin.Context) {
		var req LspServerRequest
		if err := c.ShouldBindJSON(&req); err != nil {
			c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err))
			return
		}

		service := GetLSPService(logger)

		err := service.Shutdown(req.LanguageId, req.PathToProject)
		if err != nil {
			logger.Error("error stopping LSP server", "error", err)
			c.AbortWithError(http.StatusInternalServerError, errors.New("error stopping LSP server"))
			return
		}

		c.Status(http.StatusOK)
	}
}

//
// DidOpen godoc
//
// @Summary Notify document opened
// @Description Notify the LSP server that a document has been opened
// @Tags lsp
// @Accept json
// @Produce json
// @Param request body LspDocumentRequest true "Document request"
// @Success 200
// @Router /lsp/did-open [post]
//
// @id DidOpen
//
// DidOpen returns a handler that forwards a didOpen notification to the
// already-running LSP server for the request's language/project pair.
func DidOpen(logger *slog.Logger) gin.HandlerFunc {
	return func(c *gin.Context) {
		var req LspDocumentRequest
		if err := c.ShouldBindJSON(&req); err != nil {
			c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err))
			return
		}

		service := GetLSPService(logger)

		server, err := service.Get(req.LanguageId, req.PathToProject)
		if err != nil {
			c.AbortWithError(http.StatusBadRequest, err)
			return
		}

		// The server must have completed its initialize handshake before it
		// can accept document notifications.
		if !server.IsInitialized() {
			c.AbortWithError(http.StatusBadRequest, errors.New("server not initialized"))
			return
		}

		err = server.HandleDidOpen(c.Request.Context(), req.Uri)
		if err != nil {
			c.AbortWithError(http.StatusBadRequest, err)
			return
		}

		c.Status(http.StatusOK)
	}
}

// DidClose godoc
//
// @Summary Notify document closed
// @Description Notify the LSP server that a document has been closed
// @Tags lsp
// @Accept json
// @Produce json
// @Param request body LspDocumentRequest true "Document request"
// @Success 200
// @Router /lsp/did-close [post]
//
// @id DidClose
//
// DidClose mirrors DidOpen for the didClose notification.
func DidClose(logger *slog.Logger) gin.HandlerFunc {
	return func(c *gin.Context) {
		var req LspDocumentRequest
		if err := c.ShouldBindJSON(&req); err != nil {
			c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err))
			return
		}

		service := GetLSPService(logger)

		server, err := service.Get(req.LanguageId, req.PathToProject)
		if err != nil {
			c.AbortWithError(http.StatusBadRequest, err)
			return
		}

		if !server.IsInitialized() {
			c.AbortWithError(http.StatusBadRequest, errors.New("server not initialized"))
			return
		}

		err = server.HandleDidClose(c.Request.Context(), req.Uri)
		if err != nil {
			c.AbortWithError(http.StatusBadRequest, err)
			return
		}

		c.Status(http.StatusOK)
	}
}

// Completions godoc
//
// @Summary Get code completions
// @Description Get code completion suggestions from the LSP server
// @Tags lsp
// @Accept json
// @Produce json
// @Param request body LspCompletionParams true "Completion request"
// @Success 200 {object} CompletionList
// @Router /lsp/completions [post]
//
// @id Completions
//
// Completions returns a handler that proxies a textDocument/completion
// request to the running LSP server.
func Completions(logger *slog.Logger) gin.HandlerFunc {
	return func(c *gin.Context) {
		var req LspCompletionParams
		if err := c.ShouldBindJSON(&req); err != nil {
			c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err))
			return
		}

		service := GetLSPService(logger)

		server, err := service.Get(req.LanguageId, req.PathToProject)
		if err != nil {
			c.AbortWithError(http.StatusBadRequest, err)
			return
		}

		if !server.IsInitialized() {
			c.AbortWithError(http.StatusBadRequest, errors.New("server not initialized"))
			return
		}

		// Repackage the HTTP payload into LSP wire types.
		textDocument := TextDocumentIdentifier{
			URI: req.Uri,
		}

		completionParams := CompletionParams{
			TextDocument: textDocument,
			Position:     req.Position,
			Context:      req.Context,
		}

		list, err := server.HandleCompletions(c.Request.Context(), completionParams)
		if err != nil {
			c.AbortWithError(http.StatusBadRequest, err)
			return
		}

		c.JSON(http.StatusOK, list)
	}
}

// DocumentSymbols godoc
//
// @Summary Get document symbols
// @Description Get symbols (functions, classes, etc.) from a document
// @Tags lsp
// @Produce json
// @Param languageId query string true "Language ID (e.g., python, typescript)"
// @Param pathToProject query string true "Path to project"
// @Param uri query string true "Document URI"
// @Success 200 {array} LspSymbol
// @Router /lsp/document-symbols [get]
//
// @id DocumentSymbols
//
// DocumentSymbols proxies textDocument/documentSymbol; all three query
// parameters are mandatory.
func DocumentSymbols(logger *slog.Logger) gin.HandlerFunc {
	return func(c *gin.Context) {
		languageId := c.Query("languageId")
		if languageId == "" {
			c.AbortWithError(http.StatusBadRequest, errors.New("languageId is required"))
			return
		}

		pathToProject := c.Query("pathToProject")
		if pathToProject == "" {
			c.AbortWithError(http.StatusBadRequest, errors.New("pathToProject is required"))
			return
		}

		uri := c.Query("uri")
		if uri == "" {
			c.AbortWithError(http.StatusBadRequest, errors.New("uri is required"))
			return
		}

		service := GetLSPService(logger)

		server, err := service.Get(languageId, pathToProject)
		if err != nil {
			c.AbortWithError(http.StatusBadRequest, err)
			return
		}

		if !server.IsInitialized() {
			c.AbortWithError(http.StatusBadRequest, errors.New("server not initialized"))
			return
		}

		symbols, err := server.HandleDocumentSymbols(c.Request.Context(), uri)
		if err != nil {
			c.AbortWithError(http.StatusBadRequest, err)
			return
		}

		c.JSON(http.StatusOK, symbols)
	}
}

// WorkspaceSymbols godoc
//
// @Summary Get workspace symbols
// @Description Search for symbols across the entire workspace
// @Tags lsp
// @Produce json
// @Param query query string true "Search query"
// @Param languageId query string true "Language ID (e.g., python, typescript)"
// @Param pathToProject query string true "Path to project"
// @Success 200 {array} LspSymbol
// @Router /lsp/workspacesymbols [get]
//
// @id WorkspaceSymbols
//
// WorkspaceSymbols proxies workspace/symbol; all three query parameters are
// mandatory.
func WorkspaceSymbols(logger *slog.Logger) gin.HandlerFunc {
	return func(c *gin.Context) {
		query := c.Query("query")
		if query == "" {
			c.AbortWithError(http.StatusBadRequest, errors.New("query is required"))
			return
		}

		languageId := c.Query("languageId")
		if languageId == "" {
			c.AbortWithError(http.StatusBadRequest, errors.New("languageId is required"))
			return
		}

		pathToProject := c.Query("pathToProject")
		if pathToProject == "" {
			c.AbortWithError(http.StatusBadRequest, errors.New("pathToProject is required"))
			return
		}

		service := GetLSPService(logger)

		server, err := service.Get(languageId, pathToProject)
		if err != nil {
			c.AbortWithError(http.StatusBadRequest, err)
			return
		}

		if !server.IsInitialized() {
			c.AbortWithError(http.StatusBadRequest, errors.New("server not initialized"))
			return
		}

		symbols, err := server.HandleWorkspaceSymbols(c.Request.Context(), query)
		if err != nil {
			c.AbortWithError(http.StatusBadRequest, err)
			return
		}

		c.JSON(http.StatusOK, symbols)
	}
}

================================================
FILE: apps/daemon/pkg/toolbox/lsp/python_lsp.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package lsp

import (
	"context"
	"fmt"
	"log/slog"
	"os"
	"os/exec"

	"github.com/sourcegraph/jsonrpc2"
)

// PythonLSPServer drives a `pylsp` process over stdio.
type PythonLSPServer struct {
	*LSPServerAbstract
}

// Initialize spawns pylsp, wires it to a JSON-RPC connection, and performs
// the LSP initialize handshake rooted at pathToProject.
// (This function is truncated at the end of the visible chunk.)
func (s *PythonLSPServer) Initialize(pathToProject string) error {
	ctx := context.Background()
	cmd := exec.Command("pylsp")

	stream, err := NewStdioStream(cmd)
	if err != nil {
		return fmt.Errorf("failed to create stdio stream: %w", err)
	}

	if err := cmd.Start(); err != nil {
		return fmt.Errorf("failed to start Python LSP server: %w", err)
	}

	// Server-initiated requests are only logged, never answered.
	handler := jsonrpc2.HandlerWithError(func(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) (interface{}, error) {
		s.logger.Debug("Received request", "method", req.Method)
		if req.Params != nil {
			s.logger.Debug("Request params", "params", req.Params)
		}
		return nil, nil
	})

	conn := jsonrpc2.NewConn(ctx, jsonrpc2.NewBufferedStream(stream, jsonrpc2.VSCodeObjectCodec{}), handler)
	client := &Client{conn: conn}

	params := InitializeParams{
		ProcessID: os.Getpid(),
		ClientInfo: ClientInfo{
			// NOTE(review): "datyona" looks like a typo for "daytona";
			// cosmetic (client name reported to the server) — cannot be
			// changed in a comment-only edit.
			Name:    "datyona-python-lsp-client",
			Version: "0.0.1",
		},
		RootURI: "file://"
+ pathToProject, Capabilities: ClientCapabilities{ TextDocument: TextDocumentClientCapabilities{ Completion: CompletionClientCapabilities{ DynamicRegistration: true, CompletionItem: CompletionItemCapabilities{ SnippetSupport: true, CommitCharactersSupport: true, DocumentationFormat: []string{"markdown", "plaintext"}, DeprecatedSupport: true, PreselectSupport: true, }, ContextSupport: true, }, DocumentSymbol: DocumentSymbolClientCapabilities{ DynamicRegistration: true, SymbolKind: SymbolKindInfo{ ValueSet: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, }, }, }, Workspace: WorkspaceClientCapabilities{ Symbol: WorkspaceSymbolClientCapabilities{ DynamicRegistration: true, }, }, }, } if err := client.Initialize(ctx, params); err != nil { conn.Close() killerr := cmd.Process.Kill() if killerr != nil { return fmt.Errorf("failed to initialize Python LSP connection: %w, failed to kill process: %w", err, killerr) } return fmt.Errorf("failed to initialize Python LSP connection: %w", err) } s.client = client s.initialized = true return nil } func (s *PythonLSPServer) Shutdown() error { err := s.client.Shutdown(context.Background()) if err != nil { return fmt.Errorf("failed to shutdown Python LSP server: %w", err) } s.initialized = false return nil } func NewPythonLSPServer(logger *slog.Logger) *PythonLSPServer { return &PythonLSPServer{ LSPServerAbstract: &LSPServerAbstract{ languageId: "python", logger: logger.With(slog.String("component", "python_lsp_server")), }, } } ================================================ FILE: apps/daemon/pkg/toolbox/lsp/server.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package lsp import ( "context" "log/slog" ) type LSPServer interface { Initialize(pathToProject string) error IsInitialized() bool Shutdown() error HandleDidOpen(ctx context.Context, uri string) error HandleDidClose(ctx context.Context, uri string) error HandleCompletions(ctx context.Context, params CompletionParams) (*CompletionList, error) HandleDocumentSymbols(ctx context.Context, uri string) ([]LspSymbol, error) HandleWorkspaceSymbols(ctx context.Context, query string) ([]LspSymbol, error) } type LSPServerAbstract struct { client *Client logger *slog.Logger languageId string initialized bool } // Add new request types type WorkspaceSymbolRequest struct { Query string `json:"query"` } func (s *LSPServerAbstract) IsInitialized() bool { return s.initialized } func (s *LSPServerAbstract) HandleDidOpen(ctx context.Context, uri string) error { if err := s.client.DidOpen(ctx, uri, s.languageId); err != nil { return err } return nil } func (s *LSPServerAbstract) HandleDidClose(ctx context.Context, uri string) error { if err := s.client.NotifyDidClose(ctx, uri); err != nil { return err } return nil } func (s *LSPServerAbstract) HandleCompletions(ctx context.Context, params CompletionParams) (*CompletionList, error) { completions, err := s.client.GetCompletion( ctx, params.TextDocument.URI, params.Position, params.Context, ) if err != nil { return nil, err } return completions, nil } func (s *LSPServerAbstract) HandleDocumentSymbols(ctx context.Context, uri string) ([]LspSymbol, error) { symbols, err := s.client.GetDocumentSymbols(ctx, uri) if err != nil { return nil, err } return symbols, nil } func (s *LSPServerAbstract) HandleWorkspaceSymbols(ctx context.Context, query string) ([]LspSymbol, error) { symbols, err := s.client.GetWorkspaceSymbols(ctx, query) if err != nil { return nil, err } return symbols, nil } ================================================ FILE: apps/daemon/pkg/toolbox/lsp/service.go 
================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package lsp import ( "encoding/base64" "fmt" "log/slog" "sync" ) type LSPService struct { logger *slog.Logger servers map[string]LSPServer } var ( instance *LSPService once sync.Once ) func GetLSPService(logger *slog.Logger) *LSPService { once.Do(func() { instance = &LSPService{ logger: logger, servers: make(map[string]LSPServer), } }) return instance } func (s *LSPService) Get(languageId string, pathToProject string) (LSPServer, error) { key := generateKey(languageId, pathToProject) if server, ok := s.servers[key]; ok { return server, nil } switch languageId { case "typescript": server := NewTypeScriptLSPServer(s.logger) s.servers[key] = server return server, nil case "python": server := NewPythonLSPServer(s.logger) s.servers[key] = server return server, nil default: return nil, fmt.Errorf("unsupported language: %s", languageId) } } func (s *LSPService) Start(languageId string, pathToProject string) error { key := generateKey(languageId, pathToProject) server, ok := s.servers[key] if ok { if server.IsInitialized() { return nil } } else { newServer := NewTypeScriptLSPServer(s.logger) s.servers[key] = newServer server = newServer } err := server.Initialize(pathToProject) if err != nil { return fmt.Errorf("failed to create TypeScript LSP server: %w", err) } return nil } func (s *LSPService) Shutdown(languageId string, pathToProject string) error { key := generateKey(languageId, pathToProject) server, ok := s.servers[key] if !ok { return fmt.Errorf("no server for language: %s", languageId) } err := server.Shutdown() delete(s.servers, key) return err } func generateKey(languageId, pathToProject string) string { data := fmt.Sprintf("%s:%s", languageId, pathToProject) return base64.StdEncoding.EncodeToString([]byte(data)) } ================================================ FILE: apps/daemon/pkg/toolbox/lsp/types.go 
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package lsp

// LspServerRequest identifies an LSP server by language and project root.
type LspServerRequest struct {
	LanguageId    string `json:"languageId" validate:"required"`
	PathToProject string `json:"pathToProject" validate:"required"`
} // @name LspServerRequest

// LspDocumentRequest addresses one document within a server's project.
type LspDocumentRequest struct {
	LanguageId    string `json:"languageId" validate:"required"`
	PathToProject string `json:"pathToProject" validate:"required"`
	Uri           string `json:"uri" validate:"required"`
} // @name LspDocumentRequest

// LspCompletionParams is the flat HTTP payload for a completion request;
// handlers re-shape it into LSP CompletionParams.
type LspCompletionParams struct {
	LanguageId    string             `json:"languageId" validate:"required"`
	PathToProject string             `json:"pathToProject" validate:"required"`
	Uri           string             `json:"uri" validate:"required"`
	Position      LspPosition        `json:"position" validate:"required"`
	Context       *CompletionContext `json:"context,omitempty" validate:"optional"`
} // @name LspCompletionParams

================================================
FILE: apps/daemon/pkg/toolbox/lsp/typescript_lsp.go
================================================

// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package lsp import ( "context" "fmt" "log/slog" "os" "os/exec" "github.com/sourcegraph/jsonrpc2" ) type TypescriptLSPServer struct { *LSPServerAbstract } func (s *TypescriptLSPServer) Initialize(pathToProject string) error { ctx := context.Background() cmd := exec.Command("typescript-language-server", "--stdio") stream, err := NewStdioStream(cmd) if err != nil { return fmt.Errorf("failed to create stdio stream: %w", err) } if err := cmd.Start(); err != nil { return fmt.Errorf("failed to start LSP server: %w", err) } handler := jsonrpc2.HandlerWithError(func(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) (interface{}, error) { s.logger.Debug("Received request", "method", req.Method) if req.Params != nil { s.logger.Debug("Request params", "params", req.Params) } return nil, nil }) conn := jsonrpc2.NewConn(ctx, jsonrpc2.NewBufferedStream(stream, jsonrpc2.VSCodeObjectCodec{}), handler) client := &Client{conn: conn} params := InitializeParams{ ProcessID: os.Getpid(), ClientInfo: ClientInfo{ Name: "datyona-typescript-lsp-client", Version: "0.0.1", }, RootURI: "file://" + pathToProject, Capabilities: ClientCapabilities{ TextDocument: TextDocumentClientCapabilities{ Completion: CompletionClientCapabilities{ DynamicRegistration: true, CompletionItem: CompletionItemCapabilities{ SnippetSupport: true, CommitCharactersSupport: true, DocumentationFormat: []string{"markdown", "plaintext"}, DeprecatedSupport: true, PreselectSupport: true, }, ContextSupport: true, }, DocumentSymbol: DocumentSymbolClientCapabilities{ DynamicRegistration: true, SymbolKind: SymbolKindInfo{ ValueSet: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, }, }, }, Workspace: WorkspaceClientCapabilities{ Symbol: WorkspaceSymbolClientCapabilities{ DynamicRegistration: true, }, }, }, } if err := client.Initialize(ctx, params); err != nil { conn.Close() killerr := cmd.Process.Kill() if killerr != nil { return fmt.Errorf("failed to initialize Typescript LSP 
connection: %w, failed to kill process: %w", err, killerr) } return fmt.Errorf("failed to initialize Typescript LSP connection: %w", err) } s.client = client s.initialized = true return nil } func (s *TypescriptLSPServer) Shutdown() error { err := s.client.Shutdown(context.Background()) if err != nil { return fmt.Errorf("failed to shutdown Typescript LSP server: %w", err) } s.initialized = false return nil } func NewTypeScriptLSPServer(logger *slog.Logger) *TypescriptLSPServer { return &TypescriptLSPServer{ LSPServerAbstract: &LSPServerAbstract{ languageId: "typescript", logger: logger.With(slog.String("component", "typescript_lsp_server")), }, } } ================================================ FILE: apps/daemon/pkg/toolbox/middlewares/error.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package middlewares import ( "net/http" "time" "github.com/daytonaio/daemon/pkg/common" "github.com/gin-gonic/gin" ) func ErrorMiddleware() gin.HandlerFunc { return func(ctx *gin.Context) { ctx.Next() if len(ctx.Errors) > 0 { err := ctx.Errors.Last() statusCode := ctx.Writer.Status() errorResponse := common.ErrorResponse{ StatusCode: statusCode, Message: err.Error(), Code: http.StatusText(statusCode), Timestamp: time.Now(), Path: ctx.Request.URL.Path, Method: ctx.Request.Method, } ctx.Header("Content-Type", "application/json") ctx.AbortWithStatusJSON(statusCode, errorResponse) } } } ================================================ FILE: apps/daemon/pkg/toolbox/port/detector.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package port import ( "context" "errors" "fmt" "net" "net/http" "strconv" "time" "github.com/cakturk/go-netstat/netstat" "github.com/gin-gonic/gin" cmap "github.com/orcaman/concurrent-map/v2" ) type portsDetector struct { portMap cmap.ConcurrentMap[string, bool] } func NewPortsDetector() *portsDetector { return &portsDetector{ portMap: cmap.New[bool](), } } func (d *portsDetector) Start(ctx context.Context) { for { select { case <-ctx.Done(): return default: time.Sleep(1 * time.Second) // get only listening TCP sockets tabs, err := netstat.TCPSocks(func(s *netstat.SockTabEntry) bool { return s.State == netstat.Listen }) if err != nil { continue } freshMap := map[string]bool{} for _, e := range tabs { s := strconv.Itoa(int(e.LocalAddr.Port)) freshMap[s] = true d.portMap.Set(s, true) } for _, port := range d.portMap.Keys() { if !freshMap[port] { d.portMap.Remove(port) } } } } } // GetPorts godoc // // @Summary Get active ports // @Description Get a list of all currently active ports // @Tags port // @Produce json // @Success 200 {object} PortList // @Router /port [get] // // @id GetPorts func (d *portsDetector) GetPorts(c *gin.Context) { ports := PortList{ Ports: []uint{}, } for _, port := range d.portMap.Keys() { portInt, err := strconv.Atoi(port) if err != nil { continue } ports.Ports = append(ports.Ports, uint(portInt)) } c.JSON(http.StatusOK, ports) } // IsPortInUse godoc // // @Summary Check if port is in use // @Description Check if a specific port is currently in use // @Tags port // @Produce json // @Param port path int true "Port number (3000-9999)" // @Success 200 {object} IsPortInUseResponse // @Router /port/{port}/in-use [get] // // @id IsPortInUse func (d *portsDetector) IsPortInUse(c *gin.Context) { portParam := c.Param("port") port, err := strconv.Atoi(portParam) if err != nil { c.AbortWithError(http.StatusBadRequest, errors.New("invalid port: must be a number between 3000 and 9999")) return } if port < 3000 || port 
> 9999 { c.AbortWithError(http.StatusBadRequest, errors.New("port out of range: must be between 3000 and 9999")) return } portStr := strconv.Itoa(port) if d.portMap.Has(portStr) { c.JSON(http.StatusOK, IsPortInUseResponse{ IsInUse: true, }) } else { // If the port is not in the map, we check synchronously if it's in use and update the map _, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%d", port), 50*time.Millisecond) if err != nil { c.JSON(http.StatusOK, IsPortInUseResponse{ IsInUse: false, }) } else { d.portMap.Set(portStr, true) c.JSON(http.StatusOK, IsPortInUseResponse{ IsInUse: true, }) } } } ================================================ FILE: apps/daemon/pkg/toolbox/port/types.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package port type PortList struct { Ports []uint `json:"ports"` } // @name PortList type IsPortInUseResponse struct { IsInUse bool `json:"isInUse"` } // @name IsPortInUseResponse ================================================ FILE: apps/daemon/pkg/toolbox/process/execute.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 package process import ( "bytes" "errors" "log/slog" "net/http" "os/exec" "time" "github.com/gin-gonic/gin" ) // ExecuteCommand godoc // // @Summary Execute a command // @Description Execute a shell command and return the output and exit code // @Tags process // @Accept json // @Produce json // @Param request body ExecuteRequest true "Command execution request" // @Success 200 {object} ExecuteResponse // @Router /process/execute [post] // // @id ExecuteCommand func ExecuteCommand(logger *slog.Logger) gin.HandlerFunc { return func(c *gin.Context) { var request ExecuteRequest if err := c.ShouldBindJSON(&request); err != nil { c.AbortWithError(http.StatusBadRequest, errors.New("command is required")) return } cmdParts := parseCommand(request.Command) if len(cmdParts) == 0 { c.AbortWithError(http.StatusBadRequest, errors.New("empty command")) return } cmd := exec.Command(cmdParts[0], cmdParts[1:]...) if request.Cwd != nil { cmd.Dir = *request.Cwd } // set maximum execution time timeout := 360 * time.Second if request.Timeout != nil && *request.Timeout > 0 { timeout = time.Duration(*request.Timeout) * time.Second } timeoutReached := false timer := time.AfterFunc(timeout, func() { timeoutReached = true if cmd.Process != nil { // kill the process group err := cmd.Process.Kill() if err != nil { logger.Error("failed to kill process", "error", err) return } } }) defer timer.Stop() output, err := cmd.CombinedOutput() if err != nil { if timeoutReached { c.AbortWithError(http.StatusRequestTimeout, errors.New("command execution timeout")) return } if exitError, ok := err.(*exec.ExitError); ok { exitCode := exitError.ExitCode() c.JSON(http.StatusOK, ExecuteResponse{ ExitCode: exitCode, Result: string(output), }) return } c.JSON(http.StatusOK, ExecuteResponse{ ExitCode: -1, Result: string(output), }) return } if cmd.ProcessState == nil { c.JSON(http.StatusOK, ExecuteResponse{ ExitCode: -1, Result: string(output), }) return } exitCode := 
cmd.ProcessState.ExitCode() c.JSON(http.StatusOK, ExecuteResponse{ ExitCode: exitCode, Result: string(output), }) } } // parseCommand splits a command string properly handling quotes func parseCommand(command string) []string { var args []string var current bytes.Buffer var inQuotes bool var quoteChar rune for _, r := range command { switch { case r == '"' || r == '\'': if !inQuotes { inQuotes = true quoteChar = r } else if quoteChar == r { inQuotes = false quoteChar = 0 } else { current.WriteRune(r) } case r == ' ' && !inQuotes: if current.Len() > 0 { args = append(args, current.String()) current.Reset() } default: current.WriteRune(r) } } if current.Len() > 0 { args = append(args, current.String()) } return args } ================================================ FILE: apps/daemon/pkg/toolbox/process/interpreter/controller.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package interpreter import ( "encoding/json" "errors" "fmt" "log/slog" "net/http" "time" common_errors "github.com/daytonaio/common-go/pkg/errors" "github.com/daytonaio/daemon/internal/util" "github.com/gin-gonic/gin" "github.com/gorilla/websocket" ) func NewInterpreterController(logger *slog.Logger, workDir string) *Controller { InitManager(workDir) // Pre-warm the default interpreter context to reduce latency on first request go func() { _, err := GetOrCreateDefaultContext(logger) if err != nil { logger.Debug("Failed to pre-create default interpreter context", "error", err) } }() return &Controller{logger: logger.With(slog.String("component", "interpreter_controller")), workDir: workDir} } // CreateContext creates a new interpreter context // // @Summary Create a new interpreter context // @Description Creates a new isolated interpreter context with optional working directory and language // @Tags interpreter // @Accept json // @Produce json // @Param request body CreateContextRequest true "Context configuration" // 
@Success 200 {object} InterpreterContext
// @Failure 400 {object} map[string]string
// @Failure 500 {object} map[string]string
// @Router /process/interpreter/context [post]
//
// @id CreateInterpreterContext
func (c *Controller) CreateContext(ctx *gin.Context) {
	var req CreateContextRequest
	err := ctx.ShouldBindJSON(&req)
	if err != nil {
		ctx.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request payload: %w", err))
		return
	}
	// Python is the default and, currently, the only accepted language.
	language := LanguagePython
	if req.Language != nil {
		language = *req.Language
	}
	if language != LanguagePython {
		ctx.AbortWithError(http.StatusBadRequest, fmt.Errorf("unsupported language: %s (only '%s' is supported currently)", language, LanguagePython))
		return
	}
	// Fall back to the controller's working directory when none is supplied.
	cwd := c.workDir
	if req.Cwd != nil {
		cwd = *req.Cwd
	}
	iCtx, err := CreateContext(c.logger, cwd, language)
	if err != nil {
		ctx.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	info := iCtx.Info()
	ctx.JSON(http.StatusOK, ContextInfo{
		ID:        info.ID,
		Cwd:       info.Cwd,
		CreatedAt: info.CreatedAt,
		Active:    info.Active,
		Language:  info.Language,
	})
}

// Execute executes code in an interpreter context via WebSocket
//
// @Summary Execute code in an interpreter context
// @Description Executes code in a specified context (or default context if not specified) via WebSocket streaming
// @Tags interpreter
// @Accept json
// @Produce json
// @Router /process/interpreter/execute [get]
// @Success 101 {string} string "Switching Protocols"
// @Header 101 {string} Upgrade "websocket"
// @Header 101 {string} Connection "Upgrade"
//
// @id ExecuteInterpreterCode
func (c *Controller) Execute(ctx *gin.Context) {
	// Upgrade to WebSocket
	ws, err := util.UpgradeToWebSocket(ctx.Writer, ctx.Request)
	if err != nil {
		ctx.AbortWithStatus(http.StatusBadRequest)
		return
	}
	// The first WebSocket message carries the JSON execution request.
	_, payload, err := ws.ReadMessage()
	if err != nil {
		writeWSError(ws, "failed to read first message", websocket.CloseProtocolError)
		return
	}
	var req ExecuteRequest
	err = json.Unmarshal(payload, &req)
	if err != nil {
		writeWSError(ws, "invalid JSON payload", websocket.CloseProtocolError)
		return
	}
	if req.Code == "" {
		writeWSError(ws, "code is required", websocket.ClosePolicyViolation)
		return
	}
	// Default timeout is 10 minutes; an explicit 0 disables the timeout.
	timeout := 10 * time.Minute
	if req.Timeout != nil {
		if *req.Timeout < 0 {
			writeWSError(ws, "timeout must be greater than or equal to 0", websocket.ClosePolicyViolation)
			return
		}
		if *req.Timeout == 0 {
			timeout = 0
		} else {
			timeout = time.Duration(*req.Timeout) * time.Second
		}
	}
	// Resolve the target context: the shared default or an explicit one.
	var iCtx *Context
	if req.ContextID == nil {
		iCtx, err = GetOrCreateDefaultContext(c.logger)
		if err != nil {
			writeWSError(ws, "failed to get default context: "+err.Error(), websocket.CloseInternalServerErr)
			return
		}
	} else {
		iCtx, err = GetContext(*req.ContextID)
		if err != nil {
			writeWSError(ws, "context not found: "+*req.ContextID, websocket.ClosePolicyViolation)
			return
		}
	}
	contextInfo := iCtx.Info()
	if !contextInfo.Active {
		writeWSError(ws, "context is not active", websocket.ClosePolicyViolation)
		return
	}
	var envs map[string]string
	if req.Envs != nil {
		envs = *req.Envs
	}
	// Queue the job; output streams back over the WebSocket asynchronously.
	go iCtx.enqueueAndExecute(req.Code, envs, timeout, ws)
}

// writeWSError sends an error message to the WebSocket and closes the connection
func writeWSError(ws *websocket.Conn, value string, closeCode int) {
	closeMessage := websocket.FormatCloseMessage(closeCode, value)
	_ = ws.WriteControl(websocket.CloseMessage, closeMessage, time.Now().Add(writeWait))
	_ = ws.Close()
}

// DeleteContext deletes an interpreter context
//
// @Summary Delete an interpreter context
// @Description Deletes an interpreter context and shuts down its worker process
// @Tags interpreter
// @Produce json
// @Param id path string true "Context ID"
// @Success 200 {object} map[string]string
// @Failure 400 {object} map[string]string
// @Failure 404 {object} map[string]string
// @Router /process/interpreter/context/{id} [delete]
//
// @id DeleteInterpreterContext
func (c *Controller) DeleteContext(ctx *gin.Context) {
	id := ctx.Param("id")
	if id == "" {
		ctx.AbortWithError(http.StatusBadRequest, errors.New("context ID is required"))
		return
	}
	// The shared default context must always remain available.
	if id == "default" {
		ctx.AbortWithError(http.StatusBadRequest, errors.New("cannot delete default context"))
		return
	}
	err := DeleteContext(id)
	if err != nil {
		if common_errors.IsNotFoundError(err) {
			ctx.AbortWithError(http.StatusNotFound, err)
			return
		}
		ctx.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	ctx.JSON(http.StatusOK, gin.H{"message": "Context deleted successfully"})
}

// ListContexts lists all user-created interpreter contexts (excludes default)
//
// @Summary List all user-created interpreter contexts
// @Description Returns information about all user-created interpreter contexts (excludes default context)
// @Tags interpreter
// @Produce json
// @Success 200 {object} ListContextsResponse
// @Router /process/interpreter/context [get]
//
// @id ListInterpreterContexts
func (c *Controller) ListContexts(ctx *gin.Context) {
	allContexts := ListContexts()
	// Filter out the implicit default context from the listing.
	userContexts := make([]ContextInfo, 0, len(allContexts))
	for _, context := range allContexts {
		if context.ID != "default" {
			userContexts = append(userContexts, context)
		}
	}
	ctx.JSON(http.StatusOK, ListContextsResponse{Contexts: userContexts})
}

================================================
FILE: apps/daemon/pkg/toolbox/process/interpreter/manager.go
================================================

// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package interpreter import ( "fmt" "log/slog" "sync" "time" common_errors "github.com/daytonaio/common-go/pkg/errors" "github.com/google/uuid" ) // Manager manages multiple interpreter contexts type Manager struct { contexts map[string]*Context mu sync.RWMutex defaultCwd string } var globalManager *Manager // InitManager initializes the global context manager func InitManager(defaultCwd string) { globalManager = &Manager{ contexts: make(map[string]*Context), defaultCwd: defaultCwd, } } // CreateContext creates a new interpreter context func (m *Manager) CreateContext(logger *slog.Logger, id, cwd, language string) (*Context, error) { m.mu.Lock() defer m.mu.Unlock() if _, exists := m.contexts[id]; exists { return nil, fmt.Errorf("context with ID '%s' already exists", id) } if language != "" && language != LanguagePython { return nil, fmt.Errorf("unsupported language: %s (only '%s' is supported)", language, LanguagePython) } if language == "" { language = LanguagePython } if cwd == "" { cwd = m.defaultCwd } iCtx := &Context{ info: ContextInfo{ ID: id, Cwd: cwd, CreatedAt: time.Now(), Active: false, Language: language, }, logger: logger.With(slog.String("context_id", id)), } err := iCtx.start() if err != nil { return nil, fmt.Errorf("failed to start context: %w", err) } m.contexts[id] = iCtx return iCtx, nil } // GetContext retrieves an existing context by ID func (m *Manager) GetContext(id string) (*Context, error) { m.mu.RLock() iCtx, exists := m.contexts[id] m.mu.RUnlock() if !exists { return nil, fmt.Errorf("context with ID '%s' not found", id) } info := iCtx.Info() if !info.Active { err := iCtx.start() if err != nil { return nil, fmt.Errorf("failed to restart context '%s': %w", id, err) } } return iCtx, nil } // GetOrCreateDefaultContext gets or creates the default context func (m *Manager) GetOrCreateDefaultContext(logger *slog.Logger) (*Context, error) { m.mu.RLock() iCtx, exists := m.contexts["default"] m.mu.RUnlock() if 
exists { info := iCtx.Info() if !info.Active { err := iCtx.start() if err != nil { return nil, fmt.Errorf("failed to restart default context: %w", err) } } return iCtx, nil } return m.CreateContext(logger, "default", m.defaultCwd, LanguagePython) } // DeleteContext removes a context and shuts it down func (m *Manager) DeleteContext(id string) error { m.mu.Lock() defer m.mu.Unlock() iCtx, exists := m.contexts[id] if !exists { return common_errors.NewNotFoundError(fmt.Errorf("context with ID '%s' not found", id)) } iCtx.shutdown() delete(m.contexts, id) return nil } // ListContexts returns information about all contexts func (m *Manager) ListContexts() []ContextInfo { m.mu.RLock() defer m.mu.RUnlock() contexts := make([]ContextInfo, 0, len(m.contexts)) for _, iCtx := range m.contexts { contexts = append(contexts, iCtx.Info()) } return contexts } // Global convenience functions // CreateContext creates a new context using the global manager func CreateContext(logger *slog.Logger, cwd, language string) (*Context, error) { if globalManager == nil { return nil, fmt.Errorf("context manager not initialized") } id := uuid.NewString() return globalManager.CreateContext(logger, id, cwd, language) } // GetContext gets a context by ID using the global manager func GetContext(id string) (*Context, error) { if globalManager == nil { return nil, fmt.Errorf("context manager not initialized") } return globalManager.GetContext(id) } // GetOrCreateDefaultContext gets or creates the default context func GetOrCreateDefaultContext(logger *slog.Logger) (*Context, error) { if globalManager == nil { return nil, fmt.Errorf("context manager not initialized") } return globalManager.GetOrCreateDefaultContext(logger) } // DeleteContext deletes a context using the global manager func DeleteContext(id string) error { if globalManager == nil { return fmt.Errorf("context manager not initialized") } return globalManager.DeleteContext(id) } // ListContexts lists all contexts using the global manager 
func ListContexts() []ContextInfo { if globalManager == nil { return []ContextInfo{} } return globalManager.ListContexts() } ================================================ FILE: apps/daemon/pkg/toolbox/process/interpreter/repl_client.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package interpreter import ( "bufio" "context" _ "embed" "encoding/json" "errors" "fmt" "os" "os/exec" "path/filepath" "syscall" "time" common_errors "github.com/daytonaio/common-go/pkg/errors" "github.com/google/uuid" "github.com/gorilla/websocket" ) //go:embed repl_worker.py var pythonWorkerScript string // Info returns the current context information func (c *Context) Info() ContextInfo { c.mu.Lock() defer c.mu.Unlock() return c.info } // enqueueAndExecute enqueues a job and processes jobs FIFO ensuring single execution at a time func (c *Context) enqueueAndExecute(code string, envs map[string]string, timeout time.Duration, ws *websocket.Conn) { c.mu.Lock() if c.queue == nil { c.queue = make(chan execJob, 128) go c.processQueue() } c.mu.Unlock() job := execJob{code: code, envs: envs, timeout: timeout, ws: ws} c.queue <- job } func (c *Context) processQueue() { for job := range c.queue { if job.ws != nil { go c.attachWebSocket(job.ws) } result, err := c.executeCode(job.code, job.envs, job.timeout) if err != nil && common_errors.IsRequestTimeoutError(err) || result.Status == CommandStatusTimeout { c.closeClient(WebSocketCloseTimeout, "") } else { c.closeClient(websocket.CloseNormalClosure, "") } } } // closeClient closes the WebSocket client with specified close code func (c *Context) closeClient(code int, message string) { c.mu.Lock() defer c.mu.Unlock() if c.client == nil { return } c.client.requestClose(code, message) c.client = nil } // executeCode executes code in the interpreter context func (c *Context) executeCode(code string, envs map[string]string, timeout time.Duration) (*CommandExecution, 
error) {
	cmdID := uuid.NewString()
	execution := &CommandExecution{
		ID:        cmdID,
		Code:      code,
		Status:    CommandStatusRunning,
		StartedAt: time.Now(),
	}

	// Register as the single active command; handleChunk/monitorProcess
	// update its status as worker output arrives.
	c.commandMu.Lock()
	c.activeCommand = execution
	c.commandMu.Unlock()

	workerCmd := WorkerCommand{ID: cmdID, Code: code, Envs: envs}
	err := c.sendCommand(workerCmd)
	if err != nil {
		execution.Status = CommandStatusError
		now := time.Now()
		execution.EndedAt = &now
		execution.Error = &Error{Name: "CommunicationError", Value: err.Error()}
		return execution, err
	}

	// Poll for completion: resultChan fires once the active command leaves
	// the "running" state (set by handleChunk or monitorProcess).
	resultChan := make(chan bool, 1)
	go func() {
		for {
			time.Sleep(50 * time.Millisecond)
			c.commandMu.Lock()
			if c.activeCommand == nil || c.activeCommand.Status != CommandStatusRunning {
				c.commandMu.Unlock()
				resultChan <- true
				return
			}
			c.commandMu.Unlock()
		}
	}()

	// A nil channel blocks forever in select, so timeout <= 0 disables it.
	var timeoutC <-chan time.Time
	if timeout > 0 {
		timer := time.NewTimer(timeout)
		defer timer.Stop()
		timeoutC = timer.C
	}

	select {
	case <-resultChan:
		c.commandMu.Lock()
		result := c.activeCommand
		c.activeCommand = nil
		c.commandMu.Unlock()
		return result, nil
	case <-timeoutC:
		// First try SIGINT so the worker can raise KeyboardInterrupt and
		// report an "interrupted" control chunk.
		if c.cmd != nil && c.cmd.Process != nil {
			_ = c.cmd.Process.Signal(syscall.SIGINT)
		}
		graceful := time.NewTimer(gracePeriod)
		defer graceful.Stop()
		select {
		case <-resultChan:
			c.commandMu.Lock()
			result := c.activeCommand
			c.activeCommand = nil
			c.commandMu.Unlock()
			return result, nil
		case <-graceful.C:
			// Worker ignored SIGINT within the grace period: kill it.
			if c.cmd != nil && c.cmd.Process != nil {
				_ = c.cmd.Process.Kill()
			}
			c.commandMu.Lock()
			if c.activeCommand != nil {
				c.activeCommand.Status = CommandStatusTimeout
				now := time.Now()
				c.activeCommand.EndedAt = &now
				c.activeCommand.Error = &Error{
					Name:  "TimeoutError",
					Value: "Execution timeout - code took too long to execute",
				}
				result := c.activeCommand
				c.activeCommand = nil
				c.commandMu.Unlock()
				return result, common_errors.NewRequestTimeoutError(fmt.Errorf("execution timeout"))
			}
			c.commandMu.Unlock()
			return execution, common_errors.NewRequestTimeoutError(fmt.Errorf("execution timeout"))
		}
	}
}

// start initializes and starts the Python worker process
func (c *Context) start() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Already running?
	if c.info.Active && c.cmd != nil && c.stdin != nil {
		return nil
	}

	ctx, cancel := context.WithCancel(context.Background())
	c.ctx = ctx
	c.cancel = cancel

	// Create (or reuse) a single shared worker script file
	tempDir := os.TempDir()
	workerPath := filepath.Join(tempDir, "daytona_repl_worker.py")

	// Check if worker file exists, if not create it
	if _, err := os.Stat(workerPath); os.IsNotExist(err) {
		err := os.WriteFile(workerPath, []byte(pythonWorkerScript), workerScriptPerms)
		if err != nil {
			cancel()
			return fmt.Errorf("failed to create worker script: %w", err)
		}
	}
	c.workerPath = workerPath

	// Start Python worker process
	pyCmd := detectPythonCommand()
	cmd := exec.CommandContext(ctx, pyCmd, workerPath)
	cmd.Dir = c.info.Cwd
	cmd.Env = os.Environ()

	// Get stdin/stdout pipes
	stdin, err := cmd.StdinPipe()
	if err != nil {
		cancel()
		return fmt.Errorf("failed to create stdin pipe: %w", err)
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		cancel()
		stdin.Close()
		return fmt.Errorf("failed to create stdout pipe: %w", err)
	}
	// Worker stderr is passed straight through to the daemon's stderr.
	cmd.Stderr = os.Stderr

	// Start the process
	err = cmd.Start()
	if err != nil {
		cancel()
		stdin.Close()
		stdout.Close()
		return fmt.Errorf("failed to start Python worker: %w", err)
	}

	c.cmd = cmd
	c.stdin = stdin
	c.stdout = stdout
	c.info.Active = true
	c.done = make(chan struct{})

	c.logger.Debug("Started interpreter context", "contextId", c.info.ID, "pid", c.cmd.Process.Pid)

	// Start reading worker output
	go c.workerReadLoop()
	// Monitor process exit
	go c.monitorProcess()

	return nil
}

// detectPythonCommand attempts to find a working python interpreter,
// falling back to "python3" when neither candidate is on PATH.
func detectPythonCommand() string {
	candidates := []string{"python3", "python"}
	for _, c := range candidates {
		if _, err := exec.LookPath(c); err == nil {
			return c
		}
	}
	return "python3"
}

// sendCommand sends a command to the Python worker
// as a single newline-delimited JSON line on the worker's stdin.
func (c *Context) sendCommand(cmd WorkerCommand) error {
	c.mu.Lock()
	stdin := c.stdin
	c.mu.Unlock()
	if
stdin == nil { return errors.New("worker stdin not available") } data, err := json.Marshal(cmd) if err != nil { return fmt.Errorf("failed to marshal command: %w", err) } data = append(data, '\n') _, err = stdin.Write(data) if err != nil { return fmt.Errorf("failed to write command: %w", err) } return nil } // workerReadLoop reads messages from the Python worker func (c *Context) workerReadLoop() { scanner := bufio.NewScanner(c.stdout) scanner.Buffer(make([]byte, 64*1024), 1024*1024) for scanner.Scan() { line := scanner.Text() var chunk map[string]any err := json.Unmarshal([]byte(line), &chunk) if err != nil { c.logger.Error("Failed to parse worker chunk", "error", err) continue } c.handleChunk(chunk) } err := scanner.Err() if err != nil { c.logger.Error("Error reading from worker", "error", err) } } // handleChunk processes streaming chunks from the Python worker func (c *Context) handleChunk(chunk map[string]any) { // Extract all fields at the beginning chunkType := getStringFromChunk(chunk, "type") text := getStringFromChunk(chunk, "text") name := getStringFromChunk(chunk, "name") value := getStringFromChunk(chunk, "value") traceback := getStringFromChunk(chunk, "traceback") // Update internal command state for certain chunk types switch chunkType { case ChunkTypeError: c.commandMu.Lock() if c.activeCommand != nil { c.activeCommand.Status = CommandStatusError now := time.Now() c.activeCommand.EndedAt = &now c.activeCommand.Error = &Error{ Name: name, Value: value, Traceback: traceback, } } c.commandMu.Unlock() case ChunkTypeControl: c.commandMu.Lock() if c.activeCommand != nil { switch text { case ControlChunkTypeCompleted: // Only set to OK if no error occurred (status would be Error already) if c.activeCommand.Status == CommandStatusRunning { c.activeCommand.Status = CommandStatusOK } now := time.Now() c.activeCommand.EndedAt = &now case ControlChunkTypeInterrupted: c.activeCommand.Status = CommandStatusTimeout now := time.Now() c.activeCommand.EndedAt = &now } 
} c.commandMu.Unlock() return } // Stream to WebSocket client c.emitOutput(&OutputMessage{ Type: chunkType, Text: text, Name: name, Value: value, Traceback: traceback, }) } // Helper functions func getStringFromChunk(chunk map[string]any, key string) string { if val, ok := chunk[key].(string); ok { return val } return "" } // monitorProcess monitors the worker process and cleans up on exit func (c *Context) monitorProcess() { err := c.cmd.Wait() c.mu.Lock() c.info.Active = false contextID := c.info.ID done := c.done c.mu.Unlock() // Notify waiters that the process has exited if done != nil { close(done) } if err != nil { c.commandMu.Lock() if c.activeCommand != nil && c.activeCommand.Status == CommandStatusRunning { c.activeCommand.Status = CommandStatusError now := time.Now() c.activeCommand.EndedAt = &now c.activeCommand.Error = &Error{ Name: "WorkerProcessError", Value: err.Error(), } } c.commandMu.Unlock() c.logger.Error("Interpreter context process exited with error", "contextId", contextID, "error", err) } else { c.logger.Debug("Interpreter context process exited normally", "contextId", contextID) } // Close WebSocket client if any c.closeClient(websocket.CloseGoingAway, "worker process ended") } // shutdown gracefully shuts down the worker func (c *Context) shutdown() { c.mu.Lock() if !c.info.Active { c.mu.Unlock() return } // Get references while we have the lock contextID := c.info.ID cancel := c.cancel cmd := c.cmd done := c.done queue := c.queue c.mu.Unlock() // Close the queue to exit processQueue goroutine and prevent new jobs if queue != nil { close(queue) c.queue = nil } // Send SIGTERM to trigger immediate graceful shutdown (not queued) if cmd != nil && cmd.Process != nil { _ = cmd.Process.Signal(syscall.SIGTERM) } // Wait for process to exit (monitorProcess will close the done channel) if done != nil { select { case <-done: // Process exited gracefully c.logger.Debug("Interpreter context shut down gracefully", "contextId", contextID) case 
<-time.After(2 * time.Second): // Timeout - force kill c.logger.Debug("Interpreter context shutdown timeout, force killing", "contextId", contextID) if cancel != nil { cancel() } if cmd != nil && cmd.Process != nil { _ = cmd.Process.Kill() } // Wait a bit more for kill to take effect time.Sleep(100 * time.Millisecond) } } // Close WebSocket client c.closeClient(websocket.CloseGoingAway, "context shutdown") } ================================================ FILE: apps/daemon/pkg/toolbox/process/interpreter/repl_worker.py ================================================ # Copyright 2025 Daytona Platforms Inc. # SPDX-License-Identifier: AGPL-3.0 """ Stateful Python REPL Worker for Daytona - JSON line protocol (stdout) - Persistent globals across exec calls - Clean user-only tracebacks - Graceful SIGINT """ import io import json import os import signal import sys import traceback from contextlib import redirect_stderr, redirect_stdout class REPLWorker: def __init__(self): self.globals = { "__name__": "__main__", "__doc__": None, "__package__": None, "__builtins__": __builtins__, } self.should_shutdown = False self._setup_signals() # ---------- IO ---------- def _emit(self, chunk: dict): try: json.dump(chunk, sys.__stdout__) sys.__stdout__.write("\n") sys.__stdout__.flush() except Exception as e: sys.__stderr__.write(f"Failed to send chunk: {e}\n") class _StreamEmitter(io.TextIOBase): def __init__(self, worker: "REPLWorker", stream_type: str): self.worker = worker self.stream_type = stream_type self._buffer: list[str] = [] def writable(self): return True def write(self, data): # type: ignore[override] if not data: return 0 if not isinstance(data, str): data = str(data) self._buffer.append(data) if "\n" in data or sum(len(chunk) for chunk in self._buffer) >= 1024: self.flush() return len(data) def flush(self): # type: ignore[override] if not self._buffer: return payload = "".join(self._buffer) self._buffer.clear() # pylint: disable=protected-access 
self.worker._emit({"type": self.stream_type, "text": payload}) def isatty(self): return False @property def encoding(self): return "utf-8" def close(self): # type: ignore[override] self.flush() return super().close() # ---------- Signals ---------- def _setup_signals(self): def sigint_handler(_signum, _frame): raise KeyboardInterrupt("Execution interrupted") def sigterm_handler(_signum, _frame): self.should_shutdown = True raise KeyboardInterrupt("Shutting down") signal.signal(signal.SIGINT, sigint_handler) signal.signal(signal.SIGTERM, sigterm_handler) # ---------- Tracebacks ---------- def _clean_tb(self, etype, evalue, tb): frames = [f for f in traceback.extract_tb(tb) if f.filename == ""] if not frames: return f"{etype.__name__}: {evalue}\n" parts = ["Traceback (most recent call last):\n"] for f in frames: parts.append(f' File "", line {f.lineno}\n') if f.line: parts.append(f" {f.line}\n") parts.append(f"{etype.__name__}: {evalue}\n") return "".join(parts) # ---------- Exec ---------- def execute_code(self, code: str, envs=None) -> None: stdout_emitter = self._StreamEmitter(self, "stdout") stderr_emitter = self._StreamEmitter(self, "stderr") control_text, error_chunk = "completed", None env_snapshot = None if envs: env_snapshot = {} for key, value in envs.items(): if not isinstance(key, str): key = str(key) previous = os.environ.get(key) env_snapshot[key] = previous if value is None: os.environ.pop(key, None) else: os.environ[key] = str(value) try: with redirect_stdout(stdout_emitter), redirect_stderr(stderr_emitter): compiled = compile(code, "", "exec") exec(compiled, self.globals) # pylint: disable=exec-used except KeyboardInterrupt: control_text = "interrupted" except (SystemExit, Exception) as e: # SystemExit completes normally # Errors are indicated by the error chunk, not control type if not isinstance(e, SystemExit): error_chunk = { "type": "error", "name": type(e).__name__, "value": str(e), "traceback": self._clean_tb(type(e), e, e.__traceback__), } 
finally: stdout_emitter.flush() stderr_emitter.flush() if env_snapshot is not None: for key, previous in env_snapshot.items(): if previous is None: os.environ.pop(key, None) else: os.environ[key] = previous if error_chunk: self._emit(error_chunk) self._emit({"type": "control", "text": control_text}) # ---------- Protocol ---------- def handle_command(self, line: str) -> None: try: msg = json.loads(line) envs = msg.get("envs") if envs is not None and not isinstance(envs, dict): raise ValueError("envs must be an object") self.execute_code(msg.get("code", ""), envs) except json.JSONDecodeError as e: self._emit({"type": "error", "name": "JSONDecodeError", "value": str(e), "traceback": ""}) except Exception as e: self._emit({"type": "error", "name": type(e).__name__, "value": str(e), "traceback": ""}) # ---------- Main loop ---------- def run(self): while True: if self.should_shutdown: break try: line = sys.stdin.readline() if not line: break # EOF line = line.strip() if not line: continue self.handle_command(line) except KeyboardInterrupt: continue except Exception as e: sys.__stderr__.write(f"Fatal error in main loop: {e}\n") break if __name__ == "__main__": REPLWorker().run() ================================================ FILE: apps/daemon/pkg/toolbox/process/interpreter/types.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package interpreter

import (
	"context"
	"io"
	"log/slog"
	"os/exec"
	"sync"
	"time"

	"github.com/gorilla/websocket"
)

// Constants for WebSocket and execution management
const (
	writeWait         = 10 * time.Second // deadline for a single WS write
	gracePeriod       = 2 * time.Second  // SIGINT-to-kill grace on exec timeout
	workerScriptPerms = 0700
)

// WebSocket close codes (4000-4999 are for private/application use)
const (
	WebSocketCloseTimeout = 4008 // Execution timeout
)

// Chunk type constants for websocket streaming
const (
	ChunkTypeStdout  = "stdout"
	ChunkTypeStderr  = "stderr"
	ChunkTypeError   = "error"
	ChunkTypeControl = "control"
)

// Control chunk subtypes
const (
	ControlChunkTypeCompleted   = "completed"
	ControlChunkTypeInterrupted = "interrupted"
)

// Command execution statuses
const (
	CommandStatusRunning = "running"
	CommandStatusOK      = "ok"
	CommandStatusError   = "error"
	CommandStatusTimeout = "timeout"
)

// Supported languages
const (
	LanguagePython = "python"
)

// Controller handles interpreter-related HTTP endpoints
type Controller struct {
	logger  *slog.Logger
	workDir string
}

// API Request/Response types

// CreateContextRequest represents a request to create a new interpreter context
type CreateContextRequest struct {
	Cwd      *string `json:"cwd" validate:"optional"`
	Language *string `json:"language" validate:"optional"`
} // @name CreateContextRequest

// ExecuteRequest represents a request to execute code
type ExecuteRequest struct {
	Code      string             `json:"code" binding:"required"`
	ContextID *string            `json:"contextId" validate:"optional"`
	Timeout   *int64             `json:"timeout" validate:"optional"` // seconds, 0 disables timeout
	Envs      *map[string]string `json:"envs" validate:"optional"`
} // @name ExecuteRequest

// ListContextsResponse represents the response when listing contexts
type ListContextsResponse struct {
	Contexts []ContextInfo `json:"contexts" binding:"required"`
} // @name ListContextsResponse

// Context types

// ContextInfo contains metadata about an interpreter context
type ContextInfo struct {
	ID        string    `json:"id" binding:"required"`
	Cwd       string    `json:"cwd" binding:"required"`
	CreatedAt time.Time `json:"createdAt" binding:"required"`
	Active    bool      `json:"active" binding:"required"`
	Language  string    `json:"language" binding:"required"`
} // @name InterpreterContext

// Context represents an active interpreter context with operational methods
type Context struct {
	info       ContextInfo
	logger     *slog.Logger
	cmd        *exec.Cmd
	stdin      io.WriteCloser
	stdout     io.ReadCloser
	ctx        context.Context
	cancel     context.CancelFunc
	workerPath string
	// Single websocket client (protected by mu)
	client *wsClient
	// Command tracking
	activeCommand *CommandExecution
	commandMu     sync.Mutex
	// Execution FIFO queue
	queue chan execJob
	// Process exit notification
	done chan struct{}
	// Guards session state and client
	mu sync.Mutex
}

// CommandExecution tracks a single code execution
type CommandExecution struct {
	ID        string     `json:"id" binding:"required"`
	Code      string     `json:"code" binding:"required"`
	Status    string     `json:"status" binding:"required"` // "running", "ok", "error", "interrupted", "exit", "timeout"
	Error     *Error     `json:"error,omitempty"`
	StartedAt time.Time  `json:"startedAt" binding:"required"`
	EndedAt   *time.Time `json:"endedAt,omitempty"`
}

// Error represents a structured error from code execution
type Error struct {
	Name      string `json:"name" binding:"required"`
	Value     string `json:"value" binding:"required"`
	Traceback string `json:"traceback" binding:"required"`
}

// Internal types

// wsClient represents a WebSocket client connection for output streaming
type wsClient struct {
	id        string
	conn      *websocket.Conn
	send      chan wsFrame
	pongCh    <-chan []byte // queued pong payloads from PingHandler, drained by clientWriter
	done      chan struct{} // signals when clientWriter exits
	closeOnce sync.Once
	logger    *slog.Logger
}

// wsFrame is one queued outbound frame: either data output or a close request.
type wsFrame struct {
	output *OutputMessage
	close  *closeRequest
}

// closeRequest carries a WebSocket close code and reason message.
type closeRequest struct {
	code    int
	message string
}

// OutputMessage represents output sent to WebSocket clients
type OutputMessage struct {
	Type      string `json:"type" binding:"required"`
	Text      string `json:"text" binding:"required"`
	Name      string `json:"name" binding:"required"`
	Value     string `json:"value" binding:"required"`
	Traceback string `json:"traceback" binding:"required"`
}

// WorkerCommand represents a command sent to the language worker
type WorkerCommand struct {
	ID   string            `json:"id" binding:"required"`
	Code string            `json:"code" binding:"required"`
	Envs map[string]string `json:"envs" binding:"required"`
}

// execJob represents one queued execution
type execJob struct {
	code    string
	envs    map[string]string
	timeout time.Duration
	ws      *websocket.Conn
}


================================================
FILE: apps/daemon/pkg/toolbox/process/interpreter/websocket.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package interpreter

import (
	"encoding/json"
	"log/slog"
	"time"

	"github.com/daytonaio/daemon/internal/util"
	"github.com/google/uuid"
	"github.com/gorilla/websocket"
)

// attachWebSocket connects a WebSocket client to the interpreter context
// and blocks until the client detaches. Only one client is kept at a time;
// attaching a new one closes the previous client.
func (c *Context) attachWebSocket(ws *websocket.Conn) {
	pongCh := util.SetupWSKeepAlive(ws, c.logger)

	clientId := uuid.NewString()
	cl := &wsClient{
		id:     clientId,
		conn:   ws,
		send:   make(chan wsFrame, 1024),
		pongCh: pongCh,
		done:   make(chan struct{}),
		logger: c.logger.With(slog.String("clientId", clientId)),
	}

	c.mu.Lock()
	if c.client != nil {
		// NOTE(review): close() may block up to 5s while c.mu is held,
		// stalling emitOutput/closeClient in the meantime — confirm intended.
		c.client.close()
	}
	c.client = cl
	c.mu.Unlock()

	c.logger.Debug("Client attached to interpreter context", "clientId", cl.id, "contextId", c.info.ID)

	go c.clientWriter(cl)

	// Continuously read from the WebSocket so that gorilla/websocket's
	// PingHandler is invoked for incoming ping frames. Without this,
	// client keepalive pings go unanswered and the connection is closed
	// with code 1011 after ~50s.
	go func() {
		for {
			if _, _, err := ws.ReadMessage(); err != nil {
				return
			}
		}
	}()

	// Wait for clientWriter to exit (signals disconnection)
	<-cl.done

	c.mu.Lock()
	if c.client != nil && c.client.id == cl.id {
		c.client = nil
	}
	c.mu.Unlock()

	cl.close()
	c.logger.Debug("Client detached from interpreter context", "clientId", cl.id, "contextId", c.info.ID)
}

// clientWriter sends output messages to the WebSocket client
func (c *Context) clientWriter(cl *wsClient) {
	defer close(cl.done)
	for {
		// Priority: always flush pending pong responses before writing data.
		// This ensures keepalive pongs are never delayed by data writes.
		util.WritePendingPongs(cl.conn, cl.pongCh, writeWait, cl.logger)
		select {
		case <-c.ctx.Done():
			return
		case pong := <-cl.pongCh:
			// Pong arrived while waiting for data — write it immediately.
			if err := cl.conn.WriteControl(websocket.PongMessage, pong, time.Now().Add(writeWait)); err != nil {
				cl.logger.Debug("failed to write pong", "error", err)
			}
		case frame, ok := <-cl.send:
			if !ok {
				return
			}
			err := cl.writeFrame(frame)
			if err != nil {
				c.logger.Debug("Failed to write frame", "error", err)
			}
			// A close frame is terminal for this writer.
			if frame.close != nil {
				return
			}
		}
	}
}

// emitOutput sends an output message to the connected WebSocket client
// without blocking; a client that cannot keep up is disconnected.
func (c *Context) emitOutput(msg *OutputMessage) {
	c.mu.Lock()
	cl := c.client
	c.mu.Unlock()
	if cl == nil {
		return
	}
	select {
	case cl.send <- wsFrame{output: msg}:
	default:
		c.logger.Debug("Client send channel full - closing slow consumer")
		cl.requestClose(websocket.ClosePolicyViolation, "slow consumer")
		c.mu.Lock()
		if c.client != nil && c.client.id == cl.id {
			c.client = nil
		}
		c.mu.Unlock()
	}
}

// close closes a WebSocket client connection
// exactly once, draining pending frames before tearing down the socket.
func (cl *wsClient) close() {
	cl.closeOnce.Do(func() {
		close(cl.send)
		// Wait for clientWriter to drain remaining messages with a timeout
		// This ensures close frames and other pending messages have time to be sent
		timer := time.NewTimer(5 * time.Second)
		select {
		case <-cl.done:
			// clientWriter has finished processing all messages
			if !timer.Stop() {
				<-timer.C
			}
		case <-timer.C:
			// Timeout reached, proceed with closing
			cl.logger.Debug("Timeout waiting for client writer to finish")
		}
		// Close the connection. The background read goroutine started in
		// attachWebSocket is the sole reader — gorilla's default CloseHandler
		// (invoked during ReadMessage) handles the RFC 6455 close handshake.
		// We must not call NextReader/ReadMessage here to avoid violating
		// gorilla's single-concurrent-reader rule.
		_ = cl.conn.Close()
	})
}

// writeFrame writes one queued frame (data or close) with a write deadline.
func (cl *wsClient) writeFrame(frame wsFrame) error {
	if frame.output == nil && frame.close == nil {
		return nil
	}
	err := cl.conn.SetWriteDeadline(time.Now().Add(writeWait))
	if err != nil {
		return err
	}
	if frame.close != nil {
		payload := websocket.FormatCloseMessage(frame.close.code, frame.close.message)
		return cl.conn.WriteMessage(websocket.CloseMessage, payload)
	}
	data, err := json.Marshal(frame.output)
	if err != nil {
		return err
	}
	return cl.conn.WriteMessage(websocket.TextMessage, data)
}

// requestClose queues a close frame (best-effort) and then closes the client.
func (cl *wsClient) requestClose(code int, message string) {
	frame := wsFrame{
		close: &closeRequest{
			code:    code,
			message: message,
		},
	}
	select {
	case cl.send <- frame:
	default:
		cl.logger.Debug("Couldn't send close frame to client - closing connection")
	}
	cl.close()
}


================================================
FILE: apps/daemon/pkg/toolbox/process/pty/controller.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package pty

import (
	"encoding/json"
	"fmt"
	"log/slog"
	"net/http"
	"time"

	"github.com/daytonaio/daemon/internal/util"
	"github.com/gin-gonic/gin"
	"github.com/gorilla/websocket"
	cmap "github.com/orcaman/concurrent-map/v2"
)

// NewPTYController creates a new PTY controller
func NewPTYController(logger *slog.Logger, workDir string) *PTYController {
	return &PTYController{logger: logger.With(slog.String("component", "PTY_controller")), workDir: workDir}
}

// CreatePTYSession godoc
//
//	@Summary		Create a new PTY session
//	@Description	Create a new pseudo-terminal session with specified configuration
//	@Tags			process
//	@Accept			json
//	@Produce		json
//	@Param			request	body		PTYCreateRequest	true	"PTY session creation request"
//	@Success		201		{object}	PTYCreateResponse
//	@Router			/process/pty [post]
//
//	@id				CreatePtySession
func (p *PTYController) CreatePTYSession(c *gin.Context) {
	var req PTYCreateRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	// Validate session ID
	if req.ID == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "session ID is required"})
		return
	}

	// Check if session with this ID already exists
	if _, exists := ptyManager.Get(req.ID); exists {
		c.JSON(http.StatusConflict, gin.H{"error": fmt.Sprintf("PTY session with ID '%s' already exists", req.ID)})
		return
	}

	// Defaults
	if req.Cwd == "" {
		req.Cwd = p.workDir
	}
	if req.Envs == nil {
		req.Envs = make(map[string]string, 1)
	}
	if req.Envs["TERM"] == "" {
		req.Envs["TERM"] = "xterm-256color"
	}
	if req.Cols == nil {
		req.Cols = util.Pointer(uint16(80))
	}
	if req.Rows == nil {
		req.Rows = util.Pointer(uint16(24))
	}

	// Set upper limits to avoid ioctl errors
	if *req.Cols > 1000 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid value for cols - must be less than 1000"})
		return
	}
	if *req.Rows > 1000 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid value for rows - must be less than 1000"})
		return
	}

	session := &PTYSession{
		info: PTYSessionInfo{
			ID:        req.ID,
			Cwd:       req.Cwd,
			Envs:      req.Envs,
			Cols:      *req.Cols,
			Rows:      *req.Rows,
			CreatedAt: time.Now(),
			Active:    false,
			LazyStart: req.LazyStart,
		},
		clients: cmap.New[*wsClient](),
		logger:  p.logger.With(slog.String("sessionId", req.ID)),
	}

	// Add to manager first to prevent race conditions
	ptyManager.Add(session)

	// Start PTY immediately if not lazy start (default behavior)
	if !req.LazyStart {
		if err := session.start(); err != nil {
			// If start fails, remove from manager
			ptyManager.Delete(req.ID)
			p.logger.Error("failed to start PTY at create", "error", err)
			c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to start PTY session"})
			return
		}
	}

	c.JSON(http.StatusCreated, PTYCreateResponse{SessionID: req.ID})
}

// ListPTYSessions godoc
//
//	@Summary		List all PTY sessions
//	@Description	Get a list of all active pseudo-terminal sessions
//	@Tags			process
//	@Produce		json
//	@Success		200	{object}	PTYListResponse
//	@Router			/process/pty [get]
//
//	@id				ListPtySessions
func (p *PTYController) ListPTYSessions(c *gin.Context) {
	c.JSON(http.StatusOK, PTYListResponse{Sessions: ptyManager.List()})
}

// GetPTYSession godoc
//
//	@Summary		Get PTY session information
//	@Description	Get detailed information about a specific pseudo-terminal session
//	@Tags			process
//	@Produce		json
//	@Param			sessionId	path		string	true	"PTY session ID"
//	@Success		200			{object}	PTYSessionInfo
//	@Router			/process/pty/{sessionId} [get]
//
//	@id				GetPtySession
func (p *PTYController) GetPTYSession(c *gin.Context) {
	id := c.Param("sessionId")
	if id == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "session ID is required"})
		return
	}
	if s, ok := ptyManager.Get(id); ok {
		c.JSON(http.StatusOK, s.Info())
		return
	}
	c.JSON(http.StatusNotFound, gin.H{"error": "PTY session not found"})
}

// DeletePTYSession godoc
//
//	@Summary		Delete a PTY session
//	@Description	Delete a pseudo-terminal session and terminate its process
//	@Tags			process
//	@Produce		json
//	@Param			sessionId	path		string	true	"PTY session ID"
//	@Success		200			{object}	gin.H
//	@Router			/process/pty/{sessionId} [delete]
//
//	@id				DeletePtySession
func (p *PTYController) DeletePTYSession(c *gin.Context) {
	id := c.Param("sessionId")
	if id == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "session ID is required"})
		return
	}
	if s, ok := ptyManager.Delete(id); ok {
		// Terminate the underlying process after removal from the manager.
		s.kill()
		p.logger.Debug("Deleted PTY session", "sessionId", id)
		c.JSON(http.StatusOK, gin.H{"message": "PTY session deleted"})
		return
	}
	c.JSON(http.StatusNotFound, gin.H{"error": "PTY session not found"})
}

// ConnectPTYSession godoc
//
//	@Summary		Connect to PTY session via WebSocket
//	@Description	Establish a WebSocket connection to interact with a pseudo-terminal session
//	@Tags			process
//	@Param			sessionId	path	string	true	"PTY session ID"
//	@Success		101			"Switching Protocols - WebSocket connection established"
//	@Router			/process/pty/{sessionId}/connect [get]
//
//	@id				ConnectPtySession
func (p *PTYController) ConnectPTYSession(c *gin.Context) {
	id := c.Param("sessionId")
	if id == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "session ID is required"})
		return
	}

	// Upgrade to WebSocket
	ws, err := util.UpgradeToWebSocket(c.Writer, c.Request)
	if err != nil {
		p.logger.Error("ws upgrade failed", "error", err)
		return
	}

	session, err := ptyManager.VerifyPTYSessionReady(id)
	if err != nil {
		p.logger.Debug("failed to connect to PTY session", "sessionId", id, "error", err)
		// Send error control message
		// (the connection is already upgraded, so errors go over the socket).
		errorMsg := map[string]interface{}{
			"type":   "control",
			"status": "error",
			"error":  "Failed to connect to PTY session: " + err.Error(),
		}
		if errorJSON, err := json.Marshal(errorMsg); err == nil {
			_ = ws.WriteMessage(websocket.TextMessage, errorJSON)
		}
		_ = ws.Close()
		return
	}

	// Attach to session - this will send the control message internally
	session.attachWebSocket(ws)
}

// ResizePTYSession godoc
//
//	@Summary		Resize a PTY session
//	@Description	Resize the terminal dimensions of a pseudo-terminal session
//	@Tags			process
//	@Accept			json
//	@Produce		json
//	@Param			sessionId	path		string				true	"PTY session ID"
//	@Param			request		body		PTYResizeRequest	true	"Resize request with new dimensions"
//	@Success		200			{object}	PTYSessionInfo
//	@Router			/process/pty/{sessionId}/resize [post]
//
//	@id				ResizePtySession
func (p *PTYController) ResizePTYSession(c *gin.Context) {
	id := c.Param("sessionId")
	if id == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "session ID is required"})
		return
	}

	var req PTYResizeRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	if req.Cols > 1000 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "cols must be less than 1000"})
		return
	}
	if req.Rows > 1000 {
		c.JSON(http.StatusBadRequest, gin.H{"error": "rows must be less than 1000"})
		return
	}

	session, err := ptyManager.VerifyPTYSessionForResize(id)
	if err != nil {
		c.JSON(http.StatusGone, gin.H{"error": err.Error()})
		return
	}

	if err := session.resize(req.Cols, req.Rows); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	p.logger.Debug("Resized PTY session", "sessionId", id, "cols", req.Cols, "rows", req.Rows)

	// Return updated session info
	updatedInfo := session.Info()
	c.JSON(http.StatusOK, updatedInfo)
}


================================================
FILE: apps/daemon/pkg/toolbox/process/pty/manager.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package pty import ( "fmt" cmap "github.com/orcaman/concurrent-map/v2" ) // Global PTY manager instance var ptyManager = &PTYManager{ sessions: cmap.New[*PTYSession](), } // NewPTYManager creates a new PTY manager instance func NewPTYManager() *PTYManager { return &PTYManager{ sessions: cmap.New[*PTYSession](), } } // Add adds a PTY session to the manager func (m *PTYManager) Add(s *PTYSession) { m.sessions.Set(s.info.ID, s) } // Get retrieves a PTY session by ID func (m *PTYManager) Get(id string) (*PTYSession, bool) { s, ok := m.sessions.Get(id) return s, ok } // Delete removes a PTY session from the manager func (m *PTYManager) Delete(id string) (*PTYSession, bool) { s, ok := m.sessions.Get(id) if ok { m.sessions.Remove(id) } return s, ok } // List returns information about all managed PTY sessions func (m *PTYManager) List() []PTYSessionInfo { out := make([]PTYSessionInfo, 0, m.sessions.Count()) for _, s := range m.sessions.Items() { out = append(out, s.Info()) } return out } func (m *PTYManager) VerifyPTYSessionReady(id string) (*PTYSession, error) { // Validate session existence and send control message session, ok := ptyManager.Get(id) if !ok { return nil, fmt.Errorf("PTY session %s not found", id) } sessionInfo := session.Info() // Handle inactive sessions based on lazy start flag if !sessionInfo.Active { if sessionInfo.LazyStart { // Lazy start session - start PTY on first client connection if err := session.start(); err != nil { return nil, fmt.Errorf("failed to start PTY session: %v", err) } } else { // Non-lazy session that's inactive means it has terminated return nil, fmt.Errorf("PTY session '%s' has terminated and is no longer available", id) } } return session, nil } func (m *PTYManager) VerifyPTYSessionForResize(id string) (*PTYSession, error) { session, ok := ptyManager.Get(id) if !ok { return nil, fmt.Errorf("PTY session %s not found", id) } sessionInfo := session.Info() // Check if session can be resized if 
!sessionInfo.Active && !sessionInfo.LazyStart { // Non-lazy session that's inactive means it has terminated return nil, fmt.Errorf("PTY session '%s' has terminated and cannot be resized", id) } return session, nil } ================================================ FILE: apps/daemon/pkg/toolbox/process/pty/session.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package pty import ( "context" "errors" "fmt" "os" "os/exec" "github.com/creack/pty" "github.com/daytonaio/daemon/pkg/common" ) // Info returns the current session information func (s *PTYSession) Info() PTYSessionInfo { s.mu.Lock() defer s.mu.Unlock() return s.info } // start initializes and starts the PTY session func (s *PTYSession) start() error { s.mu.Lock() defer s.mu.Unlock() // already running? if s.info.Active && s.cmd != nil && s.ptmx != nil { return nil } // Prevent restarting - once a session exits, it should be removed from manager if s.cmd != nil { return errors.New("PTY session has already been used and cannot be restarted") } if s.inCh == nil { s.inCh = make(chan []byte, 1024) } ctx, cancel := context.WithCancel(context.Background()) s.ctx = ctx s.cancel = cancel shell := common.GetShell() if shell == "" { return errors.New("no shell resolved") } cmd := exec.CommandContext(ctx, shell, "-i", "-l") cmd.Dir = s.info.Cwd // Env cmd.Env = os.Environ() for k, v := range s.info.Envs { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) } ptmx, err := pty.StartWithSize(cmd, &pty.Winsize{Rows: s.info.Rows, Cols: s.info.Cols}) if err != nil { cancel() return fmt.Errorf("pty.StartWithSize: %w", err) } s.cmd = cmd s.ptmx = ptmx s.info.Active = true s.logger.Debug("Started PTY session", "sessionId", s.info.ID, "pid", s.cmd.Process.Pid) // 1) PTY -> clients broadcaster go s.ptyReadLoop() // 2) clients -> PTY writer go s.inputWriteLoop() // Reap the process; mark inactive on exit and send exit event go func() { err := 
s.cmd.Wait() var exitCode int var exitReason string if err != nil { if exitError, ok := err.(*exec.ExitError); ok { exitCode = exitError.ExitCode() // Analyze the exit code to provide meaningful context if exitCode == 137 { exitReason = " (SIGKILL)" } else if exitCode == 130 { exitReason = " (SIGINT - Ctrl+C)" } else if exitCode == 143 { exitReason = " (SIGTERM)" } else if exitCode > 128 { sigNum := exitCode - 128 exitReason = fmt.Sprintf(" (signal %d)", sigNum) } else { exitReason = " (non-zero exit)" } } else { exitCode = 1 exitReason = " (process error)" } } else { exitCode = 0 exitReason = " (clean exit)" } s.mu.Lock() s.info.Active = false sessionID := s.info.ID s.mu.Unlock() // Close WebSocket connections with exit code and reason s.closeClientsWithExitCode(exitCode, exitReason) // Remove session from manager - process has exited and won't be reused ptyManager.Delete(sessionID) s.logger.Debug("PTY session process exited and cleaned up", "sessionId", sessionID, "exitCode", exitCode, "exitReason", exitReason) }() return nil } // kill terminates the PTY session func (s *PTYSession) kill() { // kill process and PTY s.mu.Lock() // Check if already killed to prevent double-kill if !s.info.Active { s.mu.Unlock() return } sessionID := s.info.ID if s.cancel != nil { s.cancel() } if s.ptmx != nil { _ = s.ptmx.Close() s.ptmx = nil } if s.cmd != nil && s.cmd.Process != nil { _ = s.cmd.Process.Kill() } s.info.Active = false s.mu.Unlock() // Close WebSocket connections with kill exit code - 137 = 128 + 9 (SIGKILL) s.closeClientsWithExitCode(137, " (SIGKILL)") // Remove session from manager - manually killed ptyManager.Delete(sessionID) } // ptyReadLoop reads from PTY and broadcasts to all clients func (s *PTYSession) ptyReadLoop() { buf := make([]byte, 32*1024) for { n, err := s.ptmx.Read(buf) if n > 0 { b := make([]byte, n) copy(b, buf[:n]) s.broadcast(b) } if err != nil { return } } } // inputWriteLoop writes client input to PTY func (s *PTYSession) inputWriteLoop() { 
for { select { case <-s.ctx.Done(): return case data := <-s.inCh: if s.ptmx == nil { return } if _, err := s.ptmx.Write(data); err != nil { return } } } } // sendToPTY sends data from a client to the PTY func (s *PTYSession) sendToPTY(data []byte) error { // Check if inCh is available to prevent panic if s.inCh == nil { return fmt.Errorf("PTY session input channel not available") } select { case s.inCh <- data: return nil case <-s.ctx.Done(): return fmt.Errorf("PTY session input channel closed") } } // resize changes the PTY window size func (s *PTYSession) resize(cols, rows uint16) error { s.mu.Lock() defer s.mu.Unlock() // Check if session is still active if !s.info.Active { return errors.New("cannot resize inactive PTY session") } if cols > 1000 { return fmt.Errorf("cols must be less than 1000") } if rows > 1000 { return fmt.Errorf("rows must be less than 1000") } s.info.Cols = cols s.info.Rows = rows if s.ptmx != nil { if err := pty.Setsize(s.ptmx, &pty.Winsize{Cols: cols, Rows: rows}); err != nil { s.logger.Debug("PTY resize error", "error", err) return err } } else { return errors.New("PTY file descriptor is not available") } return nil } ================================================ FILE: apps/daemon/pkg/toolbox/process/pty/types.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0

package pty

import (
	"context"
	"log/slog"
	"os"
	"os/exec"
	"sync"
	"time"

	"github.com/gorilla/websocket"
	cmap "github.com/orcaman/concurrent-map/v2"
)

// Constants
const (
	writeWait = 10 * time.Second // deadline for a single WebSocket write
	readLimit = 64 * 1024        // max inbound WebSocket message size (bytes)
)

// PTYController handles PTY-related HTTP endpoints
type PTYController struct {
	logger  *slog.Logger
	workDir string // base working directory for new sessions — presumably; confirm against controller constructor (outside this view)
}

// PTYManager manages multiple PTY sessions
type PTYManager struct {
	sessions cmap.ConcurrentMap[string, *PTYSession] // keyed by session ID
}

// wsClient represents a WebSocket client connection
type wsClient struct {
	id        string
	conn      *websocket.Conn
	send      chan []byte   // outbound queue for this client (PTY -> WS)
	done      chan struct{} // closed when the client is shutting down
	closeOnce sync.Once     // makes close() idempotent
}

// PTYSession represents a single PTY session with multi-client support
type PTYSession struct {
	logger *slog.Logger
	info   PTYSessionInfo

	cmd    *exec.Cmd          // shell process; non-nil once start() has run
	ptmx   *os.File           // PTY master file descriptor
	ctx    context.Context    // lifetime of the session's goroutines
	cancel context.CancelFunc

	// multi-attach
	clients   cmap.ConcurrentMap[string, *wsClient]
	clientsMu sync.RWMutex

	// funnel of all client inputs -> single PTY writer (preserves ordering)
	inCh chan []byte

	// guards general session fields (info/cmd/ptmx)
	mu sync.Mutex
}

// PTYSessionInfo contains metadata about a PTY session
type PTYSessionInfo struct {
	ID        string            `json:"id" validate:"required"`
	Cwd       string            `json:"cwd" validate:"required"`
	Envs      map[string]string `json:"envs" validate:"required"`
	Cols      uint16            `json:"cols" validate:"required"`
	Rows      uint16            `json:"rows" validate:"required"`
	CreatedAt time.Time         `json:"createdAt" validate:"required"`
	Active    bool              `json:"active" validate:"required"`
	LazyStart bool              `json:"lazyStart" validate:"required"` // Whether this session uses lazy start
} // @name PtySessionInfo

// API Request/Response types

// PTYCreateRequest represents a request to create a new PTY session
type PTYCreateRequest struct {
	ID        string            `json:"id"`
	Cwd       string            `json:"cwd,omitempty"`
	Envs      map[string]string `json:"envs,omitempty"`
	Cols      *uint16           `json:"cols" validate:"optional"`
	Rows      *uint16           `json:"rows" validate:"optional"`
	LazyStart bool              `json:"lazyStart,omitempty"` // Don't start PTY until first client connects
} // @name PtyCreateRequest

// PTYCreateResponse represents the response when creating a PTY session
type PTYCreateResponse struct {
	SessionID string `json:"sessionId" validate:"required"`
} // @name PtyCreateResponse

// PTYListResponse represents the response when listing PTY sessions
type PTYListResponse struct {
	Sessions []PTYSessionInfo `json:"sessions" validate:"required"`
} // @name PtyListResponse

// PTYResizeRequest represents a request to resize a PTY session
type PTYResizeRequest struct {
	Cols uint16 `json:"cols" binding:"required,min=1,max=1000"`
	Rows uint16 `json:"rows" binding:"required,min=1,max=1000"`
} // @name PtyResizeRequest

================================================
FILE: apps/daemon/pkg/toolbox/process/pty/websocket.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package pty

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/google/uuid"
	"github.com/gorilla/websocket"
)

// attachWebSocket connects a new WebSocket client to the PTY session.
// It registers the client, starts its writer goroutine, sends a "connected"
// control message, then blocks in the reader loop until the client goes away.
func (s *PTYSession) attachWebSocket(ws *websocket.Conn) {
	cl := &wsClient{
		id:   uuid.NewString(),
		conn: ws,
		send: make(chan []byte, 256), // if full, drop slow client
		done: make(chan struct{}),
	}

	// Register client FIRST so it can receive PTY output via broadcast
	s.clients.Set(cl.id, cl)
	count := s.clients.Count()
	s.logger.Debug("Client attached to PTY session", "clientId", cl.id, "sessionId", s.info.ID, "clientCount", count)

	// Start PTY data flow - writer (PTY -> this client)
	go s.clientWriter(cl)

	// Send success control message after client is registered and ready
	successMsg := map[string]interface{}{
		"type":   "control",
		"status": "connected",
	}
	if successJSON, err := json.Marshal(successMsg); err == nil {
		_ = ws.WriteMessage(websocket.TextMessage, successJSON)
	}

	// reader (this client -> PTY); blocks until disconnect
	s.clientReader(cl)

	// on exit, unregister
	s.clients.Remove(cl.id)
	cl.close()
	remaining := s.clients.Count()
	s.logger.Debug("Client detached from PTY session", "clientId", cl.id, "sessionId", s.info.ID, "clientCount", remaining)
}

// clientWriter sends PTY output to a specific WebSocket client.
// Exits when the session ends, the client shuts down, or a write fails.
func (s *PTYSession) clientWriter(cl *wsClient) {
	for {
		select {
		case <-s.ctx.Done():
			return
		case <-cl.done:
			return
		case b := <-cl.send:
			_ = cl.conn.SetWriteDeadline(time.Now().Add(writeWait))
			if err := cl.conn.WriteMessage(websocket.BinaryMessage, b); err != nil {
				return
			}
		}
	}
}

// clientReader reads input from a WebSocket client and sends to PTY.
// Returns when the connection is closed or a forwarding error occurs.
func (s *PTYSession) clientReader(cl *wsClient) {
	conn := cl.conn
	conn.SetReadLimit(readLimit)

	for {
		_, data, err := conn.ReadMessage()
		if err != nil {
			// Normal/going-away closes are expected; only log the rest.
			if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
				s.logger.Debug("ws read error", "error", err)
			}
			return
		}

		// Send all message data to PTY (text or binary)
		if err := s.sendToPTY(data); err != nil {
			// Send error to client and close connection
			_ = conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(
				websocket.CloseInternalServerErr, "PTY session unavailable",
			))
			return
		}
	}
}

// broadcast sends data to all connected WebSocket clients
func (s *PTYSession) broadcast(b []byte) {
	// send to each client; drop slow clients to avoid stalling the PTY
	s.clientsMu.RLock()
	for id, cl := range s.clients.Items() {
		select {
		case cl.send <- b:
		case <-cl.done:
			// client is shutting down, skip
		default:
			// client's outbound queue is full -> drop the client
			// (done asynchronously so the broadcast loop never blocks)
			go func(id string, cl *wsClient) {
				_ = cl.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(
					websocket.ClosePolicyViolation, "slow consumer",
				))
				cl.close()
			}(id, cl)
		}
	}
	s.clientsMu.RUnlock()
}

// closeClientsWithExitCode closes all WebSocket connections with structured exit data.
// The close frame's reason text is a JSON object {exitCode, exitReason?}.
func (s *PTYSession) closeClientsWithExitCode(exitCode int, exitReason string) {
	var wsCloseCode int
	var exitReasonStr *string

	// Map PTY exit codes to WebSocket close codes
	if exitCode == 0 {
		wsCloseCode = websocket.CloseNormalClosure
		exitReasonStr = nil // undefined for clean exit
	} else {
		wsCloseCode = websocket.CloseInternalServerErr

		// Set human-readable reason for non-zero exits
		switch {
		case exitCode == 130:
			reason := "Ctrl+C"
			exitReasonStr = &reason
		case exitCode == 137:
			reason := "SIGKILL"
			exitReasonStr = &reason
		case exitCode == 143:
			reason := "SIGTERM"
			exitReasonStr = &reason
		case exitCode > 128:
			sigNum := exitCode - 128
			reason := fmt.Sprintf("signal %d", sigNum)
			exitReasonStr = &reason
		default:
			reason := "non-zero exit"
			exitReasonStr = &reason
		}
	}

	// Create structured close data as JSON
	type CloseData struct {
		ExitCode   int     `json:"exitCode"`
		ExitReason *string `json:"exitReason,omitempty"`
	}

	closeData := CloseData{
		ExitCode:   exitCode,
		ExitReason: exitReasonStr,
	}

	closeJSON, _ := json.Marshal(closeData)

	s.clientsMu.Lock()
	for id, cl := range s.clients.Items() {
		_ = cl.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(
			wsCloseCode, string(closeJSON),
		))
		cl.close()
		s.clients.Remove(id)
	}
	s.clientsMu.Unlock()
}

================================================
FILE: apps/daemon/pkg/toolbox/process/pty/ws_client.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package pty

// close shuts the client down exactly once: signals done and closes the socket.
func (cl *wsClient) close() {
	cl.closeOnce.Do(func() {
		close(cl.done)
		_ = cl.conn.Close()
	})
}

================================================
FILE: apps/daemon/pkg/toolbox/process/session/controller.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package session

import (
	"log/slog"

	"github.com/daytonaio/daemon/pkg/session"
)

// SessionController handles HTTP endpoints for shell sessions.
type SessionController struct {
	logger         *slog.Logger
	configDir      string
	sessionService *session.SessionService
}

// NewSessionController builds a SessionController with a component-tagged logger.
func NewSessionController(logger *slog.Logger, configDir string, sessionService *session.SessionService) *SessionController {
	return &SessionController{
		logger:         logger.With(slog.String("component", "session_controller")),
		configDir:      configDir,
		sessionService: sessionService,
	}
}

================================================
FILE: apps/daemon/pkg/toolbox/process/session/execute.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package session import ( "errors" "fmt" "net/http" "strings" common_errors "github.com/daytonaio/common-go/pkg/errors" "github.com/daytonaio/daemon/internal/util" "github.com/daytonaio/daemon/pkg/session" "github.com/gin-gonic/gin" ) // SessionExecuteCommand godoc // // @Summary Execute command in session // @Description Execute a command within an existing shell session // @Tags process // @Accept json // @Produce json // @Param sessionId path string true "Session ID" // @Param request body SessionExecuteRequest true "Command execution request" // @Success 200 {object} SessionExecuteResponse // @Success 202 {object} SessionExecuteResponse // @Router /process/session/{sessionId}/exec [post] // // @id SessionExecuteCommand func (s *SessionController) SessionExecuteCommand(c *gin.Context) { sessionId := c.Param("sessionId") if sessionId == util.EntrypointSessionID { c.Error(common_errors.NewBadRequestError(errors.New("can't execute command in entrypoint session"))) return } var request SessionExecuteRequest if err := c.ShouldBindJSON(&request); err != nil { c.AbortWithError(http.StatusBadRequest, fmt.Errorf("invalid request body: %w", err)) return } // Validate command is not empty (if not already handled by binding) if strings.TrimSpace(request.Command) == "" { c.AbortWithError(http.StatusBadRequest, errors.New("command cannot be empty")) return } // Handle backward compatibility for "async" field if request.Async { request.RunAsync = true } sdkVersion := util.ExtractSdkVersionFromHeader(c.Request.Header) versionComparison, err := util.CompareVersions(sdkVersion, "0.27.0-0") if err != nil { s.logger.ErrorContext(c.Request.Context(), "failed to compare versions", "error", err) versionComparison = util.Pointer(1) } isCombinedOutput := session.IsCombinedOutput(sdkVersion, versionComparison, c.Request.Header) executeResult, err := s.sessionService.Execute(sessionId, util.EmptyCommandID, request.Command, request.RunAsync, 
isCombinedOutput, request.SuppressInputEcho) if err != nil { c.Error(fmt.Errorf("failed to execute command: %w", err)) return } if request.RunAsync { c.JSON(http.StatusAccepted, &SessionExecuteResponse{ CommandId: executeResult.CommandId, }) return } c.JSON(http.StatusOK, &SessionExecuteResponse{ CommandId: executeResult.CommandId, Output: executeResult.Output, Stdout: executeResult.Stdout, Stderr: executeResult.Stderr, ExitCode: executeResult.ExitCode, }) } ================================================ FILE: apps/daemon/pkg/toolbox/process/session/input.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package session import ( "errors" "net/http" "github.com/gin-gonic/gin" common_errors "github.com/daytonaio/common-go/pkg/errors" "github.com/daytonaio/daemon/internal/util" ) // SendInput godoc // // @Summary Send input to command // @Description Send input data to a running command in a session for interactive execution // @Tags process // @Accept json // @Param sessionId path string true "Session ID" // @Param commandId path string true "Command ID" // @Param request body SessionSendInputRequest true "Input send request" // @Success 204 // @Router /process/session/{sessionId}/command/{commandId}/input [post] // // @id SendInput func (s *SessionController) SendInput(c *gin.Context) { sessionId := c.Param("sessionId") commandId := c.Param("commandId") if sessionId == util.EntrypointSessionID { c.Error(common_errors.NewBadRequestError(errors.New("can't send input to entrypoint session"))) return } var request SessionSendInputRequest if err := c.ShouldBindJSON(&request); err != nil { c.Error(common_errors.NewInvalidBodyRequestError(err)) return } err := s.sessionService.SendInput(sessionId, commandId, request.Data) if err != nil { c.Error(err) return } c.Status(http.StatusNoContent) } ================================================ FILE: apps/daemon/pkg/toolbox/process/session/log.go 
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package session

import (
	"net/http"

	"github.com/daytonaio/daemon/internal/util"
	"github.com/daytonaio/daemon/pkg/session"
	"github.com/gin-gonic/gin"
)

// GetSessionCommandLogs godoc
//
// @Summary Get session command logs
// @Description Get logs for a specific command within a session. Supports both HTTP and WebSocket streaming.
// @Tags process
// @Produce text/plain
// @Param sessionId path string true "Session ID"
// @Param commandId path string true "Command ID"
// @Param follow query boolean false "Follow logs in real-time (WebSocket only)"
// @Success 200 {string} string "Log content"
// @Router /process/session/{sessionId}/command/{commandId}/logs [get]
//
// @id GetSessionCommandLogs
func (s *SessionController) GetSessionCommandLogs(c *gin.Context) {
	sessionId := c.Param("sessionId")
	cmdId := c.Param("commandId")

	// Output format depends on the caller's SDK version; on a comparison
	// failure fall back to treating the SDK as newer than 0.27.0-0.
	sdkVersion := util.ExtractSdkVersionFromHeader(c.Request.Header)
	versionComparison, err := util.CompareVersions(sdkVersion, "0.27.0-0")
	if err != nil {
		s.logger.DebugContext(c.Request.Context(), "failed to compare versions", "error", err)
		versionComparison = util.Pointer(1)
	}

	opts := session.FetchLogsOptions{
		IsCombinedOutput:   session.IsCombinedOutput(sdkVersion, versionComparison, c.Request.Header),
		IsWebsocketUpgrade: c.Request.Header.Get("Upgrade") == "websocket",
		Follow:             c.Query("follow") == "true",
	}

	logBytes, err := s.sessionService.GetSessionCommandLogs(sessionId, cmdId, c.Request, c.Writer, opts)
	if err != nil {
		c.Error(err)
		return
	}
	// nil logBytes means the service already streamed the response
	// (WebSocket path) — presumably; confirm against SessionService.
	if logBytes == nil {
		return
	}
	c.String(http.StatusOK, string(logBytes))
}

// GetEntrypointLogs godoc
//
// @Summary Get entrypoint logs
// @Description Get logs for a sandbox entrypoint session. Supports both HTTP and WebSocket streaming.
// @Tags process
// @Produce text/plain
// @Param follow query boolean false "Follow logs in real-time (WebSocket only)"
// @Success 200 {string} string "Entrypoint log content"
// @Router /process/session/entrypoint/logs [get]
//
// @id GetEntrypointLogs
func (s *SessionController) GetEntrypointLogs(c *gin.Context) {
	opts := session.FetchLogsOptions{
		IsCombinedOutput:   false,
		IsWebsocketUpgrade: c.Request.Header.Get("Upgrade") == "websocket",
		Follow:             c.Query("follow") == "true",
	}

	logBytes, err := s.sessionService.GetSessionCommandLogs(util.EntrypointSessionID, util.EntrypointCommandID, c.Request, c.Writer, opts)
	if err != nil {
		c.Error(err)
		return
	}
	// nil logBytes: response was already streamed (see GetSessionCommandLogs).
	if logBytes == nil {
		return
	}
	c.String(http.StatusOK, string(logBytes))
}

================================================
FILE: apps/daemon/pkg/toolbox/process/session/session.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0

package session

import (
	"errors"
	"net/http"

	"github.com/gin-gonic/gin"

	common_errors "github.com/daytonaio/common-go/pkg/errors"
	"github.com/daytonaio/daemon/internal/util"
)

// CreateSession godoc
//
// @Summary Create a new session
// @Description Create a new shell session for command execution
// @Tags process
// @Accept json
// @Produce json
// @Param request body CreateSessionRequest true "Session creation request"
// @Success 201
// @Router /process/session [post]
//
// @id CreateSession
func (s *SessionController) CreateSession(c *gin.Context) {
	var request CreateSessionRequest
	if err := c.ShouldBindJSON(&request); err != nil {
		c.Error(common_errors.NewInvalidBodyRequestError(err))
		return
	}

	// Reserved ID: the entrypoint session is daemon-managed.
	if request.SessionId == util.EntrypointSessionID {
		c.Error(common_errors.NewBadRequestError(errors.New("provided session ID is reserved and cannot be created/overridden")))
		return
	}

	// for backward compatibility (only sdk clients before 0.103.X), we use the home directory as the default directory
	sdkVersion := util.ExtractSdkVersionFromHeader(c.Request.Header)
	versionComparison, err := util.CompareVersions(sdkVersion, "0.103.0-0")
	if err != nil {
		s.logger.ErrorContext(c.Request.Context(), "failed to compare versions", "error", err)
		versionComparison = util.Pointer(1)
	}
	// Legacy = strictly older than 0.103.0-0, excluding local dev builds.
	isLegacy := versionComparison != nil && *versionComparison < 0 && sdkVersion != "0.0.0-dev"

	err = s.sessionService.Create(request.SessionId, isLegacy)
	if err != nil {
		c.Error(err)
		return
	}

	c.Status(http.StatusCreated)
}

// DeleteSession godoc
//
// @Summary Delete a session
// @Description Delete an existing shell session
// @Tags process
// @Param sessionId path string true "Session ID"
// @Success 204
// @Router /process/session/{sessionId} [delete]
//
// @id DeleteSession
func (s *SessionController) DeleteSession(c *gin.Context) {
	sessionId := c.Param("sessionId")

	// The daemon-managed entrypoint session may never be deleted by clients.
	if sessionId == util.EntrypointSessionID {
		c.Error(common_errors.NewBadRequestError(errors.New("can't delete entrypoint session")))
		return
	}

	err := s.sessionService.Delete(c.Request.Context(), sessionId)
	if err != nil {
		c.Error(err)
		return
	}

	c.Status(http.StatusNoContent)
}

// ListSessions godoc
//
// @Summary List all sessions
// @Description Get a list of all active shell sessions
// @Tags process
// @Produce json
// @Success 200 {array} SessionDTO
// @Router /process/session [get]
//
// @id ListSessions
func (s *SessionController) ListSessions(c *gin.Context) {
	sessions, err := s.sessionService.List()
	if err != nil {
		c.Error(err)
		return
	}

	sessionDTOs := make([]SessionDTO, 0, len(sessions))
	for _, session := range sessions {
		sessionDTOs = append(sessionDTOs, *SessionToDTO(&session))
	}

	c.JSON(http.StatusOK, sessionDTOs)
}

// GetSession godoc
//
// @Summary Get session details
// @Description Get details of a specific session including its commands
// @Tags process
// @Produce json
// @Param sessionId path string true "Session ID"
// @Success 200 {object} SessionDTO
// @Router /process/session/{sessionId} [get]
//
// @id GetSession
func (s *SessionController) GetSession(c *gin.Context) {
	sessionId := c.Param("sessionId")

	session, err := s.sessionService.Get(sessionId)
	if err != nil {
		c.Error(err)
		return
	}

	c.JSON(http.StatusOK, SessionToDTO(session))
}

// GetSessionCommand godoc
//
// @Summary Get session command details
// @Description Get details of a specific command within a session
// @Tags process
// @Produce json
// @Param sessionId path string true "Session ID"
// @Param commandId path string true "Command ID"
// @Success 200 {object} CommandDTO
// @Router /process/session/{sessionId}/command/{commandId} [get]
//
// @id GetSessionCommand
func (s *SessionController) GetSessionCommand(c *gin.Context) {
	sessionId := c.Param("sessionId")
	cmdId := c.Param("commandId")

	command, err := s.sessionService.GetSessionCommand(sessionId, cmdId)
	if err != nil {
		c.Error(err)
		return
	}

	c.JSON(http.StatusOK, CommandToDTO(command))
}

// GetEntrypointSession godoc
//
// @Summary Get entrypoint session details
// @Description Get details of an entrypoint session including its commands
// @Tags process
// @Produce json
// @Success 200 {object} SessionDTO
// @Router /process/session/entrypoint [get]
//
// @id GetEntrypointSession
func (s *SessionController) GetEntrypointSession(c *gin.Context) {
	session, err := s.sessionService.Get(util.EntrypointSessionID)
	if err != nil {
		c.Error(err)
		return
	}

	c.JSON(http.StatusOK, SessionToDTO(session))
}

================================================
FILE: apps/daemon/pkg/toolbox/process/session/types.go
================================================
// Copyright 2025 Daytona Platforms Inc.
// SPDX-License-Identifier: AGPL-3.0 package session import "github.com/daytonaio/daemon/pkg/session" type CreateSessionRequest struct { SessionId string `json:"sessionId" validate:"required"` } // @name CreateSessionRequest type SessionExecuteRequest struct { Command string `json:"command" validate:"required"` RunAsync bool `json:"runAsync" validate:"optional"` Async bool `json:"async" validate:"optional"` SuppressInputEcho bool `json:"suppressInputEcho" validate:"optional"` } // @name SessionExecuteRequest type SessionSendInputRequest struct { Data string `json:"data" validate:"required"` } // @name SessionSendInputRequest type SessionExecuteResponse struct { CommandId string `json:"cmdId" validate:"required"` Output *string `json:"output" validate:"optional"` Stdout *string `json:"stdout" validate:"optional"` Stderr *string `json:"stderr" validate:"optional"` ExitCode *int `json:"exitCode" validate:"optional"` } // @name SessionExecuteResponse type SessionCommandLogsResponse struct { Stdout string `json:"stdout" validate:"required"` Stderr string `json:"stderr" validate:"required"` } // @name SessionCommandLogsResponse type CommandDTO struct { Id string `json:"id" validate:"required"` Command string `json:"command" validate:"required"` ExitCode *int `json:"exitCode,omitempty" validate:"optional"` } // @name Command type SessionDTO struct { SessionId string `json:"sessionId" validate:"required"` Commands []*CommandDTO `json:"commands" validate:"required"` } // @name Session func CommandToDTO(c *session.Command) *CommandDTO { return &CommandDTO{ Id: c.Id, Command: c.Command, ExitCode: c.ExitCode, } } func SessionToDTO(s *session.Session) *SessionDTO { commands := make([]*CommandDTO, 0, len(s.Commands)) for _, cmd := range s.Commands { commands = append(commands, CommandToDTO(cmd)) } return &SessionDTO{ SessionId: s.SessionId, Commands: commands, } } ================================================ FILE: apps/daemon/pkg/toolbox/process/types.go 
================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package process type ExecuteRequest struct { Command string `json:"command" validate:"required"` // Timeout in seconds, defaults to 10 seconds Timeout *uint32 `json:"timeout,omitempty" validate:"optional"` // Current working directory Cwd *string `json:"cwd,omitempty" validate:"optional"` } // @name ExecuteRequest // TODO: Set ExitCode as required once all sandboxes migrated to the new daemon type ExecuteResponse struct { ExitCode int `json:"exitCode"` Result string `json:"result" validate:"required"` } // @name ExecuteResponse ================================================ FILE: apps/daemon/pkg/toolbox/proxy/proxy.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package proxy import ( "errors" "fmt" "net/url" "strings" common_errors "github.com/daytonaio/common-go/pkg/errors" "github.com/gin-gonic/gin" ) func GetProxyTarget(ctx *gin.Context) (*url.URL, map[string]string, error) { targetPort := ctx.Param("port") if targetPort == "" { ctx.Error(common_errors.NewBadRequestError(errors.New("target port is required"))) return nil, nil, errors.New("target port is required") } // Build the target URL targetURL := fmt.Sprintf("http://localhost:%s", targetPort) // Get the wildcard path and normalize it path := ctx.Param("path") // Ensure path always has a leading slash but not duplicate slashes if path == "" { path = "/" } else if !strings.HasPrefix(path, "/") { path = "/" + path } // Create the complete target URL with path target, err := url.Parse(fmt.Sprintf("%s%s", targetURL, path)) if err != nil { ctx.Error(common_errors.NewBadRequestError(fmt.Errorf("failed to parse target URL: %w", err))) return nil, nil, fmt.Errorf("failed to parse target URL: %w", err) } return target, nil, nil } ================================================ FILE: 
apps/daemon/pkg/toolbox/server.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 // @title Daytona Toolbox API // @version v0.0.0-dev // @description Daytona Toolbox API package toolbox import ( "context" "fmt" "log/slog" "net" "net/http" "os" "path" "time" common_errors "github.com/daytonaio/common-go/pkg/errors" common_proxy "github.com/daytonaio/common-go/pkg/proxy" "github.com/daytonaio/common-go/pkg/telemetry" "github.com/daytonaio/daemon/internal" "github.com/daytonaio/daemon/pkg/recording" session_svc "github.com/daytonaio/daemon/pkg/session" "github.com/daytonaio/daemon/pkg/toolbox/computeruse" "github.com/daytonaio/daemon/pkg/toolbox/computeruse/manager" recordingcontroller "github.com/daytonaio/daemon/pkg/toolbox/computeruse/recording" "github.com/daytonaio/daemon/pkg/toolbox/config" "github.com/daytonaio/daemon/pkg/toolbox/fs" "github.com/daytonaio/daemon/pkg/toolbox/git" "github.com/daytonaio/daemon/pkg/toolbox/lsp" "github.com/daytonaio/daemon/pkg/toolbox/port" "github.com/daytonaio/daemon/pkg/toolbox/process" "github.com/daytonaio/daemon/pkg/toolbox/process/interpreter" "github.com/daytonaio/daemon/pkg/toolbox/process/pty" "github.com/daytonaio/daemon/pkg/toolbox/process/session" "github.com/daytonaio/daemon/pkg/toolbox/proxy" sloggin "github.com/samber/slog-gin" "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin" otellog "go.opentelemetry.io/otel/sdk/log" "go.opentelemetry.io/otel/sdk/metric" sdktrace "go.opentelemetry.io/otel/sdk/trace" "github.com/daytonaio/daemon/pkg/toolbox/docs" "github.com/gin-gonic/gin" "github.com/gin-gonic/gin/binding" swaggerfiles "github.com/swaggo/files" ginSwagger "github.com/swaggo/gin-swagger" ) type ServerConfig struct { Logger *slog.Logger WorkDir string ConfigDir string ComputerUse computeruse.IComputerUse SandboxId string OtelEndpoint *string SessionService *session_svc.SessionService RecordingService 
*recording.RecordingService OrganizationId *string RegionId *string EntrypointLogFilePath string } func NewServer(config ServerConfig) *server { return &server{ logger: config.Logger.With(slog.String("component", "toolbox_server")), WorkDir: config.WorkDir, SandboxId: config.SandboxId, otelEndpoint: config.OtelEndpoint, telemetry: Telemetry{}, sessionService: config.SessionService, configDir: config.ConfigDir, recordingService: config.RecordingService, organizationId: config.OrganizationId, regionId: config.RegionId, entrypointLogFilePath: config.EntrypointLogFilePath, } } type server struct { WorkDir string ComputerUse computeruse.IComputerUse SandboxId string logger *slog.Logger otelEndpoint *string authToken string telemetry Telemetry sessionService *session_svc.SessionService configDir string recordingService *recording.RecordingService entrypointLogFilePath string entrypointLogCancel context.CancelFunc httpServer *http.Server organizationId *string regionId *string ctx context.Context cancel context.CancelFunc } type Telemetry struct { TracerProvider *sdktrace.TracerProvider MeterProvider *metric.MeterProvider LoggerProvider *otellog.LoggerProvider } func (s *server) Start() error { s.ctx, s.cancel = context.WithCancel(context.Background()) defer s.cancel() docs.SwaggerInfo.Description = "Daytona Toolbox API" docs.SwaggerInfo.Title = "Daytona Toolbox API" docs.SwaggerInfo.BasePath = "/" docs.SwaggerInfo.Version = internal.Version // Set Gin to release mode in production if os.Getenv("ENVIRONMENT") == "production" { gin.SetMode(gin.ReleaseMode) } otelServiceName := fmt.Sprintf("sandbox-%s", s.SandboxId) r := gin.New() r.Use(common_errors.Recovery()) noTelemetryRouter := r.Group("/") r.Use(func(ctx *gin.Context) { if s.telemetry.TracerProvider == nil { ctx.Next() return } otelgin.Middleware(otelServiceName, otelgin.WithTracerProvider(s.telemetry.TracerProvider))(ctx) ctx.Next() }) r.Use(sloggin.New(s.logger)) errMiddleware := 
common_errors.NewErrorMiddleware(func(ctx *gin.Context, err error) common_errors.ErrorResponse { return common_errors.ErrorResponse{ StatusCode: http.StatusInternalServerError, Message: err.Error(), } }) noTelemetryRouter.Use(sloggin.New(s.logger)) r.Use(errMiddleware) noTelemetryRouter.Use(errMiddleware) binding.Validator = new(DefaultValidator) // Add swagger UI in development mode if os.Getenv("ENVIRONMENT") != "production" { r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerfiles.Handler)) } r.POST("/init", s.Initialize(otelServiceName, s.entrypointLogFilePath, s.organizationId, s.regionId)) r.GET("/version", s.GetVersion) // keep /project-dir old behavior for backward compatibility r.GET("/project-dir", s.GetUserHomeDir) r.GET("/user-home-dir", s.GetUserHomeDir) r.GET("/work-dir", s.GetWorkDir) fsController := r.Group("/files") { // read operations fsController.GET("/", fs.ListFiles) fsController.GET("/download", fs.DownloadFile) fsController.POST("/bulk-download", fs.DownloadFiles) fsController.GET("/find", fs.FindInFiles) fsController.GET("/info", fs.GetFileInfo) fsController.GET("/search", fs.SearchFiles) // create/modify operations fsController.POST("/folder", fs.CreateFolder) fsController.POST("/move", fs.MoveFile) fsController.POST("/permissions", fs.SetFilePermissions) fsController.POST("/replace", fs.ReplaceInFiles) fsController.POST("/upload", fs.UploadFile) fsController.POST("/bulk-upload", fs.UploadFiles) // delete operations fsController.DELETE("/", fs.DeleteFile) } processLogger := s.logger.With(slog.String("component", "process_controller")) processController := r.Group("/process") { processController.POST("/execute", process.ExecuteCommand(processLogger)) sessionController := session.NewSessionController(s.logger, s.configDir, s.sessionService) sessionGroup := processController.Group("/session") { sessionGroup.GET("", sessionController.ListSessions) sessionGroup.POST("", sessionController.CreateSession) sessionGroup.GET("/entrypoint", 
sessionController.GetEntrypointSession) sessionGroup.GET("/entrypoint/logs", sessionController.GetEntrypointLogs) sessionGroup.POST("/:sessionId/exec", sessionController.SessionExecuteCommand) sessionGroup.GET("/:sessionId", sessionController.GetSession) sessionGroup.DELETE("/:sessionId", sessionController.DeleteSession) sessionGroup.GET("/:sessionId/command/:commandId", sessionController.GetSessionCommand) sessionGroup.POST("/:sessionId/command/:commandId/input", sessionController.SendInput) sessionGroup.GET("/:sessionId/command/:commandId/logs", sessionController.GetSessionCommandLogs) } // PTY endpoints ptyController := pty.NewPTYController(s.logger, s.WorkDir) ptyGroup := processController.Group("/pty") { ptyGroup.GET("", ptyController.ListPTYSessions) ptyGroup.POST("", ptyController.CreatePTYSession) ptyGroup.GET("/:sessionId", ptyController.GetPTYSession) ptyGroup.DELETE("/:sessionId", ptyController.DeletePTYSession) ptyGroup.GET("/:sessionId/connect", ptyController.ConnectPTYSession) ptyGroup.POST("/:sessionId/resize", ptyController.ResizePTYSession) } // Interpreter endpoints interpreterController := interpreter.NewInterpreterController(s.logger, s.WorkDir) interpreterGroup := processController.Group("/interpreter") { interpreterGroup.POST("/context", interpreterController.CreateContext) interpreterGroup.GET("/context", interpreterController.ListContexts) interpreterGroup.DELETE("/context/:id", interpreterController.DeleteContext) interpreterGroup.GET("/execute", interpreterController.Execute) } } gitController := r.Group("/git") { gitController.GET("/branches", git.ListBranches) gitController.GET("/history", git.GetCommitHistory) gitController.GET("/status", git.GetStatus) gitController.POST("/add", git.AddFiles) gitController.POST("/branches", git.CreateBranch) gitController.POST("/checkout", git.CheckoutBranch) gitController.DELETE("/branches", git.DeleteBranch) gitController.POST("/clone", git.CloneRepository) gitController.POST("/commit", 
git.CommitChanges) gitController.POST("/pull", git.PullChanges) gitController.POST("/push", git.PushChanges) } lspLogger := s.logger.With(slog.String("component", "lsp_service")) lspController := r.Group("/lsp") { // server process lspController.POST("/start", lsp.Start(lspLogger)) lspController.POST("/stop", lsp.Stop(lspLogger)) // lsp operations lspController.POST("/completions", lsp.Completions(lspLogger)) lspController.POST("/did-open", lsp.DidOpen(lspLogger)) lspController.POST("/did-close", lsp.DidClose(lspLogger)) lspController.GET("/document-symbols", lsp.DocumentSymbols(lspLogger)) lspController.GET("/workspacesymbols", lsp.WorkspaceSymbols(lspLogger)) } lazyCU := computeruse.NewLazyComputerUse() s.ComputerUse = lazyCU go func() { // Initialize plugin-based computer use lazily in a background goroutine pluginPath := "/usr/local/lib/daytona-computer-use" // Fallback to local config directory for development if _, err := os.Stat(pluginPath); os.IsNotExist(err) { pluginPath = path.Join(s.configDir, "daytona-computer-use") } impl, err := manager.GetComputerUse(s.logger, pluginPath) if err != nil { s.logger.Error("Computer-Use error", "error", err) s.logger.Info("Continuing without computer-use functionality...") return } lazyCU.Set(impl) s.logger.Info("Computer-use plugin loaded successfully") }() // Register computer-use endpoints with lazy check middleware computerUseController := r.Group("/computeruse") { computerUseHandler := computeruse.Handler{ ComputerUse: lazyCU, } cuRoutes := computerUseController.Group("/", computeruse.LazyCheckMiddleware(lazyCU)) // Computer use status endpoint cuRoutes.GET("/status", computeruse.WrapStatusHandler(lazyCU.GetStatus)) // Computer use management endpoints cuRoutes.POST("/start", computerUseHandler.StartComputerUse) cuRoutes.POST("/stop", computerUseHandler.StopComputerUse) cuRoutes.GET("/process-status", computerUseHandler.GetComputerUseStatus) cuRoutes.GET("/process/:processName/status", 
computerUseHandler.GetProcessStatus) cuRoutes.POST("/process/:processName/restart", computerUseHandler.RestartProcess) cuRoutes.GET("/process/:processName/logs", computerUseHandler.GetProcessLogs) cuRoutes.GET("/process/:processName/errors", computerUseHandler.GetProcessErrors) // Screenshot endpoints cuRoutes.GET("/screenshot", computeruse.WrapScreenshotHandler(lazyCU.TakeScreenshot)) cuRoutes.GET("/screenshot/region", computeruse.WrapRegionScreenshotHandler(lazyCU.TakeRegionScreenshot)) cuRoutes.GET("/screenshot/compressed", computeruse.WrapCompressedScreenshotHandler(lazyCU.TakeCompressedScreenshot)) cuRoutes.GET("/screenshot/region/compressed", computeruse.WrapCompressedRegionScreenshotHandler(lazyCU.TakeCompressedRegionScreenshot)) // Mouse control endpoints cuRoutes.GET("/mouse/position", computeruse.WrapMousePositionHandler(lazyCU.GetMousePosition)) cuRoutes.POST("/mouse/move", computeruse.WrapMoveMouseHandler(lazyCU.MoveMouse)) cuRoutes.POST("/mouse/click", computeruse.WrapClickHandler(lazyCU.Click)) cuRoutes.POST("/mouse/drag", computeruse.WrapDragHandler(lazyCU.Drag)) cuRoutes.POST("/mouse/scroll", computeruse.WrapScrollHandler(lazyCU.Scroll)) // Keyboard control endpoints cuRoutes.POST("/keyboard/type", computeruse.WrapTypeTextHandler(lazyCU.TypeText)) cuRoutes.POST("/keyboard/key", computeruse.WrapPressKeyHandler(lazyCU.PressKey)) cuRoutes.POST("/keyboard/hotkey", computeruse.WrapPressHotkeyHandler(lazyCU.PressHotkey)) // Display info endpoints cuRoutes.GET("/display/info", computeruse.WrapDisplayInfoHandler(lazyCU.GetDisplayInfo)) cuRoutes.GET("/display/windows", computeruse.WrapWindowsHandler(lazyCU.GetWindows)) } // Recording endpoints - always registered, independent of computer-use plugin recordingController := recordingcontroller.NewRecordingController(s.recordingService) recordingsGroup := computerUseController.Group("/recordings") { recordingsGroup.POST("/start", recordingController.StartRecording) recordingsGroup.POST("/stop", 
recordingController.StopRecording) recordingsGroup.GET("", recordingController.ListRecordings) recordingsGroup.GET("/:id", recordingController.GetRecording) recordingsGroup.GET("/:id/download", recordingController.DownloadRecording) recordingsGroup.DELETE("/:id", recordingController.DeleteRecording) } portDetector := port.NewPortsDetector() portController := r.Group("/port") { portController.GET("", portDetector.GetPorts) portController.GET("/:port/in-use", portDetector.IsPortInUse) } proxyController := noTelemetryRouter.Group("/proxy") { proxyController.Any("/:port/*path", common_proxy.NewProxyRequestHandler(proxy.GetProxyTarget, nil)) } go portDetector.Start(context.Background()) s.httpServer = &http.Server{ Addr: fmt.Sprintf(":%d", config.TOOLBOX_API_PORT), Handler: r, } // Print to stdout so the runner can know that the daemon is ready fmt.Println("Starting toolbox server on port", config.TOOLBOX_API_PORT) listener, err := net.Listen("tcp", s.httpServer.Addr) if err != nil { return err } return s.httpServer.Serve(listener) } func (s *server) Shutdown() { s.logger.Info("Shutting down toolbox server") // Stop accepting new requests and drain in-flight ones if s.httpServer != nil { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if err := s.httpServer.Shutdown(ctx); err != nil { s.logger.Error("toolbox HTTP server shutdown error", "error", err) } } // Stop computer use if running if s.ComputerUse != nil { s.logger.Info("Stopping computer use...") _, err := s.ComputerUse.Stop() if err != nil { s.logger.Error("Failed to stop computer use", "error", err) } } // Flush telemetry if s.telemetry.TracerProvider != nil { s.logger.Info("Shutting down tracer provider") telemetry.ShutdownTracer(s.logger, s.telemetry.TracerProvider) } if s.telemetry.MeterProvider != nil { s.logger.Info("Shutting down meter provider") telemetry.ShutdownMeter(s.logger, s.telemetry.MeterProvider) } if s.telemetry.LoggerProvider != nil { 
s.logger.Info("Shutting down logger provider") telemetry.ShutdownLogger(s.logger, s.telemetry.LoggerProvider) } } ================================================ FILE: apps/daemon/pkg/toolbox/telemetry.go ================================================ // Copyright Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package toolbox import ( "context" "fmt" "os" "github.com/daytonaio/common-go/pkg/log" "github.com/daytonaio/common-go/pkg/telemetry" "github.com/daytonaio/daemon/internal" ) func (s *server) initTelemetry(ctx context.Context, serviceName, entrypointLogFilePath string, organizationId, regionId *string) error { if s.otelEndpoint == nil { s.logger.InfoContext(ctx, "Otel endpoint not provided, skipping telemetry initialization") return nil } if s.telemetry.LoggerProvider != nil { if err := s.telemetry.LoggerProvider.Shutdown(ctx); err != nil { return fmt.Errorf("failed to shutdown existing telemetry logger: %w", err) } } if s.telemetry.MeterProvider != nil { if err := s.telemetry.MeterProvider.Shutdown(ctx); err != nil { return fmt.Errorf("failed to shutdown existing telemetry meter provider: %w", err) } } if s.telemetry.TracerProvider != nil { if err := s.telemetry.TracerProvider.Shutdown(ctx); err != nil { return fmt.Errorf("failed to shutdown existing telemetry tracer provider: %w", err) } } config := telemetry.Config{ ServiceName: serviceName, ServiceVersion: internal.Version, Endpoint: *s.otelEndpoint, Headers: map[string]string{ "sandbox-auth-token": s.authToken, }, } extraLabels := make(map[string]string) if organizationId != nil && *organizationId != "" { extraLabels["daytona_organization_id"] = *organizationId } if regionId != nil && *regionId != "" { extraLabels["daytona_region_id"] = *regionId } if len(extraLabels) > 0 { config.ExtraLabels = extraLabels } // Use a background context telemetryContext := context.Background() // Initialize OpenTelemetry logging newLogger, lp, err := telemetry.InitLogger(telemetryContext, s.logger, config) 
if err != nil { return fmt.Errorf("failed to initialize logger: %w", err) } s.logger = newLogger if s.entrypointLogCancel != nil { s.entrypointLogCancel() } entrypointCtx, entrypointCancel := context.WithCancel(s.ctx) s.entrypointLogCancel = entrypointCancel go func() { if entrypointLogFilePath == "" { return } entrypointLogFile, err := os.Open(entrypointLogFilePath) if err != nil { s.logger.ErrorContext(ctx, "Failed to open entrypoint log file", "error", err, "daytona-entrypoint", true) return } defer entrypointLogFile.Close() errChan := make(chan error, 1) stdoutChan := make(chan []byte) stderrChan := make(chan []byte) go log.ReadMultiplexedLog(entrypointCtx, entrypointLogFile, true, stdoutChan, stderrChan, errChan) for { select { case <-entrypointCtx.Done(): return case line := <-stdoutChan: s.logger.InfoContext(telemetryContext, string(line), "daytona-entrypoint", true) case line := <-stderrChan: s.logger.ErrorContext(telemetryContext, string(line), "daytona-entrypoint", true) case err := <-errChan: if err != nil { s.logger.ErrorContext(telemetryContext, "Error reading entrypoint log file", "error", err, "daytona-entrypoint", true) } return } } }() // Initialize OpenTelemetry metrics mp, err := telemetry.InitMetrics(ctx, config, "daytona.sandbox") if err != nil { if shutDownErr := lp.Shutdown(telemetryContext); shutDownErr != nil { s.logger.ErrorContext(ctx, "Failed to shutdown logger after metrics initialization failure", "shutdownErr", shutDownErr) } return fmt.Errorf("failed to initialize metrics: %w", err) } // Initialize OpenTelemetry tracing tp, err := telemetry.InitTracer(ctx, config) if err != nil { if shutDownErr := lp.Shutdown(telemetryContext); shutDownErr != nil { s.logger.ErrorContext(ctx, "Failed to shutdown logger after tracer initialization failure", "shutdownErr", shutDownErr) } if shutDownErr := mp.Shutdown(telemetryContext); shutDownErr != nil { s.logger.ErrorContext(ctx, "Failed to shutdown meter provider after tracer initialization 
failure", "shutdownErr", shutDownErr) } return fmt.Errorf("failed to initialize tracer: %w", err) } s.telemetry.TracerProvider = tp s.telemetry.MeterProvider = mp s.telemetry.LoggerProvider = lp s.logger.InfoContext(ctx, "Telemetry initialized successfully") return nil } ================================================ FILE: apps/daemon/pkg/toolbox/types.go ================================================ // Copyright Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package toolbox type InitializeRequest struct { Token string `json:"token" binding:"required"` } // @name InitializeRequest type WorkDirResponse struct { Dir string `json:"dir" binding:"required"` } // @name WorkDirResponse type UserHomeDirResponse struct { Dir string `json:"dir" binding:"required"` } // @name UserHomeDirResponse ================================================ FILE: apps/daemon/pkg/toolbox/validator.go ================================================ // Copyright 2025 Daytona Platforms Inc. // SPDX-License-Identifier: AGPL-3.0 package toolbox import ( "reflect" "strconv" "strings" "sync" "github.com/gin-gonic/gin/binding" "github.com/go-playground/validator/v10" ) type DefaultValidator struct { once sync.Once validate *validator.Validate } var _ binding.StructValidator = &DefaultValidator{} type SliceValidationError []error func (err SliceValidationError) Error() string { if len(err) == 0 { return "" } var b strings.Builder for i := 0; i < len(err); i++ { if err[i] != nil { if b.Len() > 0 { b.WriteString("\n") } b.WriteString("[" + strconv.Itoa(i) + "]: " + err[i].Error()) } } return b.String() } func (v *DefaultValidator) ValidateStruct(obj any) error { if obj == nil { return nil } value := reflect.ValueOf(obj) switch value.Kind() { case reflect.Ptr: if value.Elem().Kind() != reflect.Struct { return v.ValidateStruct(value.Elem().Interface()) } return v.validateStruct(obj) case reflect.Struct: return v.validateStruct(obj) case reflect.Slice, reflect.Array: count := 
value.Len() validateRet := make(SliceValidationError, 0) for i := 0; i < count; i++ { if err := v.ValidateStruct(value.Index(i).Interface()); err != nil { validateRet = append(validateRet, err) } } if len(validateRet) == 0 { return nil } return validateRet default: return nil } } func (v *DefaultValidator) Engine() interface{} { v.lazyinit() return v.validate } func (v *DefaultValidator) validateStruct(obj any) error { v.lazyinit() return v.validate.Struct(obj) } func (v *DefaultValidator) lazyinit() { v.once.Do(func() { v.validate = validator.New(validator.WithRequiredStructEnabled()) v.validate.SetTagName("validate") _ = v.validate.RegisterValidation("optional", func(fl validator.FieldLevel) bool { return true }, true) }) } ================================================ FILE: apps/daemon/project.json ================================================ { "name": "daemon", "$schema": "../../node_modules/nx/schemas/project-schema.json", "projectType": "application", "sourceRoot": "apps/daemon", "tags": [], "targets": { "prepare": { "executor": "@nx-go/nx-go:serve", "options": { "cwd": ".", "main": "{projectRoot}/tools/xterm.go" }, "cache": true, "inputs": ["{projectRoot}/tools/xterm.go"], "outputs": ["{projectRoot}/pkg/terminal/static/*"], "configurations": { "production": {} } }, "build": { "executor": "@nx-go/nx-go:build", "inputs": [ "goProduction", "^goProduction", { "env": "VERSION" }, { "dependentTasksOutputFiles": "**/*", "transitive": true } ], "options": { "main": "{projectRoot}/cmd/daemon/main.go", "outputPath": "dist/apps/daemon" }, "configurations": { "production": { "flags": ["-ldflags \"-X 'github.com/daytonaio/daemon/internal.Version=$VERSION'\""] } }, "dependsOn": ["build-amd64"] }, "build-amd64": { "executor": "@nx-go/nx-go:build", "options": { "main": "{projectRoot}/cmd/daemon/main.go", "outputPath": "dist/apps/daemon-amd64", "env": { "GOARCH": "amd64", "GOOS": "linux", "CGO_ENABLED": "0" }, "flags": ["-ldflags \"-X 
'github.com/daytonaio/daemon/internal.Version=$VERSION'\""] }, "dependsOn": ["prepare"], "inputs": [ "goProduction", "^goProduction", { "env": "VERSION" }, { "dependentTasksOutputFiles": "**/*", "transitive": true } ] }, "serve": { "executor": "@nx-go/nx-go:serve", "options": { "cmd": "gow", "cwd": ".", "main": "{projectRoot}/cmd/daemon/main.go" }, "configurations": { "production": {} } }, "format": { "executor": "nx:run-commands", "options": { "command": "cd {projectRoot} && go fmt ./... && prettier --write \"**/*.{yaml,html,json}\"" } }, "test": { "executor": "@nx-go/nx-go:test" }, "lint": { "executor": "@nx-go/nx-go:lint" }, "openapi": { "executor": "nx:run-commands", "cache": true, "inputs": ["{projectRoot}/pkg/toolbox/**/*.go", "!{projectRoot}/pkg/toolbox/**/*_test.go"], "outputs": [ "{projectRoot}/pkg/toolbox/docs/swagger.json", "{projectRoot}/pkg/toolbox/docs/swagger.yaml", "{projectRoot}/pkg/toolbox/docs/docs.go" ], "options": { "cwd": "{projectRoot}/pkg/toolbox", "command": "swag fmt && swag init --parseDependency --parseInternal --parseDepth 1 -o docs -g server.go && prettier --write \"**/*.{yaml,html,json}\"" } } } } ================================================ FILE: apps/daemon/tools/xterm.go ================================================ // Copyright 2025 Daytona Platforms Inc. 
// SPDX-License-Identifier: AGPL-3.0 // download_xterm.go package main import ( "fmt" "io" "net/http" "os" "path/filepath" "runtime" ) const ( XTERM_VERSION = "5.3.0" XTERM_FIT_VERSION = "0.8.0" ) func main() { // Get project root path _, filename, _, _ := runtime.Caller(0) projectRoot := filepath.Join(filepath.Dir(filename), "..") // Create static directory structure staticDir := filepath.Join(projectRoot, "pkg", "terminal", "static") err := os.MkdirAll(staticDir, 0755) if err != nil { fmt.Printf("Error creating directory %s: %v\n", staticDir, err) os.Exit(1) } // Files to download from cdnjs files := map[string]string{ filepath.Join(staticDir, "xterm.js"): fmt.Sprintf("https://cdn.jsdelivr.net/npm/xterm@%s/lib/xterm.js", XTERM_VERSION), filepath.Join(staticDir, "xterm.css"): fmt.Sprintf("https://cdn.jsdelivr.net/npm/xterm@%s/css/xterm.css", XTERM_VERSION), filepath.Join(staticDir, "xterm-addon-fit.js"): fmt.Sprintf("https://cdn.jsdelivr.net/npm/xterm-addon-fit@%s/lib/xterm-addon-fit.js", XTERM_FIT_VERSION), } // Download each file for filePath, url := range files { fmt.Printf("Downloading %s...\n", filePath) resp, err := http.Get(url) if err != nil { fmt.Printf("Error downloading %s: %v\n", url, err) os.Exit(1) } defer resp.Body.Close() file, err := os.Create(filePath) if err != nil { fmt.Printf("Error creating file %s: %v\n", filePath, err) os.Exit(1) } _, err = io.Copy(file, resp.Body) file.Close() if err != nil { fmt.Printf("Error writing to file %s: %v\n", filePath, err) os.Exit(1) } } fmt.Println("xterm.js files downloaded successfully") } ================================================ FILE: apps/dashboard/.prettierignore ================================================ /public ================================================ FILE: apps/dashboard/.storybook/main.ts ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import type { StorybookConfig } from '@storybook/react-vite' import { mergeConfig } from 'vite' import path from 'path' const config: StorybookConfig = { stories: ['../src/**/*.stories.@(ts|tsx)'], addons: ['@storybook/addon-essentials'], framework: { name: '@storybook/react-vite', options: {}, }, typescript: { reactDocgen: false, }, viteFinal: async (config) => { return mergeConfig(config, { resolve: { alias: [ { find: '@daytonaio/sdk', replacement: path.resolve(__dirname, '../../../libs/sdk-typescript/src'), }, { find: '@', replacement: path.resolve(__dirname, '../src'), }, ], }, }) }, } export default config ================================================ FILE: apps/dashboard/.storybook/preview.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import type { Preview } from '@storybook/react' import '../src/index.css' const preview: Preview = { globalTypes: { theme: { description: 'Toggle light/dark theme', toolbar: { title: 'Theme', icon: 'sun', items: [ { value: 'light', icon: 'sun', title: 'Light' }, { value: 'dark', icon: 'moon', title: 'Dark' }, ], dynamicTitle: true, }, }, }, initialGlobals: { theme: 'light', }, decorators: [ (Story, context) => { const theme = context.globals.theme || 'light' document.documentElement.classList.toggle('dark', theme === 'dark') return }, ], parameters: { controls: { matchers: { color: /(background|color)$/i, date: /Date$/i, }, }, }, } export default preview ================================================ FILE: apps/dashboard/.storybook/tsconfig.json ================================================ { "extends": "../tsconfig.app.json", "compilerOptions": { "composite": false, "declaration": false, "declarationMap": false }, "include": ["../src/**/*.ts", "../src/**/*.tsx", "./**/*.ts"], "exclude": ["../src/**/*.spec.ts", "../src/**/*.test.ts"] } ================================================ FILE: 
apps/dashboard/eslint.config.mjs ================================================ import nx from '@nx/eslint-plugin' import baseConfig from '../../eslint.config.mjs' export default [ ...baseConfig, ...nx.configs['flat/react'], { files: ['**/*.ts', '**/*.tsx', '**/*.js', '**/*.jsx'], // Override or add rules here rules: {}, }, ] ================================================ FILE: apps/dashboard/index.html ================================================ Daytona
================================================ FILE: apps/dashboard/postcss.config.js ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ const { join } = require('path') // Note: If you use library-specific PostCSS/Tailwind configuration then you should remove the `postcssConfig` build // option from your application's configuration (i.e. project.json). // // See: https://nx.dev/guides/using-tailwind-css-in-react#step-4:-applying-configuration-to-libraries module.exports = { plugins: { tailwindcss: { config: join(__dirname, 'tailwind.config.js'), }, autoprefixer: {}, }, } ================================================ FILE: apps/dashboard/project.json ================================================ { "name": "dashboard", "$schema": "../../node_modules/nx/schemas/project-schema.json", "sourceRoot": "apps/dashboard/src", "projectType": "application", "tags": [], "targets": { "build": { "configurations": { "production": { "mode": "production" } }, "dependsOn": [ { "target": "build", "projects": "sdk-typescript" } ] }, "storybook": { "executor": "nx:run-commands", "options": { "command": "storybook dev -p 6006 -c .storybook", "cwd": "apps/dashboard" } }, "build-storybook": { "executor": "nx:run-commands", "options": { "command": "storybook build -c .storybook -o ../../dist/storybook", "cwd": "apps/dashboard" } }, "format": { "executor": "nx:run-commands", "options": { "command": "cd {projectRoot} && prettier --write \"**/*.{ts,tsx,js,jsx,json,css,mjs,html}\" --config ../../.prettierrc" } } } } ================================================ FILE: apps/dashboard/public/mockServiceWorker.js ================================================ /* eslint-disable */ /* tslint:disable */ /** * Mock Service Worker. * @see https://github.com/mswjs/msw * - Please do NOT modify this file. 
*/ const PACKAGE_VERSION = '2.12.2' const INTEGRITY_CHECKSUM = '4db4a41e972cec1b64cc569c66952d82' const IS_MOCKED_RESPONSE = Symbol('isMockedResponse') const activeClientIds = new Set() addEventListener('install', function () { self.skipWaiting() }) addEventListener('activate', function (event) { event.waitUntil(self.clients.claim()) }) addEventListener('message', async function (event) { const clientId = Reflect.get(event.source || {}, 'id') if (!clientId || !self.clients) { return } const client = await self.clients.get(clientId) if (!client) { return } const allClients = await self.clients.matchAll({ type: 'window', }) switch (event.data) { case 'KEEPALIVE_REQUEST': { sendToClient(client, { type: 'KEEPALIVE_RESPONSE', }) break } case 'INTEGRITY_CHECK_REQUEST': { sendToClient(client, { type: 'INTEGRITY_CHECK_RESPONSE', payload: { packageVersion: PACKAGE_VERSION, checksum: INTEGRITY_CHECKSUM, }, }) break } case 'MOCK_ACTIVATE': { activeClientIds.add(clientId) sendToClient(client, { type: 'MOCKING_ENABLED', payload: { client: { id: client.id, frameType: client.frameType, }, }, }) break } case 'CLIENT_CLOSED': { activeClientIds.delete(clientId) const remainingClients = allClients.filter((client) => { return client.id !== clientId }) // Unregister itself when there are no more clients if (remainingClients.length === 0) { self.registration.unregister() } break } } }) addEventListener('fetch', function (event) { const requestInterceptedAt = Date.now() // Bypass navigation requests. if (event.request.mode === 'navigate') { return } // Opening the DevTools triggers the "only-if-cached" request // that cannot be handled by the worker. Bypass such requests. if ( event.request.cache === 'only-if-cached' && event.request.mode !== 'same-origin' ) { return } // Bypass all requests when there are no active clients. // Prevents the self-unregistered worked from handling requests // after it's been terminated (still remains active until the next reload). 
if (activeClientIds.size === 0) { return } const requestId = crypto.randomUUID() event.respondWith(handleRequest(event, requestId, requestInterceptedAt)) }) /** * @param {FetchEvent} event * @param {string} requestId * @param {number} requestInterceptedAt */ async function handleRequest(event, requestId, requestInterceptedAt) { const client = await resolveMainClient(event) const requestCloneForEvents = event.request.clone() const response = await getResponse( event, client, requestId, requestInterceptedAt, ) // Send back the response clone for the "response:*" life-cycle events. // Ensure MSW is active and ready to handle the message, otherwise // this message will pend indefinitely. if (client && activeClientIds.has(client.id)) { const serializedRequest = await serializeRequest(requestCloneForEvents) // Clone the response so both the client and the library could consume it. const responseClone = response.clone() sendToClient( client, { type: 'RESPONSE', payload: { isMockedResponse: IS_MOCKED_RESPONSE in response, request: { id: requestId, ...serializedRequest, }, response: { type: responseClone.type, status: responseClone.status, statusText: responseClone.statusText, headers: Object.fromEntries(responseClone.headers.entries()), body: responseClone.body, }, }, }, responseClone.body ? [serializedRequest.body, responseClone.body] : [], ) } return response } /** * Resolve the main client for the given event. * Client that issues a request doesn't necessarily equal the client * that registered the worker. It's with the latter the worker should * communicate with during the response resolving phase. 
* @param {FetchEvent} event * @returns {Promise} */ async function resolveMainClient(event) { const client = await self.clients.get(event.clientId) if (activeClientIds.has(event.clientId)) { return client } if (client?.frameType === 'top-level') { return client } const allClients = await self.clients.matchAll({ type: 'window', }) return allClients .filter((client) => { // Get only those clients that are currently visible. return client.visibilityState === 'visible' }) .find((client) => { // Find the client ID that's recorded in the // set of clients that have registered the worker. return activeClientIds.has(client.id) }) } /** * @param {FetchEvent} event * @param {Client | undefined} client * @param {string} requestId * @param {number} requestInterceptedAt * @returns {Promise} */ async function getResponse(event, client, requestId, requestInterceptedAt) { // Clone the request because it might've been already used // (i.e. its body has been read and sent to the client). const requestClone = event.request.clone() function passthrough() { // Cast the request headers to a new Headers instance // so the headers can be manipulated with. const headers = new Headers(requestClone.headers) // Remove the "accept" header value that marked this request as passthrough. // This prevents request alteration and also keeps it compliant with the // user-defined CORS policies. const acceptHeader = headers.get('accept') if (acceptHeader) { const values = acceptHeader.split(',').map((value) => value.trim()) const filteredValues = values.filter( (value) => value !== 'msw/passthrough', ) if (filteredValues.length > 0) { headers.set('accept', filteredValues.join(', ')) } else { headers.delete('accept') } } return fetch(requestClone, { headers }) } // Bypass mocking when the client is not active. if (!client) { return passthrough() } // Bypass initial page load requests (i.e. static assets). 
// The absence of the immediate/parent client in the map of the active clients // means that MSW hasn't dispatched the "MOCK_ACTIVATE" event yet // and is not ready to handle requests. if (!activeClientIds.has(client.id)) { return passthrough() } // Notify the client that a request has been intercepted. const serializedRequest = await serializeRequest(event.request) const clientMessage = await sendToClient( client, { type: 'REQUEST', payload: { id: requestId, interceptedAt: requestInterceptedAt, ...serializedRequest, }, }, [serializedRequest.body], ) switch (clientMessage.type) { case 'MOCK_RESPONSE': { return respondWithMock(clientMessage.data) } case 'PASSTHROUGH': { return passthrough() } } return passthrough() } /** * @param {Client} client * @param {any} message * @param {Array} transferrables * @returns {Promise} */ function sendToClient(client, message, transferrables = []) { return new Promise((resolve, reject) => { const channel = new MessageChannel() channel.port1.onmessage = (event) => { if (event.data && event.data.error) { return reject(event.data.error) } resolve(event.data) } client.postMessage(message, [ channel.port2, ...transferrables.filter(Boolean), ]) }) } /** * @param {Response} response * @returns {Response} */ function respondWithMock(response) { // Setting response status code to 0 is a no-op. // However, when responding with a "Response.error()", the produced Response // instance will have status code set to 0. Since it's not possible to create // a Response instance with status code 0, handle that use-case separately. 
if (response.status === 0) { return Response.error() } const mockedResponse = new Response(response.body, response) Reflect.defineProperty(mockedResponse, IS_MOCKED_RESPONSE, { value: true, enumerable: true, }) return mockedResponse } /** * @param {Request} request */ async function serializeRequest(request) { return { url: request.url, mode: request.mode, method: request.method, headers: Object.fromEntries(request.headers.entries()), cache: request.cache, credentials: request.credentials, destination: request.destination, integrity: request.integrity, redirect: request.redirect, referrer: request.referrer, referrerPolicy: request.referrerPolicy, body: await request.arrayBuffer(), keepalive: request.keepalive, } } ================================================ FILE: apps/dashboard/src/App.css ================================================ #root { max-width: 1280px; margin: 0 auto; padding: 2rem; text-align: center; } .logo { height: 6em; padding: 1.5em; will-change: filter; transition: filter 300ms; } .logo:hover { filter: drop-shadow(0 0 2em #646cffaa); } .logo.react:hover { filter: drop-shadow(0 0 2em #61dafbaa); } @keyframes logo-spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } } @media (prefers-reduced-motion: no-preference) { a:nth-of-type(2) .logo { animation: logo-spin infinite 20s linear; } } .card { padding: 2em; } .read-the-docs { color: #888; } ================================================ FILE: apps/dashboard/src/App.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { useSelectedOrganization } from '@/hooks/useSelectedOrganization' import Onboarding from '@/pages/Onboarding' import OrganizationMembers from '@/pages/OrganizationMembers' import OrganizationSettings from '@/pages/OrganizationSettings' import UserOrganizationInvitations from '@/pages/UserOrganizationInvitations' import { NotificationSocketProvider } from '@/providers/NotificationSocketProvider' import { OrganizationsProvider } from '@/providers/OrganizationsProvider' import { SelectedOrganizationProvider } from '@/providers/SelectedOrganizationProvider' import { UserOrganizationInvitationsProvider } from '@/providers/UserOrganizationInvitationsProvider' import { initPylon } from '@/vendor/pylon' import { OrganizationRolePermissionsEnum, OrganizationUserRoleEnum } from '@daytonaio/api-client' import { useFeatureFlagEnabled, usePostHog } from 'posthog-js/react' import React, { Suspense, useEffect } from 'react' import { useAuth } from 'react-oidc-context' import { Navigate, Route, Routes, useLocation } from 'react-router-dom' import { BannerProvider } from './components/Banner' import { CommandPaletteProvider } from './components/CommandPalette' import LoadingFallback from './components/LoadingFallback' import { Button } from './components/ui/button' import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from './components/ui/dialog' import { DAYTONA_DOCS_URL, DAYTONA_SLACK_URL } from './constants/ExternalLinks' import { FeatureFlags } from './enums/FeatureFlags' import { RoutePath, getRouteSubPath } from './enums/RoutePath' import { useConfig } from './hooks/useConfig' import AccountSettings from './pages/AccountSettings' import AuditLogs from './pages/AuditLogs' import Dashboard from './pages/Dashboard' import EmailVerify from './pages/EmailVerify' import Experimental from './pages/Experimental' import Keys from './pages/Keys' import LandingPage from './pages/LandingPage' import 
Limits from './pages/Limits' import Logout from './pages/Logout' import NotFound from './pages/NotFound' import Playground from './pages/Playground' import Regions from './pages/Regions' import Registries from './pages/Registries' import Runners from './pages/Runners' import Sandboxes from './pages/Sandboxes' import Snapshots from './pages/Snapshots' import Spending from './pages/Spending' import Volumes from './pages/Volumes' import Wallet from './pages/Wallet' import WebhookEndpointDetails from './pages/WebhookEndpointDetails' import Webhooks from './pages/Webhooks' import { SandboxDetails } from './components/sandboxes' import { ApiProvider } from './providers/ApiProvider' import { RegionsProvider } from './providers/RegionsProvider' import { SvixProvider } from './providers/SvixProvider' // Simple redirection components for external URLs const DocsRedirect = () => { React.useEffect(() => { window.open(DAYTONA_DOCS_URL, '_blank') window.location.href = RoutePath.DASHBOARD }, []) return null } const SlackRedirect = () => { React.useEffect(() => { window.open(DAYTONA_SLACK_URL, '_blank') window.location.href = RoutePath.DASHBOARD }, []) return null } function App() { const config = useConfig() const location = useLocation() const posthog = usePostHog() const { error: authError, isAuthenticated, user, signoutRedirect } = useAuth() useEffect(() => { if (isAuthenticated && user && posthog?.get_distinct_id() !== user.profile.sub) { posthog?.identify(user.profile.sub, { email: user.profile.email, name: user.profile.name, }) } if (import.meta.env.PROD && config.pylonAppId && isAuthenticated && user) { initPylon(config.pylonAppId, { chat_settings: { app_id: config.pylonAppId, email: user.profile.email || '', name: user.profile.name || '', avatar_url: user.profile.picture, email_hash: user.profile?.email_hash as string | undefined, }, }) } }, [isAuthenticated, user, posthog, config.pylonAppId]) // Hack for tracking PostHog pageviews in SPAs useEffect(() => { if 
(import.meta.env.PROD) { posthog?.capture('$pageview', { $current_url: window.location.href, }) } }, [location, posthog]) if (authError) { return ( Authentication Error {authError.message} ) } return ( } /> } /> } /> } /> }> } > } /> } /> } /> } /> } /> } /> } /> } /> {config.billingApiUrl && ( <> } /> } /> } /> )} } /> { // TODO: uncomment when we allow creating custom roles /* } /> */ } } /> } /> } /> } /> } /> } /> } /> } /> } /> } /> } /> } /> ) } function NonPersonalOrganizationPageWrapper({ children }: { children: React.ReactNode }) { const { selectedOrganization } = useSelectedOrganization() if (selectedOrganization?.personal) { return } return children } function OwnerAccessOrganizationPageWrapper({ children }: { children: React.ReactNode }) { const { authenticatedUserOrganizationMember } = useSelectedOrganization() if (authenticatedUserOrganizationMember?.role !== OrganizationUserRoleEnum.OWNER) { return } return children } function RequiredPermissionsOrganizationPageWrapper({ children, requiredPermissions, }: { children: React.ReactNode requiredPermissions: OrganizationRolePermissionsEnum[] }) { const { authenticatedUserHasPermission } = useSelectedOrganization() if (!requiredPermissions.every((permission) => authenticatedUserHasPermission(permission))) { return } return children } function RequiredFeatureFlagWrapper({ children, flagKey }: { children: React.ReactNode; flagKey: FeatureFlags }) { const flagEnabled = useFeatureFlagEnabled(flagKey) if (!flagEnabled) { return } return children } export default App ================================================ FILE: apps/dashboard/src/api/apiClient.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import { BillingApiClient } from '@/billing-api/billingApiClient'
import { DashboardConfig } from '@/types/DashboardConfig'
import {
  Configuration as AnalyticsConfiguration,
  TelemetryApi as AnalyticsTelemetryApi,
  UsageApi as AnalyticsUsageApi,
} from '@daytonaio/analytics-api-client'
import {
  ApiKeysApi,
  AuditApi,
  Configuration,
  DockerRegistryApi,
  OrganizationsApi,
  RegionsApi,
  RunnersApi,
  SandboxApi,
  SnapshotsApi,
  ToolboxApi,
  UsersApi,
  VolumesApi,
  WebhooksApi,
} from '@daytonaio/api-client'
import axios, { AxiosError } from 'axios'
import { DaytonaError } from './errors'

/**
 * Facade that constructs and exposes every generated API client the
 * dashboard talks to (core API, billing, analytics), all sharing one
 * axios instance whose response interceptor maps failures to DaytonaError.
 */
export class ApiClient {
  private config: Configuration
  private _snapshotApi: SnapshotsApi
  private _sandboxApi: SandboxApi
  private _userApi: UsersApi
  private _apiKeyApi: ApiKeysApi
  private _dockerRegistryApi: DockerRegistryApi
  private _organizationsApi: OrganizationsApi
  private _billingApi: BillingApiClient
  private _volumeApi: VolumesApi
  private _toolboxApi: ToolboxApi
  private _auditApi: AuditApi
  private _regionsApi: RegionsApi
  private _runnersApi: RunnersApi
  private _webhooksApi: WebhooksApi
  // Analytics clients are optional: null when no analyticsApiUrl is configured.
  private _analyticsUsageApi: AnalyticsUsageApi | null
  private _analyticsTelemetryApi: AnalyticsTelemetryApi | null

  /**
   * @param config dashboard runtime configuration (API base URLs)
   * @param accessToken bearer token used for all requests; see the
   *   setAccessToken note below about which clients observe later updates
   */
  constructor(config: DashboardConfig, accessToken: string) {
    this.config = new Configuration({
      basePath: config.apiUrl,
      accessToken: accessToken,
    })

    const axiosInstance = axios.create()
    // Normalize every HTTP failure into a DaytonaError so UI code can
    // pattern-match (e.g. OrganizationSuspendedError) instead of parsing axios shapes.
    axiosInstance.interceptors.response.use(
      (response) => {
        return response
      },
      (error) => {
        let errorMessage: string
        // Axios encodes client-side timeouts in the message text ("timeout of Nms exceeded").
        if (error instanceof AxiosError && error.message.includes('timeout of')) {
          errorMessage = 'Operation timed out'
        } else {
          // Prefer the server-provided message, then the raw payload, then axios' own message.
          errorMessage = error.response?.data?.message || error.response?.data || error.message || String(error)
        }
        throw DaytonaError.fromString(String(errorMessage), { cause: error instanceof Error ? error : undefined })
      },
    )

    // Initialize APIs
    this._snapshotApi = new SnapshotsApi(this.config, undefined, axiosInstance)
    this._sandboxApi = new SandboxApi(this.config, undefined, axiosInstance)
    this._userApi = new UsersApi(this.config, undefined, axiosInstance)
    this._apiKeyApi = new ApiKeysApi(this.config, undefined, axiosInstance)
    this._dockerRegistryApi = new DockerRegistryApi(this.config, undefined, axiosInstance)
    this._organizationsApi = new OrganizationsApi(this.config, undefined, axiosInstance)
    // Billing has its own client; fall back to same-origin when no billing URL is configured.
    this._billingApi = new BillingApiClient(config.billingApiUrl || window.location.origin, accessToken)
    this._volumeApi = new VolumesApi(this.config, undefined, axiosInstance)
    this._toolboxApi = new ToolboxApi(this.config, undefined, axiosInstance)
    this._auditApi = new AuditApi(this.config, undefined, axiosInstance)
    this._regionsApi = new RegionsApi(this.config, undefined, axiosInstance)
    this._runnersApi = new RunnersApi(this.config, undefined, axiosInstance)
    this._webhooksApi = new WebhooksApi(this.config, undefined, axiosInstance)

    if (config.analyticsApiUrl) {
      // NOTE(review): both `accessToken` and an explicit Authorization header are
      // set here, which looks redundant — confirm which one the generated
      // analytics client actually uses.
      const analyticsConfig = new AnalyticsConfiguration({
        basePath: config.analyticsApiUrl,
        accessToken: accessToken,
        baseOptions: {
          headers: {
            Authorization: `Bearer ${accessToken}`,
          },
        },
      })
      this._analyticsUsageApi = new AnalyticsUsageApi(analyticsConfig, undefined, axiosInstance)
      this._analyticsTelemetryApi = new AnalyticsTelemetryApi(analyticsConfig, undefined, axiosInstance)
    } else {
      this._analyticsUsageApi = null
      this._analyticsTelemetryApi = null
    }
  }

  /**
   * Updates the token used by the core API clients (they read
   * this.config.accessToken per request).
   * NOTE(review): _billingApi and the analytics clients captured the token in
   * their headers at construction and do NOT see this update — verify callers
   * recreate ApiClient on token refresh.
   */
  public setAccessToken(accessToken: string) {
    this.config.accessToken = accessToken
  }

  // Read-only accessors for the individual generated clients.
  public get snapshotApi() {
    return this._snapshotApi
  }

  public get sandboxApi() {
    return this._sandboxApi
  }

  public get userApi() {
    return this._userApi
  }

  public get apiKeyApi() {
    return this._apiKeyApi
  }

  public get dockerRegistryApi() {
    return this._dockerRegistryApi
  }

  public get organizationsApi() {
    return this._organizationsApi
  }

  public get billingApi() {
    return this._billingApi
  }

  public get volumeApi() {
    return this._volumeApi
  }

  public get toolboxApi() {
    return this._toolboxApi
  }

  public get auditApi() {
    return this._auditApi
  }

  public get regionsApi() {
    return this._regionsApi
  }

  public get runnersApi() {
    return this._runnersApi
  }

  public get webhooksApi() {
    return this._webhooksApi
  }

  /** Null unless an analytics API URL was configured. */
  public get analyticsUsageApi() {
    return this._analyticsUsageApi
  }

  /** Null unless an analytics API URL was configured. */
  public get analyticsTelemetryApi() {
    return this._analyticsTelemetryApi
  }

  /**
   * Issues an arbitrary authenticated request against the core API base path.
   * NOTE(review): despite the comment in the original source, this creates a
   * FRESH axios instance WITHOUT the error-mapping interceptor above, so
   * failures surface as raw AxiosError — confirm that is intended.
   */
  public async webhookRequest(method: string, url: string, data?: any) {
    const axiosInstance = axios.create({
      baseURL: this.config.basePath,
      headers: {
        Authorization: `Bearer ${this.config.accessToken}`,
      },
    })
    return axiosInstance.request({
      method,
      url,
      data,
    })
  }

  /**
   * A new, bare (interceptor-free) axios instance pre-authorized against the
   * core API — the token is captured at the time this getter is called.
   */
  public get axiosInstance() {
    return axios.create({
      baseURL: this.config.basePath,
      headers: {
        Authorization: `Bearer ${this.config.accessToken}`,
      },
    })
  }
}

================================================ FILE: apps/dashboard/src/api/errors.ts ================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

/**
 * Base error type for everything surfaced by the dashboard API layer.
 * Use the static factories so suspension errors are detected consistently.
 */
export class DaytonaError extends Error {
  /**
   * Wraps an Error, specializing to OrganizationSuspendedError when the
   * message (via String(error), i.e. "Error: <message>") contains the
   * backend's suspension marker text.
   */
  public static fromError(error: Error): DaytonaError {
    if (String(error).includes('Organization is suspended')) {
      return new OrganizationSuspendedError(error.message, {
        cause: error.cause,
      })
    }
    return new DaytonaError(error.message, {
      cause: error.cause,
    })
  }

  /** Convenience overload of fromError for plain message strings. */
  public static fromString(error: string, options?: { cause?: Error }): DaytonaError {
    return DaytonaError.fromError(new Error(error, options))
  }
}

/** Raised when the backend reports the organization as suspended. */
export class OrganizationSuspendedError extends DaytonaError {}

================================================ FILE: apps/dashboard/src/assets/Logo.tsx ================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
* SPDX-License-Identifier: AGPL-3.0 */ export function Logo() { return ( ) } export function LogoText() { return ( ) } ================================================ FILE: apps/dashboard/src/billing-api/billingApiClient.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { DaytonaError } from '@/api/errors' import axios, { AxiosInstance } from 'axios' import { AutomaticTopUp, OrganizationEmail, OrganizationTier, OrganizationUsage, OrganizationWallet, PaginatedInvoices, PaymentUrl, Tier, WalletTopUpRequest, } from './types' export class BillingApiClient { private axiosInstance: AxiosInstance constructor(apiUrl: string, accessToken: string) { this.axiosInstance = axios.create({ baseURL: apiUrl, headers: { Authorization: `Bearer ${accessToken}`, }, }) this.axiosInstance.interceptors.response.use( (response) => { return response }, (error) => { const errorMessage = error.response?.data?.message || error.response?.data || error.message || String(error) throw DaytonaError.fromString(String(errorMessage)) }, ) } public async getOrganizationUsage(organizationId: string): Promise { const response = await this.axiosInstance.get(`/organization/${organizationId}/usage`) return response.data } public async getPastOrganizationUsage(organizationId: string, periods?: number): Promise { const response = await this.axiosInstance.get(`/organization/${organizationId}/usage/past?periods=${periods || 12}`) return response.data } public async getOrganizationWallet(organizationId: string): Promise { const response = await this.axiosInstance.get(`/organization/${organizationId}/wallet`) return response.data } public async setAutomaticTopUp(organizationId: string, automaticTopUp?: AutomaticTopUp): Promise { await this.axiosInstance.put(`/organization/${organizationId}/wallet/automatic-top-up`, automaticTopUp) } public async getOrganizationBillingPortalUrl(organizationId: string): Promise { const 
response = await this.axiosInstance.get(`/organization/${organizationId}/portal-url`) return response.data } public async getOrganizationCheckoutUrl(organizationId: string): Promise { const response = await this.axiosInstance.get(`/organization/${organizationId}/checkout-url`) return response.data } public async redeemCoupon(organizationId: string, couponCode: string): Promise { const response = await this.axiosInstance.post(`/organization/${organizationId}/redeem-coupon/${couponCode}`) return response.data?.message || 'Coupon redeemed successfully' } public async getOrganizationTier(organizationId: string): Promise { const response = await this.axiosInstance.get(`/organization/${organizationId}/tier`) const orgTier: OrganizationTier = { tier: response.data.tier, largestSuccessfulPaymentDate: response.data.largestSuccessfulPaymentDate ? new Date(response.data.largestSuccessfulPaymentDate) : undefined, largestSuccessfulPaymentCents: response.data.largestSuccessfulPaymentCents, expiresAt: response.data.expiresAt ? new Date(response.data.expiresAt) : undefined, hasVerifiedBusinessEmail: response.data.hasVerifiedBusinessEmail, } return orgTier } public async upgradeTier(organizationId: string, tier: number): Promise { await this.axiosInstance.post(`/organization/${organizationId}/tier/upgrade`, { tier }) } public async downgradeTier(organizationId: string, tier: number): Promise { await this.axiosInstance.post(`/organization/${organizationId}/tier/downgrade`, { tier }) } public async listTiers(): Promise { const response = await this.axiosInstance.get('/tier') return response.data } public async listOrganizationEmails(organizationId: string): Promise { const response = await this.axiosInstance.get(`/organization/${organizationId}/email`) return response.data.map((email: any) => ({ ...email, verifiedAt: email.verifiedAt ? 
new Date(email.verifiedAt) : undefined, })) } public async addOrganizationEmail(organizationId: string, email: string): Promise { await this.axiosInstance.post(`/organization/${organizationId}/email`, { email }) } public async deleteOrganizationEmail(organizationId: string, email: string): Promise { await this.axiosInstance.delete(`/organization/${organizationId}/email`, { data: { email } }) } public async verifyOrganizationEmail(organizationId: string, email: string, token: string): Promise { await this.axiosInstance.post(`/organization/${organizationId}/email/verify`, { email, token }) } public async resendOrganizationEmailVerification(organizationId: string, email: string): Promise { await this.axiosInstance.post(`/organization/${organizationId}/email/resend`, { email }) } public async listInvoices(organizationId: string, page?: number, perPage?: number): Promise { const params = new URLSearchParams() if (page !== undefined) { params.append('page', page.toString()) } if (perPage !== undefined) { params.append('perPage', perPage.toString()) } const queryString = params.toString() const url = `/organization/${organizationId}/invoices${queryString ? 
`?${queryString}` : ''}` const response = await this.axiosInstance.get(url) return response.data } public async createInvoicePaymentUrl(organizationId: string, invoiceId: string): Promise { const response = await this.axiosInstance.post(`/organization/${organizationId}/invoices/${invoiceId}/payment-url`) return response.data } public async voidInvoice(organizationId: string, invoiceId: string): Promise { await this.axiosInstance.post(`/organization/${organizationId}/invoices/${invoiceId}/void`) } public async topUpWallet(organizationId: string, amountCents: number): Promise { const response = await this.axiosInstance.post(`/organization/${organizationId}/wallet/top-up`, { amountCents, } as WalletTopUpRequest) return response.data } } ================================================ FILE: apps/dashboard/src/billing-api/index.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export * from './billingApiClient' export * from './types' ================================================ FILE: apps/dashboard/src/billing-api/types/Invoice.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ export interface InvoiceErrorDetail { details?: Record errorCode: string } export type InvoicePaymentStatus = 'pending' | 'succeeded' | 'failed' export type InvoiceStatus = 'draft' | 'finalized' | 'failed' | 'voided' | 'pending' export type InvoiceType = 'subscription' | 'add_on' | 'one_off' export interface Invoice { currency: string errorDetails?: InvoiceErrorDetail[] fileUrl?: string id: string issuingDate: string number: string paymentDueDate: string paymentOverdue: boolean paymentStatus: InvoicePaymentStatus sequentialId: number status: InvoiceStatus totalAmountCents: number totalDueAmountCents: number type: InvoiceType } export interface PaginatedInvoices { items: Invoice[] totalItems: number totalPages: number } export interface PaymentUrl { url: string } ================================================ FILE: apps/dashboard/src/billing-api/types/OrganizationEmail.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export type OrganizationEmail = { email: string verified: boolean owner: boolean business: boolean verifiedAt?: Date } ================================================ FILE: apps/dashboard/src/billing-api/types/OrganizationTier.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export type OrganizationTier = { tier: number largestSuccessfulPaymentDate?: Date largestSuccessfulPaymentCents: number expiresAt?: Date hasVerifiedBusinessEmail: boolean } ================================================ FILE: apps/dashboard/src/billing-api/types/OrganizationUsage.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

/** Usage totals for one billing period, with a per-metric breakdown. */
export interface OrganizationUsage {
  from: Date
  to: Date
  issuingDate: string
  // All monetary amounts are in cents.
  amountCents: number
  totalAmountCents: number
  taxesAmountCents: number
  usageCharges: UsageCharge[]
}

/** Charge accrued for a single billable metric within the period. */
export interface UsageCharge {
  units: string
  eventsCount: number
  amountCents: number
  billableMetric: BillableMetricCode
}

// String enum so serialized values match the billing backend's metric codes.
export enum BillableMetricCode {
  CPU_USAGE = 'cpu_usage',
  GPU_USAGE = 'gpu_usage',
  RAM_USAGE = 'ram_usage',
  DISK_USAGE = 'disk_usage',
  UNKNOWN = 'unknown',
}

================================================ FILE: apps/dashboard/src/billing-api/types/OrganizationWallet.ts ================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

/** Wallet state shown on the billing pages; amounts in cents. */
export interface OrganizationWallet {
  balanceCents: number
  ongoingBalanceCents: number
  name: string
  creditCardConnected: boolean
  automaticTopUp?: AutomaticTopUp
  hasFailedOrPendingInvoice?: boolean
}

/** Top up to targetAmount whenever the balance drops below thresholdAmount. */
export type AutomaticTopUp = {
  thresholdAmount: number
  targetAmount: number
}

/** Request body for a one-off wallet top-up. */
export interface WalletTopUpRequest {
  amountCents: number
}

================================================ FILE: apps/dashboard/src/billing-api/types/index.ts ================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

// Barrel file: re-exports every billing API type.
export * from './OrganizationTier'
export * from './OrganizationUsage'
export * from './OrganizationWallet'
export * from './tier'
export * from './OrganizationEmail'
export * from './Invoice'

================================================ FILE: apps/dashboard/src/billing-api/types/tier.ts ================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

/** A purchasable tier: its resource limits and top-up rules. */
export type Tier = {
  tier: number
  tierLimit: TierLimit
  minTopUpAmountCents: number
  topUpIntervalDays: number
}

/** Concurrent resource ceilings granted by a tier. */
export type TierLimit = {
  concurrentCPU: number
  concurrentRAMGiB: number
  concurrentDiskGiB: number
}

================================================ FILE: apps/dashboard/src/components/AccountProviderIcon.tsx ================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { ComponentType } from 'react'
import { Github, Link2, Mail, LucideProps } from 'lucide-react'

type Props = {
  provider: string
  className?: string
}

/** Renders the icon for an auth provider ("github", "google-oauth2", ...). */
export function AccountProviderIcon(props: Props) {
  return getIcon(props.provider, props.className)
}

// NOTE(review): this function's JSX return values were stripped during text
// extraction (both `return` statements below are missing their elements);
// `Link2` and `LucideProps` are imported but unused in the visible text, so
// they were presumably part of the stripped markup — restore from version
// control.
const getIcon = (provider: string, className?: string) => {
  const IconComponent = ICON[provider]
  if (!IconComponent) {
    return
  }
  return
}

// Maps provider identifiers to icon components. NOTE(review): the type
// argument of `ComponentType` was stripped by extraction — confirm.
const ICON: { [x: string]: ComponentType } = {
  github: Github,
  'google-oauth2': Mail,
}

================================================ FILE: apps/dashboard/src/components/AnnouncementBanner.tsx ================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { X, Info } from 'lucide-react'
import { Button } from './ui/button'

interface AnnouncementBannerProps {
  text: string
  learnMoreUrl?: string
  onDismiss: () => void
}

/**
 * Dismissible informational banner with an optional "Learn More" link.
 * NOTE(review): the JSX body was stripped during text extraction — the
 * fragments on the following lines are residue of the original markup.
 */
export function AnnouncementBanner({ text, learnMoreUrl, onDismiss }: AnnouncementBannerProps) {
  return (

{text}

{learnMoreUrl && ( Learn More )}
) } ================================================ FILE: apps/dashboard/src/components/ApiKeyTable.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { CREATE_API_KEY_PERMISSIONS_GROUPS } from '@/constants/CreateApiKeyPermissionsGroups' import { DEFAULT_PAGE_SIZE } from '@/constants/Pagination' import { getRelativeTimeString } from '@/lib/utils' import { ApiKeyList, ApiKeyListPermissionsEnum, CreateApiKeyPermissionsEnum } from '@daytonaio/api-client' import { ColumnDef, flexRender, getCoreRowModel, getPaginationRowModel, getSortedRowModel, SortingState, useReactTable, } from '@tanstack/react-table' import { KeyRound, Loader2 } from 'lucide-react' import { useMemo, useState } from 'react' import { Pagination } from './Pagination' import { TableEmptyState } from './TableEmptyState' import { Badge } from './ui/badge' import { Button } from './ui/button' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger, } from './ui/dialog' import { Popover, PopoverContent, PopoverTrigger } from './ui/popover' import { Skeleton } from './ui/skeleton' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from './ui/table' import { Tooltip, TooltipContent, TooltipTrigger } from './ui/tooltip' interface DataTableProps { data: ApiKeyList[] loading: boolean isLoadingKey: (key: ApiKeyList) => boolean onRevoke: (key: ApiKeyList) => void } export function ApiKeyTable({ data, loading, isLoadingKey, onRevoke }: DataTableProps) { const [sorting, setSorting] = useState([]) const columns = getColumns({ onRevoke, isLoadingKey }) const table = useReactTable({ data, columns, getCoreRowModel: getCoreRowModel(), getPaginationRowModel: getPaginationRowModel(), onSortingChange: setSorting, getSortedRowModel: getSortedRowModel(), state: { sorting, }, initialState: { pagination: { pageSize: DEFAULT_PAGE_SIZE, }, }, }) return (
{table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => { return ( {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ) })} ))} {loading ? ( <> {Array.from(new Array(5)).map((_, i) => ( {table.getVisibleLeafColumns().map((column, i, arr) => i === arr.length - 1 ? null : ( ), )} ))} ) : table.getRowModel().rows?.length ? ( table.getRowModel().rows.map((row) => ( {row.getVisibleCells().map((cell) => ( {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} )) ) : ( } description={

API Keys authenticate requests made through the Daytona SDK or CLI.

Generate one and{' '} check out the API Key setup guide .

} /> )}
) } const getExpiresAtColor = (expiresAt: Date | null) => { if (!expiresAt) { return 'text-foreground' } const MILLISECONDS_IN_MINUTE = 1000 * 60 const MINUTES_IN_DAY = 24 * 60 const diffInMinutes = Math.floor((new Date(expiresAt).getTime() - new Date().getTime()) / MILLISECONDS_IN_MINUTE) // Already expired if (diffInMinutes < 0) { return 'text-red-500' } // Expires within a day if (diffInMinutes < MINUTES_IN_DAY) { return 'text-yellow-600 dark:text-yellow-400' } // Expires in more than a day return 'text-foreground' } const getColumns = ({ onRevoke, isLoadingKey, }: { onRevoke: (key: ApiKeyList) => void isLoadingKey: (key: ApiKeyList) => boolean }): ColumnDef[] => { const columns: ColumnDef[] = [ { accessorKey: 'name', header: 'Name', }, { accessorKey: 'value', header: 'Key', }, { accessorKey: 'permissions', header: () => { return
Permissions
}, cell: ({ row }) => { return }, }, { accessorKey: 'createdAt', header: 'Created', cell: ({ row }) => { const createdAt = row.original.createdAt const relativeTime = getRelativeTimeString(createdAt).relativeTimeString const fullDate = new Date(createdAt).toLocaleString() return ( {relativeTime}

{fullDate}

) }, }, { accessorKey: 'lastUsedAt', header: 'Last Used', cell: ({ row }) => { const lastUsedAt = row.original.lastUsedAt const relativeTime = getRelativeTimeString(lastUsedAt).relativeTimeString if (!lastUsedAt) { return {relativeTime} } const fullDate = new Date(lastUsedAt).toLocaleString() return ( {relativeTime}

{fullDate}

) }, }, { accessorKey: 'expiresAt', header: 'Expires', cell: ({ row }) => { const expiresAt = row.original.expiresAt const relativeTime = getRelativeTimeString(expiresAt).relativeTimeString if (!expiresAt) { return {relativeTime} } const fullDate = new Date(expiresAt).toLocaleString() const color = getExpiresAtColor(expiresAt) return ( {relativeTime}

{fullDate}

) }, }, { id: 'actions', size: 80, cell: ({ row }) => { const isLoading = isLoadingKey(row.original) return ( Confirm Key Revocation Are you absolutely sure? This action cannot be undone. This will permanently delete this API key. ) }, }, ] return columns } const allPermissions = Object.values(CreateApiKeyPermissionsEnum) const IMPLICIT_READ_RESOURCES = ['Sandboxes', 'Snapshots', 'Registries', 'Regions'] function PermissionsTooltip({ permissions, availablePermissions, }: { permissions: ApiKeyListPermissionsEnum[] availablePermissions: CreateApiKeyPermissionsEnum[] }) { const isFullAccess = allPermissions.length === permissions.length const isSingleResourceAccess = CREATE_API_KEY_PERMISSIONS_GROUPS.find( (group) => group.permissions.length === permissions.length && group.permissions.every((p) => permissions.includes(p)), ) const availableGroups = useMemo(() => { return CREATE_API_KEY_PERMISSIONS_GROUPS.map((group) => ({ ...group, permissions: group.permissions.filter((p) => availablePermissions.includes(p)), })).filter((group) => group.permissions.length > 0) }, [availablePermissions]) const badgeVariant = isFullAccess ? 'warning' : 'outline' const badgeText = isFullAccess ? 'Full' : isSingleResourceAccess ? isSingleResourceAccess.name : 'Restricted' return ( {badgeText} Access

Permissions

{availableGroups.map((group) => { const selectedPermissions = group.permissions.filter((p) => permissions.includes(p)) const hasImplicitRead = IMPLICIT_READ_RESOURCES.includes(group.name) if (selectedPermissions.length === 0 && !hasImplicitRead) { return null } return (

{group.name}

{hasImplicitRead && ( Read )} {selectedPermissions.map((p) => ( {p.split(':')[0]} ))}
) })}
) } ================================================ FILE: apps/dashboard/src/components/AuditLogTable.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Pagination } from '@/components/Pagination' import { TableEmptyState } from '@/components/TableEmptyState' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '@/components/ui/table' import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip' import { getRelativeTimeString } from '@/lib/utils' import { AuditLog } from '@daytonaio/api-client' import { ColumnDef, flexRender, getCoreRowModel, useReactTable } from '@tanstack/react-table' import { TextSearch } from 'lucide-react' interface Props { data: AuditLog[] loading: boolean pagination: { pageIndex: number pageSize: number } pageCount: number totalItems: number onPaginationChange: (pagination: { pageIndex: number; pageSize: number }) => void } export function AuditLogTable({ data, loading, pagination, pageCount, onPaginationChange, totalItems }: Props) { const columns = getColumns() const table = useReactTable({ data, columns, getCoreRowModel: getCoreRowModel(), manualPagination: true, pageCount: pageCount || 1, onPaginationChange: pagination ? (updater) => { const newPagination = typeof updater === 'function' ? updater(table.getState().pagination) : updater onPaginationChange(newPagination) } : undefined, state: { pagination: { pageIndex: pagination?.pageIndex || 0, pageSize: pagination?.pageSize || 10, }, }, getRowId: (row) => row.id, }) return (
{table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => { return ( {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ) })} ))} {loading ? ( Loading... ) : table.getRowModel().rows?.length ? ( table.getRowModel().rows.map((row) => ( {row.getVisibleCells().map((cell) => ( {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} )) ) : ( } description={

Audit logs are detailed records of all actions taken by users in the organization.

} /> )}
) } const getColumns = (): ColumnDef[] => { const columns: ColumnDef[] = [ { header: 'Time', size: 200, cell: ({ row }) => { const createdAt = new Date(row.original.createdAt) const localeString = createdAt.toLocaleString() const relativeTimeString = getRelativeTimeString(row.original.createdAt).relativeTimeString return (
{relativeTimeString}
{localeString}
) }, }, { header: 'User', size: 240, cell: ({ row }) => { const actorEmail = row.original.actorEmail const actorId = row.original.actorId const label = actorEmail || actorId return (
{label}

{label}

) }, }, { header: 'Action', size: 240, cell: ({ row }) => { const action = row.original.action return (
{action}

{action}

) }, }, { header: 'Target', size: 360, cell: ({ row }) => { const targetType = row.original.targetType const targetId = row.original.targetId if (!targetType && !targetId) { return '-' } return (
{targetType &&
{targetType}
} {targetId &&
{targetId}
}
) }, }, { header: 'Outcome', size: 320, cell: ({ row }) => { const statusCode = row.original.statusCode const errorMessage = row.original.errorMessage const outcomeInfo = getOutcomeInfo(statusCode) return (
{outcomeInfo.label}
{!errorMessage ? (
{statusCode || '204'}
) : (
{statusCode || '500'} {` - ${errorMessage}`}

{errorMessage}

)}
) }, }, ] return columns } type OutcomeCategory = 'informational' | 'success' | 'redirect' | 'client-error' | 'server-error' | 'unknown' interface OutcomeInfo { label: string colorClass: string } const getOutcomeCategory = (statusCode: number | null | undefined): OutcomeCategory => { if (!statusCode) return 'unknown' if (statusCode >= 100 && statusCode < 200) return 'informational' if (statusCode >= 200 && statusCode < 300) return 'success' if (statusCode >= 300 && statusCode < 400) return 'redirect' if (statusCode >= 400 && statusCode < 500) return 'client-error' if (statusCode >= 500 && statusCode < 600) return 'server-error' return 'unknown' } const getOutcomeInfo = (statusCode: number | null | undefined): OutcomeInfo => { const category = getOutcomeCategory(statusCode) switch (category) { case 'informational': return { label: 'Info', colorClass: 'text-blue-500 dark:text-blue-300', } case 'success': return { label: 'Success', colorClass: 'text-green-600 dark:text-green-400', } case 'redirect': return { label: 'Redirect', colorClass: 'text-blue-600 dark:text-blue-400', } case 'client-error': case 'server-error': return { label: 'Error', colorClass: 'text-red-600 dark:text-red-400', } case 'unknown': default: return { label: 'Unknown', colorClass: 'text-gray-600 dark:text-gray-400', } } } ================================================ FILE: apps/dashboard/src/components/Banner.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { cn } from '@/lib/utils' import { cva, type VariantProps } from 'class-variance-authority' import { AlertCircleIcon, AlertTriangleIcon, CheckCircle2Icon, ChevronRight, InfoIcon, MegaphoneIcon, XIcon, } from 'lucide-react' import { AnimatePresence, motion } from 'motion/react' import { createContext, useCallback, useContext, useEffect, useMemo, useState } from 'react' import { v4 as uuidv4 } from 'uuid' type BannerVariant = 'info' | 'success' | 'warning' | 'error' | 'neutral' const variantIcons = { info: InfoIcon, success: CheckCircle2Icon, warning: AlertTriangleIcon, error: AlertCircleIcon, neutral: MegaphoneIcon, } const priorityMap: Record = { error: 0, warning: 1, success: 2, info: 3, neutral: 4, } const bannerVariants = cva('relative overflow-hidden backdrop-blur-xl border-y w-full', { variants: { variant: { info: 'bg-info-background text-info-foreground border-info-separator', success: 'bg-success-background text-success-foreground border-success-separator', warning: 'bg-warning-background text-warning-foreground border-warning-separator', error: 'bg-destructive-background text-destructive-foreground border-destructive-separator', neutral: 'bg-muted/40 border-border', }, }, defaultVariants: { variant: 'info', }, }) interface BannerAction { label: string onClick: () => void } interface BannerNotification { id?: string variant?: BannerVariant title: string description?: string action?: BannerAction icon?: React.ReactNode onDismiss?: () => void isDismissible?: boolean } interface BannerContextValue { notifications: BannerNotification[] addBanner: (notification: BannerNotification) => string removeBanner: (id: string) => void clearBanners: () => void } const BannerContext = createContext(null) export const useBanner = () => { const context = useContext(BannerContext) if (!context) { throw new Error('useBanner must be used within a BannerProvider') } return context } interface BannerProviderProps { children: 
React.ReactNode defaultNotifications?: BannerNotification[] } export const BannerProvider = ({ children, defaultNotifications = [] }: BannerProviderProps) => { const [notifications, setNotifications] = useState(defaultNotifications) const addBanner = useCallback((notification: BannerNotification) => { const id = notification.id || uuidv4() setNotifications((prev) => { const existingIndex = prev.findIndex((n) => n.id === id) if (existingIndex >= 0) { const updated = [...prev] updated[existingIndex] = { ...notification, id } return updated } return [{ ...notification, id }, ...prev] }) return id }, []) const removeBanner = useCallback((id: string) => { setNotifications((prev) => prev.filter((n) => n.id !== id)) }, []) const clearBanners = useCallback(() => { setNotifications([]) }, []) const sortedNotifications = useMemo(() => { return [...notifications].sort((a, b) => { const variantA = (a.variant || 'info') as BannerVariant const variantB = (b.variant || 'info') as BannerVariant return priorityMap[variantA] - priorityMap[variantB] }) }, [notifications]) const contextValue = useMemo( () => ({ notifications: sortedNotifications, addBanner, removeBanner, clearBanners, }), [sortedNotifications, addBanner, removeBanner, clearBanners], ) return {children} } interface BannerProps extends VariantProps { title: string description?: string action?: BannerAction onDismiss?: () => void total?: number currentIndex?: number onNext?: () => void className?: string icon?: React.ReactNode bannerClassName?: string } export const Banner = ({ variant = 'info', title, description, action, onDismiss, total = 0, currentIndex = 0, onNext, className, bannerClassName, icon, ...props }: BannerProps & React.ComponentProps) => { const IconComponent = variantIcons[variant ?? 'info'] const role = variant === 'error' || variant === 'warning' ? 'alert' : 'status' return (
{icon || }
{title} {description && ( <> {description} )}
{action && ( )} {total > 1 && (
{currentIndex + 1}/{total} onNext?.()} aria-label="Next Notification">
)}
{onDismiss && ( onDismiss()} aria-label="Dismiss"> )}
) } function BannerButton({ className, ...props }: React.ComponentProps<'button'>) { return ( ) : ( Use to navigate )} ) } function Breadcrumbs() { const { pageStack, pages } = useCommandPalette() const { goToPage } = useCommandPaletteActions() return (
{pageStack.map((id, i) => { const isLast = i === pageStack.length - 1 const page = pages.get(id) return ( {i > 0 && ( )} ) })}
) } function CommandGroupRenderer({ group }: { group: CommandGroup }) { const sortedCommands = useMemo(() => Array.from(group.commands.values()), [group.commands]) if (sortedCommands.length === 0) return null return ( {sortedCommands.map((cmd) => ( ))} ) } function CommandItem({ config }: { config: CommandConfig }) { const { pushPage, setIsOpen } = useCommandPaletteActions() const handleSelect = useCallback(() => { if (config.page) { pushPage(config.page) } else { config.onSelect?.() if (!config.chainable) { setIsOpen(false) } } }, [config, pushPage, setIsOpen]) const value = config.value ?? (typeof config.label === 'string' ? config.label : config.id) return (
{config.loading ? ( ) : ( config.icon && {config.icon} )} {config.label}
{config.page && }
) } function CommandFooter({ children, hideResultsCount = false, className, }: { children: ReactNode hideResultsCount?: boolean className?: string }) { const resultsCount = useCommandState((state) => state.filtered.count) return (
{children} {!hideResultsCount && {pluralize(resultsCount, 'result', 'results')}}
) } function CommandEmpty({ search }: { search: string }) { return ( No results found for "{search}". ) } function PulseBar({ mode, isVisible, className }: { mode: 'flash' | 'pulse'; isVisible: boolean; className?: string }) { const gradientBackground = `linear-gradient(90deg, transparent, #66F0C2, #00C241, #66F0C2, transparent)` if (!isVisible) return null return (
{mode === 'flash' && ( )} {mode === 'pulse' && ( )}
) } export function CommandHighlight({ children, className, ...props }: { children: ReactNode } & React.HTMLAttributes) { return ( {children} ) } export function CommandError({ message = 'Something went wrong', onRetry, className, }: { message?: string onRetry?: () => void className?: string }) { return (

{message}

{onRetry && ( )}
) } export function CommandLoading({ count = 3, className }: { count?: number; className?: string }) { return (
{Array.from({ length: count }).map((_, i) => (
))}
) } export { Kbd, KbdGroup } from './ui/kbd' ================================================ FILE: apps/dashboard/src/components/ComparisonTable.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { cn } from '@/lib/utils' import { ChevronDown, ChevronRight } from 'lucide-react' import { FC, Fragment, ReactNode, useState } from 'react' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from './ui/table' interface ComparisonRow { label: ReactNode values: ReactNode[] } export interface ComparisonSection { id?: string title: ReactNode collapsed?: boolean collapsible?: boolean rows: ComparisonRow[] } interface Props { columns: string[] headerLabel?: string currentColumn?: number currentRow?: number data: ComparisonSection[] className?: string } export function ComparisonTable({ columns = [], headerLabel, currentColumn, currentRow, data = [], className }: Props) { return (
{headerLabel} {columns.map((column, index) => ( {column} ))} {data.map((section, idx) => ( ))}
) } interface CollapsibleSectionProps { section: ComparisonSection currentColumn?: number currentRow?: number } const CollapsibleSection: FC = ({ section, currentColumn, currentRow }) => { const [isOpen, setIsOpen] = useState(section.collapsible ? !section.collapsed : true) return ( {section.collapsible && ( setIsOpen(!isOpen)} className={cn('cursor-pointer border-b border-border group select-none hover:bg-muted')} aria-expanded={isOpen} >
{isOpen ? : } {section.title}
)} {isOpen && section.rows.map((row, rowIdx) => ( {row.label} {row.values.map((val, colIdx) => { const isActive = colIdx === currentColumn return ( {val} ) })} ))}
) } ================================================ FILE: apps/dashboard/src/components/CopyButton.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { useCopyToClipboard } from '@/hooks/useCopyToClipboard' import { cn } from '@/lib/utils' import { AnimatePresence, motion } from 'framer-motion' import { CheckIcon, CopyIcon } from 'lucide-react' import { ComponentProps } from 'react' import TooltipButton from './TooltipButton' const MotionCopyIcon = motion(CopyIcon) const MotionCheckIcon = motion(CheckIcon) const iconProps = { initial: { opacity: 0, y: 5 }, animate: { opacity: 1, y: 0 }, exit: { opacity: 0, y: -5 }, transition: { duration: 0.1 }, } function CopyButton({ value, className, tooltipText, variant = 'ghost', autoHide, onClick, ...props }: { value: string; tooltipText?: string; autoHide?: boolean } & Omit< ComponentProps, 'tooltipText' >) { const [copied, copy] = useCopyToClipboard() return ( { copy(value) onClick?.(e) e.stopPropagation() }} className={cn( 'font-sans text-muted-foreground hover:text-foreground', { 'opacity-0 -translate-x-1': autoHide && !copied, 'group-hover/copy-button:opacity-100 group-hover/copy-button:translate-x-0 group-focus-within/copy-button:opacity-100 group-focus-within/copy-button:translate-x-0': autoHide, 'opacity-100 translate-x-0': autoHide && copied, }, className, )} variant={variant} {...props} > {copied ? ( ) : ( )} ) } export { CopyButton } ================================================ FILE: apps/dashboard/src/components/CreateApiKeyDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Button } from '@/components/ui/button' import { DatePicker } from '@/components/ui/date-picker' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger, } from '@/components/ui/dialog' import { Spinner } from '@/components/ui/spinner' import { AnimatePresence, motion } from 'framer-motion' import { CheckIcon, CopyIcon, EyeIcon, EyeOffIcon, InfoIcon } from 'lucide-react' import { Field, FieldDescription, FieldError, FieldGroup, FieldLabel } from '@/components/ui/field' import { Input } from '@/components/ui/input' import { InputGroup, InputGroupButton, InputGroupInput } from '@/components/ui/input-group' import { Label } from '@/components/ui/label' import { CREATE_API_KEY_PERMISSIONS_GROUPS } from '@/constants/CreateApiKeyPermissionsGroups' import { useCreateApiKeyMutation } from '@/hooks/mutations/useCreateApiKeyMutation' import { useCopyToClipboard } from '@/hooks/useCopyToClipboard' import { handleApiError } from '@/lib/error-handling' import { getMaskedToken } from '@/lib/utils' import { ApiKeyResponse, CreateApiKeyPermissionsEnum } from '@daytonaio/api-client' import { useForm } from '@tanstack/react-form' import { Plus } from 'lucide-react' import React, { useCallback, useEffect, useMemo, useState } from 'react' import { toast } from 'sonner' import { z } from 'zod' import { Alert, AlertDescription, AlertTitle } from './ui/alert' import { Tabs, TabsContent, TabsList, TabsTrigger } from './ui/tabs' import { ToggleGroup, ToggleGroupItem } from './ui/toggle-group' interface CreateApiKeyDialogProps { availablePermissions: CreateApiKeyPermissionsEnum[] apiUrl: string className?: string organizationId?: string } const isReadPermission = (permission: CreateApiKeyPermissionsEnum) => permission.startsWith('read:') const isWritePermission = (permission: CreateApiKeyPermissionsEnum) => permission.startsWith('write:') const isDeletePermission = (permission: 
CreateApiKeyPermissionsEnum) => permission.startsWith('delete:') const IMPLICIT_READ_RESOURCES = ['Sandboxes', 'Snapshots', 'Registries', 'Regions'] const formSchema = z.object({ name: z.string().min(1, 'Name is required'), expiresAt: z.date().optional(), permissions: z.array(z.enum(CreateApiKeyPermissionsEnum)), }) type FormValues = z.infer export const CreateApiKeyDialog: React.FC = ({ availablePermissions, apiUrl, className, organizationId, }) => { const [open, setOpen] = useState(false) const { reset: resetCreateApiKeyMutation, ...createApiKeyMutation } = useCreateApiKeyMutation() const availableGroups = useMemo(() => { return CREATE_API_KEY_PERMISSIONS_GROUPS.map((group) => ({ ...group, permissions: group.permissions.filter((p) => availablePermissions.includes(p)), })).filter((group) => group.permissions.length > 0) }, [availablePermissions]) const form = useForm({ defaultValues: { name: '', expiresAt: undefined, permissions: availablePermissions, } as FormValues, validators: { onSubmit: formSchema, }, onSubmit: async ({ value }) => { if (!organizationId) { toast.error('Select an organization to create an API key.') return } try { await createApiKeyMutation.mutateAsync({ organizationId, name: value.name.trim(), permissions: value.permissions, expiresAt: value.expiresAt ?? null, }) toast.success('API key created successfully') } catch (error) { handleApiError(error, 'Failed to create API key') } }, }) const resetState = useCallback(() => { form.reset({ name: '', expiresAt: undefined, permissions: availablePermissions, }) resetCreateApiKeyMutation() }, [resetCreateApiKeyMutation, form, availablePermissions]) useEffect(() => { if (open) { resetState() } }, [open, resetState]) const createdKey = createApiKeyMutation.data return ( { setOpen(isOpen) }} > {createdKey ? 'API Key Created' : 'Create New API Key'} {createdKey ? 'Your API key has been created successfully.' : 'Choose which actions this API key will be authorized to perform.'} {createdKey ? ( ) : (
{ e.preventDefault() e.stopPropagation() form.handleSubmit() }} > {(field) => { const isInvalid = field.state.meta.isTouched && !field.state.meta.isValid return ( Key Name field.handleChange(e.target.value)} placeholder="Name" /> {field.state.meta.errors.length > 0 && field.state.meta.isTouched && ( )} ) }} {(field) => ( Expires Optional expiration date for the API key. )} { if (value === 'full-access') { form.setFieldValue('permissions', availablePermissions) } else if (value === 'sandbox-access') { form.setFieldValue('permissions', [ CreateApiKeyPermissionsEnum.WRITE_SANDBOXES, CreateApiKeyPermissionsEnum.DELETE_SANDBOXES, ]) } else { form.setFieldValue('permissions', []) } }} > Full Access Sandboxes Restricted Sandboxes Access This key grants read and write access to the Sandboxes resource. Full Access This key grants full access to all resources. For better security, we recommend creating a restricted key. {availableGroups.length > 0 && ( {(field) => ( {availableGroups.map((group) => { const readPermission = group.permissions.find(isReadPermission) const writePermission = group.permissions.find(isWritePermission) const deletePermission = group.permissions.find(isDeletePermission) const hasImplicitRead = IMPLICIT_READ_RESOURCES.includes(group.name) return (
field.state.value.includes(p))} onValueChange={(newGroupSelection) => { const permissionsWithoutThisGroup = field.state.value.filter( (p) => !group.permissions.includes(p), ) field.handleChange([ ...permissionsWithoutThisGroup, ...newGroupSelection, ] as CreateApiKeyPermissionsEnum[]) }} > {hasImplicitRead ? ( Read* ) : ( {readPermission ? 'Read' : '-'} )} {writePermission ? 'Write' : '-'} {deletePermission ? 'Delete' : '-'}
) })}
{field.state.meta.errors.length > 0 && field.state.meta.isTouched && ( )}

*Read access is always granted for these resources.

)}
)}
)} {!createdKey && ( [state.canSubmit, state.isSubmitting]} children={([canSubmit, isSubmitting]) => ( )} /> )}
) } const MotionCopyIcon = motion(CopyIcon) const MotionCheckIcon = motion(CheckIcon) const iconProps = { initial: { opacity: 0, y: 5 }, animate: { opacity: 1, y: 0 }, exit: { opacity: 0, y: -5 }, transition: { duration: 0.1 }, } function CreatedKeyDisplay({ createdKey, apiUrl }: { createdKey: ApiKeyResponse; apiUrl: string }) { const [copiedApiKey, copyApiKey] = useCopyToClipboard() const [copiedApiUrl, copyApiUrl] = useCopyToClipboard() const [apiKeyRevealed, setApiKeyRevealed] = useState(false) return (
You can only view this key once. Store it safely. API Key setApiKeyRevealed(!apiKeyRevealed)}> {apiKeyRevealed ? : } copyApiKey(createdKey.value)}> {copiedApiKey ? ( ) : ( )} API URL copyApiUrl(apiUrl)}> {copiedApiUrl ? ( ) : ( )}
) } ================================================ FILE: apps/dashboard/src/components/CreateRegionDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React, { useState } from 'react' import { CreateRegion, CreateRegionResponse } from '@daytonaio/api-client' import { Button } from '@/components/ui/button' import { Input } from '@/components/ui/input' import { Label } from '@/components/ui/label' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger, } from '@/components/ui/dialog' import { toast } from 'sonner' import { Plus, Copy } from 'lucide-react' import { getMaskedToken } from '@/lib/utils' const DEFAULT_FORM_DATA = { name: '', proxyUrl: '', sshGatewayUrl: '', snapshotManagerUrl: '', } interface CreateRegionDialogProps { onCreateRegion: (data: CreateRegion) => Promise writePermitted: boolean loadingData: boolean } export const CreateRegionDialog: React.FC = ({ onCreateRegion, writePermitted, loadingData, }) => { const [open, setOpen] = useState(false) const [loading, setLoading] = useState(false) const [createdRegion, setCreatedRegion] = useState(null) const [isProxyApiKeyRevealed, setIsProxyApiKeyRevealed] = useState(false) const [isSshGatewayApiKeyRevealed, setIsSshGatewayApiKeyRevealed] = useState(false) const [isSnapshotManagerPasswordRevealed, setIsSnapshotManagerPasswordRevealed] = useState(false) const [formData, setFormData] = useState(DEFAULT_FORM_DATA) const handleCreate = async () => { setLoading(true) try { const createRegionData: CreateRegion = { name: formData.name, proxyUrl: formData.proxyUrl.trim() || null, sshGatewayUrl: formData.sshGatewayUrl.trim() || null, snapshotManagerUrl: formData.snapshotManagerUrl.trim() || null, } const region = await onCreateRegion(createRegionData) if (region) { if ( !region.proxyApiKey && !region.sshGatewayApiKey && !region.snapshotManagerUsername && 
!region.snapshotManagerPassword ) { setOpen(false) setCreatedRegion(null) } else { setCreatedRegion(region) } setFormData(DEFAULT_FORM_DATA) } } finally { setLoading(false) } } const copyToClipboard = async (text: string) => { try { await navigator.clipboard.writeText(text) toast.success('Copied to clipboard') } catch (err) { console.error('Failed to copy text:', err) toast.error('Failed to copy to clipboard') } } if (!writePermitted) { return null } return ( { setOpen(isOpen) if (!isOpen) { setCreatedRegion(null) setFormData(DEFAULT_FORM_DATA) setIsProxyApiKeyRevealed(false) setIsSshGatewayApiKeyRevealed(false) setIsSnapshotManagerPasswordRevealed(false) } }} > {createdRegion ? 'New Region Created' : 'Create New Region'} {!createdRegion ? 'Add a new region for grouping runners and sandboxes.' : createdRegion.proxyApiKey || createdRegion.sshGatewayApiKey || createdRegion.snapshotManagerUsername || createdRegion.snapshotManagerPassword ? "Save these credentials securely. You won't be able to see them again." : ''} {createdRegion && (createdRegion.proxyApiKey || createdRegion.sshGatewayApiKey || createdRegion.snapshotManagerUsername || createdRegion.snapshotManagerPassword) ? (
{createdRegion.proxyApiKey && (
setIsProxyApiKeyRevealed(true)} onMouseLeave={() => setIsProxyApiKeyRevealed(false)} > {isProxyApiKeyRevealed ? createdRegion.proxyApiKey : getMaskedToken(createdRegion.proxyApiKey)} copyToClipboard(createdRegion.proxyApiKey!)} />
)} {createdRegion.sshGatewayApiKey && (
setIsSshGatewayApiKeyRevealed(true)} onMouseLeave={() => setIsSshGatewayApiKeyRevealed(false)} > {isSshGatewayApiKeyRevealed ? createdRegion.sshGatewayApiKey : getMaskedToken(createdRegion.sshGatewayApiKey)} copyToClipboard(createdRegion.sshGatewayApiKey!)} />
)} {createdRegion.snapshotManagerUsername && (
{createdRegion.snapshotManagerUsername} copyToClipboard(createdRegion.snapshotManagerUsername!)} />
)} {createdRegion.snapshotManagerPassword && (
setIsSnapshotManagerPasswordRevealed(true)} onMouseLeave={() => setIsSnapshotManagerPasswordRevealed(false)} > {isSnapshotManagerPasswordRevealed ? createdRegion.snapshotManagerPassword : getMaskedToken(createdRegion.snapshotManagerPassword)} copyToClipboard(createdRegion.snapshotManagerPassword!)} />
)}
) : (
{ e.preventDefault() await handleCreate() }} >
{ setFormData((prev) => ({ ...prev, name: e.target.value })) }} placeholder="us-east-1" />

Region name must contain only letters, numbers, underscores, periods, and hyphens.

{ setFormData((prev) => ({ ...prev, proxyUrl: e.target.value })) }} placeholder="https://proxy.example.com" />

(Optional) URL of the custom proxy for this region

{ setFormData((prev) => ({ ...prev, sshGatewayUrl: e.target.value })) }} placeholder="https://ssh-gateway.example.com" />

(Optional) URL of the custom SSH gateway for this region

{ setFormData((prev) => ({ ...prev, snapshotManagerUrl: e.target.value })) }} placeholder="https://snapshot-manager.example.com" />

(Optional) URL of the custom snapshot manager for this region

)} {!createdRegion && (loading ? ( ) : ( ))}
) } ================================================ FILE: apps/dashboard/src/components/CreateRunnerDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React, { useState, useEffect } from 'react' import { Region, CreateRunner, CreateRunnerResponse } from '@daytonaio/api-client' import { Button } from '@/components/ui/button' import { Input } from '@/components/ui/input' import { Label } from '@/components/ui/label' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger, } from '@/components/ui/dialog' import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@/components/ui/select' import { toast } from 'sonner' import { Plus, Copy } from 'lucide-react' import { getMaskedToken } from '@/lib/utils' const DEFAULT_FORM_DATA = { name: '', regionId: '', } interface CreateRunnerDialogProps { regions: Region[] onCreateRunner: (data: CreateRunner) => Promise } export const CreateRunnerDialog: React.FC = ({ regions, onCreateRunner }) => { const [open, setOpen] = useState(false) const [loading, setLoading] = useState(false) const [createdRunner, setCreatedRunner] = useState(null) const [isTokenRevealed, setIsTokenRevealed] = useState(false) const [formData, setFormData] = useState(DEFAULT_FORM_DATA) const [formErrors, setFormErrors] = useState>({}) useEffect(() => { if (regions.length > 0 && !formData.regionId) { setFormData((prev) => ({ ...prev, regionId: regions[0].id })) } }, [regions, formData.regionId]) const validateForm = () => { const errors: Record = {} if (!formData.name.trim()) { errors.name = 'Name is required' } if (!formData.regionId) { errors.regionId = 'Region is required' } setFormErrors(errors) return Object.keys(errors).length === 0 } const handleCreate = async () => { if (!validateForm()) { return } setLoading(true) try { const runner = await onCreateRunner({ name: 
formData.name.trim(), regionId: formData.regionId, }) if (runner) { setCreatedRunner(runner) setFormData({ ...DEFAULT_FORM_DATA, regionId: regions.length > 0 ? regions[0].id : '', }) setFormErrors({}) } } finally { setLoading(false) } } const copyToClipboard = async (text: string) => { try { await navigator.clipboard.writeText(text) toast.success('Copied to clipboard') } catch (err) { console.error('Failed to copy text:', err) toast.error('Failed to copy to clipboard') } } if (regions.length === 0) { return null } return ( { setOpen(isOpen) if (!isOpen) { setCreatedRunner(null) setFormData({ ...DEFAULT_FORM_DATA, regionId: regions.length > 0 ? regions[0].id : '', }) setFormErrors({}) } }} > Create New Runner Add configuration for a new runner in your selected region. {createdRunner ? (
setIsTokenRevealed(true)} onMouseLeave={() => setIsTokenRevealed(false)} > {isTokenRevealed ? createdRunner.apiKey : getMaskedToken(createdRunner.apiKey)} copyToClipboard(createdRunner.apiKey)} />

Save this token securely. You won't be able to see it again.

) : (
{ e.preventDefault() await handleCreate() }} >
{formErrors.regionId &&

{formErrors.regionId}

}
{ setFormData((prev) => ({ ...prev, name: e.target.value })) }} placeholder="runner-1" /> {formErrors.name &&

{formErrors.name}

}
)} {!createdRunner && (loading ? ( ) : ( ))}
) } ================================================ FILE: apps/dashboard/src/components/DebouncedInput.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { useEffect, useState } from 'react' import { Input } from './ui/input' export const DebouncedInput = ({ value: initialValue, onChange, debounce = 500, ...props }: { value: string | number onChange: (value: string | number) => void debounce?: number } & Omit, 'onChange'>) => { const [value, setValue] = useState(initialValue) useEffect(() => { setValue(initialValue) }, [initialValue]) useEffect(() => { const timeout = setTimeout(() => { onChange(value) }, debounce) return () => clearTimeout(timeout) }, [value]) return setValue(e.target.value)} /> } ================================================ FILE: apps/dashboard/src/components/EllipsisWithTooltip.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { cn } from '@/lib/utils' import { Slot } from '@radix-ui/react-slot' import { useRef, useState } from 'react' import { Tooltip, TooltipContent, TooltipTrigger } from './ui/tooltip' export function EllipsisWithTooltip({ children, asChild, className, ...props }: { children: React.ReactNode className?: string asChild?: boolean }) { const [isOpen, setIsOpen] = useState(false) const triggerRef = useRef(null) const Comp = asChild ? Slot : 'div' return ( { if (shouldOpen) { const isTruncated = triggerRef.current && triggerRef.current.scrollWidth > triggerRef.current.clientWidth if (isTruncated) { setIsOpen(true) } } else { setIsOpen(false) } }} delayDuration={300} > {children} {children} ) } ================================================ FILE: apps/dashboard/src/components/ErrorBoundaryFallback.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Dialog, DialogHeader, DialogDescription, DialogTitle, DialogContent } from '@/components/ui/dialog' import { Button } from '@/components/ui/button' import { FallbackProps } from 'react-error-boundary' export function ErrorBoundaryFallback({ error, resetErrorBoundary }: FallbackProps) { return ( Something went wrong We're having trouble loading the dashboard. This could be due to a temporary service issue or network problem. Please try again or contact support if the issue persists.

Error Details:

{error?.message || 'Unknown error'}

{error?.stack && (
Stack Trace (click to expand)
                {error.stack}
              
)}
) } ================================================ FILE: apps/dashboard/src/components/Invoices/InvoicesTableActions.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { MoreHorizontalIcon } from 'lucide-react' import { AlertDialog, AlertDialogAction, AlertDialogCancel, AlertDialogContent, AlertDialogDescription, AlertDialogFooter, AlertDialogHeader, AlertDialogTitle, AlertDialogTrigger, } from '../ui/alert-dialog' import { Button } from '../ui/button' import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuSeparator, DropdownMenuTrigger, } from '../ui/dropdown-menu' import { InvoicesTableActionsProps } from './types' export function InvoicesTableActions({ invoice, onView, onVoid, onPay }: InvoicesTableActionsProps) { if (!onView && !onVoid && !onPay) { return null } return (
{onView && ( onView?.(invoice)}> View )} {onPay && ( onPay?.(invoice)}> Pay )} {onVoid && ( <> e.preventDefault()} variant="destructive" > Void Void Invoice Are you sure you want to void the invoice {invoice.number}?
This action cannot be undone.
Cancel onVoid?.(invoice)} variant="destructive"> Void
)}
) } ================================================ FILE: apps/dashboard/src/components/Invoices/InvoicesTableHeader.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Search } from 'lucide-react' import React from 'react' import { DebouncedInput } from '../DebouncedInput' import { InvoicesTableHeaderProps } from './types' export function InvoicesTableHeader({ table }: InvoicesTableHeaderProps) { const [globalFilter, setGlobalFilter] = React.useState('') React.useEffect(() => { const down = (e: KeyboardEvent) => { if (e.key === 'k' && (e.metaKey || e.ctrlKey)) { e.preventDefault() // Focus search input const searchInput = document.querySelector('[data-search-input]') as HTMLInputElement if (searchInput) { searchInput.focus() } } } document.addEventListener('keydown', down) return () => document.removeEventListener('keydown', down) }, []) return (
{ setGlobalFilter(String(value)) table.setGlobalFilter(String(value)) }} className="pl-8" data-search-input />
) } ================================================ FILE: apps/dashboard/src/components/Invoices/columns.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Invoice } from '@/billing-api/types/Invoice' import { formatAmount } from '@/lib/utils' import { ColumnDef } from '@tanstack/react-table' import { ArrowDown, ArrowUp } from 'lucide-react' import React from 'react' import { Badge } from '../ui/badge' import { InvoicesTableActions } from './InvoicesTableActions' interface SortableHeaderProps { column: any label: string dataState?: string } const SortableHeader: React.FC = ({ column, label, dataState }) => { return (
column.toggleSorting(column.getIsSorted() === 'asc')} className="flex items-center" {...(dataState && { 'data-state': dataState })} > {label} {column.getIsSorted() === 'asc' ? ( ) : column.getIsSorted() === 'desc' ? ( ) : (
)}
) } interface GetColumnsProps { onViewInvoice?: (invoice: Invoice) => void onVoidInvoice?: (invoice: Invoice) => void onPayInvoice?: (invoice: Invoice) => void } export function getColumns({ onViewInvoice, onVoidInvoice, onPayInvoice }: GetColumnsProps): ColumnDef[] { const columns: ColumnDef[] = [ { id: 'number', header: ({ column }) => { return }, accessorKey: 'number', cell: ({ row }) => { return (
{row.original.number}
) }, sortingFn: (rowA, rowB) => { return rowA.original.number.localeCompare(rowB.original.number) }, }, { id: 'issuingDate', size: 140, header: ({ column }) => { return }, cell: ({ row }) => { const date = new Date(row.original.issuingDate) return (
{date.toLocaleDateString('en-US', { year: 'numeric', month: 'short', day: 'numeric' })}
) }, accessorFn: (row) => new Date(row.issuingDate).getTime(), sortingFn: (rowA, rowB) => { return new Date(rowA.original.issuingDate).getTime() - new Date(rowB.original.issuingDate).getTime() }, }, { id: 'paymentDueDate', size: 140, header: ({ column }) => { return }, cell: ({ row }) => { const date = new Date(row.original.paymentDueDate) return (
{date.toLocaleDateString('en-US', { year: 'numeric', month: 'short', day: 'numeric' })}
) }, accessorFn: (row) => new Date(row.paymentDueDate).getTime(), sortingFn: (rowA, rowB) => { return new Date(rowA.original.paymentDueDate).getTime() - new Date(rowB.original.paymentDueDate).getTime() }, }, { id: 'totalAmountCents', size: 120, header: ({ column }) => { return }, cell: ({ row }) => { return (
{formatAmount(row.original.totalAmountCents)}
) }, accessorKey: 'totalAmountCents', sortingFn: (rowA, rowB) => { return rowA.original.totalAmountCents - rowB.original.totalAmountCents }, }, { id: 'paymentStatus', size: 120, header: ({ column }) => { return }, cell: ({ row }) => { const invoice = row.original const isSucceeded = invoice.paymentStatus === 'succeeded' const isFailed = invoice.paymentStatus === 'failed' const isOverdue = invoice.paymentOverdue let variant: 'success' | 'destructive' | 'secondary' = 'secondary' let label = 'Pending' if (isSucceeded) { variant = 'success' label = 'Paid' } else if (isOverdue || isFailed) { variant = 'destructive' label = isOverdue ? 'Overdue' : 'Failed' } if (invoice.status === 'voided') { label = 'Voided' } return (
{label}
) }, accessorKey: 'paymentStatus', sortingFn: (rowA, rowB) => { const statusOrder = { succeeded: 0, pending: 1, failed: 2 } return (statusOrder[rowA.original.paymentStatus] ?? 3) - (statusOrder[rowB.original.paymentStatus] ?? 3) }, }, { id: 'type', size: 120, header: ({ column }) => { return }, cell: ({ row }) => { const type = row.original.type const displayType = type === 'subscription' ? 'Subscription' : 'One Time' return (
{displayType}
) }, accessorKey: 'type', sortingFn: (rowA, rowB) => { return rowA.original.type.localeCompare(rowB.original.type) }, }, { id: 'actions', size: 100, enableHiding: false, cell: ({ row }) => { const isPayable = row.original.paymentStatus === 'pending' && row.original.status === 'finalized' const isViewable = Boolean(row.original.fileUrl) const isVoidable = row.original.status === 'finalized' && ['pending', 'failed'].includes(row.original.paymentStatus) return (
) }, }, ] return columns } ================================================ FILE: apps/dashboard/src/components/Invoices/index.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { cn } from '@/lib/utils' import { flexRender } from '@tanstack/react-table' import { FileText } from 'lucide-react' import { Pagination } from '../Pagination' import { TableEmptyState } from '../TableEmptyState' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '../ui/table' import { InvoicesTableHeader } from './InvoicesTableHeader' import { InvoicesTableProps } from './types' import { useInvoicesTable } from './useInvoicesTable' export function InvoicesTable({ data, pagination, totalItems, pageCount, onPaginationChange, loading, onViewInvoice, onVoidInvoice, onRowClick, onPayInvoice, }: InvoicesTableProps) { const { table } = useInvoicesTable({ data, pagination, pageCount, onPaginationChange, onViewInvoice, onVoidInvoice, onPayInvoice, }) return ( <> {table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => { return ( header.column.getCanSort() && header.column.toggleSorting(header.column.getIsSorted() === 'asc') } className={cn( 'sticky top-0 z-[3] border-b border-border', header.column.getCanSort() ? 'hover:bg-muted cursor-pointer' : '', )} > {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ) })} ))} {loading ? ( Loading... ) : table.getRowModel().rows?.length ? ( table.getRowModel().rows.map((row) => ( onRowClick?.(row.original)} > {row.getVisibleCells().map((cell) => ( { if (cell.column.id === 'actions') { e.stopPropagation() } }} className="border-b border-border" style={{ width: cell.column.id === 'number' ? '20%' : 'auto', maxWidth: cell.column.getSize() + 80, minWidth: cell.column.getSize(), }} sticky={cell.column.id === 'actions' ? 
'right' : undefined} > {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} )) ) : ( } description={

Invoices will appear here once they are generated.

} /> )}
) } ================================================ FILE: apps/dashboard/src/components/Invoices/types.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Invoice } from '@/billing-api' import { Table } from '@tanstack/react-table' export interface InvoicesTableProps { data: Invoice[] totalItems: number pagination: { pageIndex: number pageSize: number } pageCount: number onPaginationChange: (pagination: { pageIndex: number; pageSize: number }) => void loading: boolean onViewInvoice?: (invoice: Invoice) => void onVoidInvoice?: (invoice: Invoice) => void onRowClick?: (invoice: Invoice) => void onPayInvoice?: (invoice: Invoice) => void } export interface InvoicesTableActionsProps { invoice: Invoice onView?: (invoice: Invoice) => void onVoid?: (invoice: Invoice) => void onPay?: (invoice: Invoice) => void } export interface InvoicesTableHeaderProps { table: Table } ================================================ FILE: apps/dashboard/src/components/Invoices/useInvoicesTable.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Invoice } from '@/billing-api/types/Invoice' import { ColumnFiltersState, getCoreRowModel, getFacetedRowModel, getFacetedUniqueValues, getFilteredRowModel, getPaginationRowModel, getSortedRowModel, SortingState, useReactTable, } from '@tanstack/react-table' import { useMemo, useState } from 'react' import { getColumns } from './columns' interface UseInvoicesTableProps { pagination: { pageIndex: number pageSize: number } pageCount: number onPaginationChange: (pagination: { pageIndex: number; pageSize: number }) => void data: Invoice[] onViewInvoice?: (invoice: Invoice) => void onVoidInvoice?: (invoice: Invoice) => void onPayInvoice?: (invoice: Invoice) => void } export function useInvoicesTable({ data, pagination, pageCount, onPaginationChange, onViewInvoice, onVoidInvoice, onPayInvoice, }: UseInvoicesTableProps) { const [sorting, setSorting] = useState([ { id: 'issuingDate', desc: true, }, ]) const [columnFilters, setColumnFilters] = useState([]) const columns = useMemo( () => getColumns({ onViewInvoice, onVoidInvoice, onPayInvoice, }), [onViewInvoice, onVoidInvoice, onPayInvoice], ) const table = useReactTable({ data, columns, onColumnFiltersChange: setColumnFilters, getCoreRowModel: getCoreRowModel(), getPaginationRowModel: getPaginationRowModel(), onSortingChange: setSorting, getSortedRowModel: getSortedRowModel(), getFacetedRowModel: getFacetedRowModel(), getFacetedUniqueValues: getFacetedUniqueValues(), getFilteredRowModel: getFilteredRowModel(), manualPagination: true, pageCount: pageCount, onPaginationChange: (updater) => { const newPagination = typeof updater === 'function' ? 
updater(table.getState().pagination) : updater onPaginationChange(newPagination) }, state: { sorting, columnFilters, pagination: { pageIndex: pagination.pageIndex, pageSize: pagination.pageSize, }, }, defaultColumn: { size: 100, }, getRowId: (row) => row.id, }) return { table, sorting, columnFilters, } } ================================================ FILE: apps/dashboard/src/components/LimitUsageChart.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ChartConfig, ChartContainer, ChartLegend, ChartLegendContent, ChartTooltip, ChartTooltipContent, } from '@/components/ui/chart' import { FacetFilter } from '@/components/ui/facet-filter' import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@/components/ui/select' import type { RegionUsageOverview } from '@daytonaio/api-client' import { useMemo, useState } from 'react' import { Bar, BarChart, CartesianGrid, ReferenceLine, XAxis, YAxis } from 'recharts' type ResourceKey = 'cpu' | 'ram' | 'storage' const clamp = (v: number) => Math.max(0, Math.min(100, Math.round(v * 10) / 10)) const formatDate = (value: string) => new Date(value).toLocaleDateString(undefined, { month: 'short', day: '2-digit' }) interface PastUsage { date: string peakCpuUsage: number peakMemoryUsage: number peakDiskUsage: number } export function LimitUsageChart({ defaultPeriod = 30, defaultResources = ['ram', 'cpu', 'storage'], pastUsage = [], currentUsage, title, }: { defaultPeriod?: number defaultResources?: ResourceKey[] pastUsage: PastUsage[] currentUsage?: RegionUsageOverview | null title?: React.ReactNode }) { const [period, setPeriod] = useState(defaultPeriod.toString()) const [selected, setSelected] = useState>(new Set(defaultResources)) const data = useMemo(() => { if (!currentUsage) { return [] } const { totalCpuQuota, totalMemoryQuota, totalDiskQuota } = currentUsage return pastUsage.slice(-Number(period)).map((r) => ({ 
date: r.date, cpu: clamp((r.peakCpuUsage / totalCpuQuota) * 100), ram: clamp((r.peakMemoryUsage / totalMemoryQuota) * 100), storage: clamp((r.peakDiskUsage / totalDiskQuota) * 100), })) }, [pastUsage, currentUsage, period]) const config: ChartConfig = useMemo(() => { const full: Record = { cpu: { label: 'CPU', color: 'hsl(var(--chart-3))' }, ram: { label: 'RAM', color: 'hsl(var(--chart-2))' }, storage: { label: 'Storage', color: 'hsl(var(--chart-1))' }, } return Object.fromEntries(Object.entries(full).filter(([k]) => selected.has(k as ResourceKey))) as ChartConfig }, [selected]) return (
{title}
setSelected(new Set(key as Set))} />
`${v}%`} /> formatDate(label)} valueFormatter={(value) => `${value}%`} /> } /> } /> {selected.has('storage') && } {selected.has('ram') && } {selected.has('cpu') && }
) } ================================================ FILE: apps/dashboard/src/components/LiveIndicator.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { useQueryCountdown } from '@/hooks/useQueryCountdown' import { cn } from '@/lib/utils' import { Tooltip } from './Tooltip' export function LiveIndicator({ isUpdating, intervalMs, lastUpdatedAt, }: { isUpdating: boolean intervalMs: number lastUpdatedAt: number }) { const refreshingIn = useQueryCountdown(lastUpdatedAt, intervalMs) return (
Data is refreshed every {intervalMs / 1000} seconds.
Refreshing{' '} {isUpdating ? ( '' ) : ( <> in {refreshingIn}s )} ...
} label={
Live
} /> ) } ================================================ FILE: apps/dashboard/src/components/LoadingFallback.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Logo, LogoText } from '@/assets/Logo' import { Sidebar as SidebarComponent, SidebarContent, SidebarFooter, SidebarGroup, SidebarInset, SidebarProvider, } from '@/components/ui/sidebar' import { motion } from 'framer-motion' import { Loader2 } from 'lucide-react' import { useEffect, useState } from 'react' import { Skeleton } from './ui/skeleton' const LoadingFallback = () => { const [showLongLoadingMessage, setShowLongLoadingMessage] = useState(false) useEffect(() => { const timer = setTimeout(() => { setShowLongLoadingMessage(true) }, 5_000) return () => clearTimeout(timer) }, []) return (

This is taking longer than expected...

If this issue persists, contact us at{' '} support@daytona.io .

) } export default LoadingFallback ================================================ FILE: apps/dashboard/src/components/OrganizationMembers/CancelOrganizationInvitationDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React from 'react' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Button } from '@/components/ui/button' interface CancelOrganizationInvitationDialogProps { open: boolean onOpenChange: (open: boolean) => void onCancelInvitation: () => Promise loading: boolean } export const CancelOrganizationInvitationDialog: React.FC = ({ open, onOpenChange, onCancelInvitation, loading, }) => { const handleCancelInvitation = async () => { const success = await onCancelInvitation() if (success) { onOpenChange(false) } } return ( Cancel Invitation Are you sure you want to cancel this invitation to join the organization? {loading ? ( ) : ( )} ) } ================================================ FILE: apps/dashboard/src/components/OrganizationMembers/CreateOrganizationInvitationDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { ViewerOrganizationRoleCheckbox } from '@/components/OrganizationMembers/ViewerOrganizationRoleCheckbox' import { Button } from '@/components/ui/button' import { Checkbox } from '@/components/ui/checkbox' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger, } from '@/components/ui/dialog' import { Input } from '@/components/ui/input' import { Label } from '@/components/ui/label' import { RadioGroup, RadioGroupItem } from '@/components/ui/radio-group' import { CreateOrganizationInvitationRoleEnum, OrganizationRole } from '@daytonaio/api-client' import { Plus } from 'lucide-react' import React, { useEffect, useState } from 'react' interface CreateOrganizationInvitationDialogProps { availableRoles: OrganizationRole[] loadingAvailableRoles: boolean onCreateInvitation: ( email: string, role: CreateOrganizationInvitationRoleEnum, assignedRoleIds: string[], ) => Promise className?: string } export const CreateOrganizationInvitationDialog: React.FC = ({ availableRoles, loadingAvailableRoles, onCreateInvitation, className, }) => { const [open, setOpen] = useState(false) const [email, setEmail] = useState('') const [role, setRole] = useState(CreateOrganizationInvitationRoleEnum.MEMBER) const [assignedRoleIds, setAssignedRoleIds] = useState([]) const [loading, setLoading] = useState(false) const [developerRole, setDeveloperRole] = useState(null) useEffect(() => { if (!loadingAvailableRoles) { const developerRole = availableRoles.find((r) => r.name === 'Developer') if (developerRole) { setDeveloperRole(developerRole) setAssignedRoleIds([developerRole.id]) } } }, [loadingAvailableRoles, availableRoles]) const handleRoleAssignmentToggle = (roleId: string) => { setAssignedRoleIds((current) => { if (current.includes(roleId)) { return current.filter((p) => p !== roleId) } else { return [...current, roleId] } }) } const handleCreateInvitation = async () => { 
setLoading(true) const success = await onCreateInvitation( email, role, role === CreateOrganizationInvitationRoleEnum.OWNER ? [] : assignedRoleIds, ) if (success) { setOpen(false) setEmail('') setRole(CreateOrganizationInvitationRoleEnum.MEMBER) if (developerRole) { setAssignedRoleIds([developerRole.id]) } else { setAssignedRoleIds([]) } } setLoading(false) } return ( { setOpen(isOpen) if (!isOpen) { setEmail('') setRole(CreateOrganizationInvitationRoleEnum.MEMBER) if (developerRole) { setAssignedRoleIds([developerRole.id]) } else { setAssignedRoleIds([]) } } }} > Invite Member Give them access to the organization with an appropriate role and assignments.
{ e.preventDefault() await handleCreateInvitation() }} >
setEmail(e.target.value)} placeholder="mail@example.com" />
setRole(value)} >

Full administrative access to the organization and its resources

Access to organization resources is based on assignments

{role === CreateOrganizationInvitationRoleEnum.MEMBER && !loadingAvailableRoles && (
{availableRoles.map((availableRole) => (
handleRoleAssignmentToggle(availableRole.id)} />
{availableRole.description && (

{availableRole.description}

)}
))}
)}
{loading ? ( ) : ( )}
) } ================================================ FILE: apps/dashboard/src/components/OrganizationMembers/OrganizationInvitationTable.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { useState } from 'react' import { MoreHorizontal } from 'lucide-react' import { ColumnDef, flexRender, getCoreRowModel, getPaginationRowModel, getSortedRowModel, SortingState, useReactTable, } from '@tanstack/react-table' import { OrganizationInvitation, OrganizationRole, UpdateOrganizationInvitationRoleEnum } from '@daytonaio/api-client' import { Pagination } from '@/components/Pagination' import { Button } from '@/components/ui/button' import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from '@/components/ui/dropdown-menu' import { TableHeader, TableRow, TableHead, TableBody, TableCell, Table } from '@/components/ui/table' import { CancelOrganizationInvitationDialog } from '@/components/OrganizationMembers/CancelOrganizationInvitationDialog' import { UpdateOrganizationInvitationDialog } from './UpdateOrganizationInvitationDialog' import { DEFAULT_PAGE_SIZE } from '@/constants/Pagination' import { TableEmptyState } from '../TableEmptyState' interface DataTableProps { data: OrganizationInvitation[] loadingData: boolean availableRoles: OrganizationRole[] loadingAvailableRoles: boolean onCancelInvitation: (invitationId: string) => Promise onUpdateInvitation: ( invitationId: string, role: UpdateOrganizationInvitationRoleEnum, assignedRoleIds: string[], ) => Promise loadingInvitationAction: Record } export function OrganizationInvitationTable({ data, loadingData, availableRoles, loadingAvailableRoles, onCancelInvitation, onUpdateInvitation, loadingInvitationAction, }: DataTableProps) { const [sorting, setSorting] = useState([]) const [invitationToCancel, setInvitationToCancel] = useState(null) const [isCancelDialogOpen, setIsCancelDialogOpen] = useState(false) 
const [invitationToUpdate, setInvitationToUpdate] = useState(null) const [isUpdateDialogOpen, setIsUpdateDialogOpen] = useState(false) const handleCancel = (invitationId: string) => { setInvitationToCancel(invitationId) setIsCancelDialogOpen(true) } const handleUpdate = (invitation: OrganizationInvitation) => { setInvitationToUpdate(invitation) setIsUpdateDialogOpen(true) } const handleConfirmCancel = async () => { if (invitationToCancel) { const success = await onCancelInvitation(invitationToCancel) if (success) { setInvitationToCancel(null) setIsCancelDialogOpen(false) } return success } return false } const handleConfirmUpdate = async (role: UpdateOrganizationInvitationRoleEnum, assignedRoleIds: string[]) => { if (invitationToUpdate) { const success = await onUpdateInvitation(invitationToUpdate.id, role, assignedRoleIds) if (success) { setInvitationToUpdate(null) setIsUpdateDialogOpen(false) } return success } return false } const columns = getColumns({ onCancel: handleCancel, onUpdate: handleUpdate }) const table = useReactTable({ data, columns, getCoreRowModel: getCoreRowModel(), getPaginationRowModel: getPaginationRowModel(), onSortingChange: setSorting, getSortedRowModel: getSortedRowModel(), state: { sorting, }, initialState: { pagination: { pageSize: DEFAULT_PAGE_SIZE, }, }, }) return ( <>
{table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => { return ( {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ) })} ))} {loadingData ? ( Loading... ) : table.getRowModel().rows?.length ? ( table.getRowModel().rows.map((row) => ( {row.getVisibleCells().map((cell) => ( {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} )) ) : ( )}
{invitationToUpdate && ( { setIsUpdateDialogOpen(open) if (!open) { setInvitationToUpdate(null) } }} invitation={invitationToUpdate} availableRoles={availableRoles} loadingAvailableRoles={loadingAvailableRoles} onUpdateInvitation={handleConfirmUpdate} /> )} {invitationToCancel && ( { setIsCancelDialogOpen(open) if (!open) { setInvitationToCancel(null) } }} onCancelInvitation={handleConfirmCancel} loading={loadingInvitationAction[invitationToCancel]} /> )} ) } const getColumns = ({ onCancel, onUpdate, }: { onCancel: (invitationId: string) => void onUpdate: (invitation: OrganizationInvitation) => void }): ColumnDef[] => { const columns: ColumnDef[] = [ { accessorKey: 'email', header: 'Email', }, { accessorKey: 'invitedBy', header: 'Invited by', }, { accessorKey: 'expiresAt', header: 'Expires', cell: ({ row }) => { return new Date(row.original.expiresAt).toLocaleDateString() }, }, { accessorKey: 'status', header: 'Status', cell: ({ row }) => { const isExpired = new Date(row.original.expiresAt) < new Date() return isExpired ? Expired : 'Pending' }, }, { id: 'actions', cell: ({ row }) => { const isExpired = new Date(row.original.expiresAt) < new Date() if (isExpired) { return null } return (
onUpdate(row.original)}> Edit onCancel(row.original.id)} > Cancel
) }, }, ] return columns } ================================================ FILE: apps/dashboard/src/components/OrganizationMembers/OrganizationMemberTable.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { useState } from 'react' import { MoreHorizontal } from 'lucide-react' import { ColumnDef, flexRender, getCoreRowModel, getPaginationRowModel, getSortedRowModel, SortingState, useReactTable, } from '@tanstack/react-table' import { OrganizationRole, OrganizationUser, OrganizationUserRoleEnum } from '@daytonaio/api-client' import { Pagination } from '@/components/Pagination' import { Button } from '@/components/ui/button' import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from '@/components/ui/dropdown-menu' import { TableHeader, TableRow, TableHead, TableBody, TableCell, Table } from '@/components/ui/table' import { RemoveOrganizationMemberDialog } from '@/components/OrganizationMembers/RemoveOrganizationMemberDialog' import { UpdateOrganizationMemberAccess } from '@/components/OrganizationMembers/UpdateOrganizationMemberAccessDialog' import { capitalize } from '@/lib/utils' import { DEFAULT_PAGE_SIZE } from '@/constants/Pagination' import { TableEmptyState } from '../TableEmptyState' interface DataTableProps { data: OrganizationUser[] loadingData: boolean availableAssignments: OrganizationRole[] loadingAvailableAssignments: boolean onUpdateMemberAccess: (userId: string, role: OrganizationUserRoleEnum, assignedRoleIds: string[]) => Promise onRemoveMember: (userId: string) => Promise loadingMemberAction: Record ownerMode: boolean } export function OrganizationMemberTable({ data, loadingData, availableAssignments, loadingAvailableAssignments, onUpdateMemberAccess, onRemoveMember, loadingMemberAction, ownerMode, }: DataTableProps) { const [sorting, setSorting] = useState([]) const [memberToUpdate, setMemberToUpdate] = useState(null) const 
[isUpdateMemberAccessDialogOpen, setIsUpdateMemberAccessDialogOpen] = useState(false) const [memberToRemove, setMemberToRemove] = useState(null) const [isRemoveDialogOpen, setIsRemoveDialogOpen] = useState(false) const columns = getColumns({ onUpdateMemberRole: (member) => { setMemberToUpdate(member) setIsUpdateMemberAccessDialogOpen(true) }, onUpdateAssignedRoles: (member) => { setMemberToUpdate(member) setIsUpdateMemberAccessDialogOpen(true) }, onRemove: (userId: string) => { setMemberToRemove(userId) setIsRemoveDialogOpen(true) }, ownerMode, }) const table = useReactTable({ data, columns, getCoreRowModel: getCoreRowModel(), getPaginationRowModel: getPaginationRowModel(), onSortingChange: setSorting, getSortedRowModel: getSortedRowModel(), state: { sorting, }, initialState: { pagination: { pageSize: DEFAULT_PAGE_SIZE, }, }, }) const handleUpdateMemberAccess = async (role: OrganizationUserRoleEnum, assignedRoleIds: string[]) => { if (memberToUpdate) { const success = await onUpdateMemberAccess(memberToUpdate.userId, role, assignedRoleIds) if (success) { setMemberToUpdate(null) setIsUpdateMemberAccessDialogOpen(false) } return success } return false } const handleConfirmRemove = async () => { if (memberToRemove) { const success = await onRemoveMember(memberToRemove) if (success) { setMemberToRemove(null) setIsRemoveDialogOpen(false) } return success } return false } return ( <>
{table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => { return ( {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ) })} ))} {loadingData ? ( Loading... ) : table.getRowModel().rows?.length ? ( table.getRowModel().rows.map((row) => ( {row.getVisibleCells().map((cell) => ( {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} )) ) : ( )}
{memberToUpdate && ( { setIsUpdateMemberAccessDialogOpen(open) if (!open) { setMemberToUpdate(null) } }} initialRole={memberToUpdate.role} initialAssignments={memberToUpdate.assignedRoles} availableAssignments={availableAssignments} loadingAvailableAssignments={loadingAvailableAssignments} onUpdateAccess={handleUpdateMemberAccess} processingUpdateAccess={loadingMemberAction[memberToUpdate.userId]} /> )} {memberToRemove && ( { setIsRemoveDialogOpen(open) if (!open) { setMemberToRemove(null) } }} onRemoveMember={handleConfirmRemove} loading={loadingMemberAction[memberToRemove]} /> )} ) } const getColumns = ({ onUpdateMemberRole, onUpdateAssignedRoles, onRemove, ownerMode, }: { onUpdateMemberRole: (member: OrganizationUser) => void onUpdateAssignedRoles: (member: OrganizationUser) => void onRemove: (userId: string) => void ownerMode: boolean }): ColumnDef[] => { const columns: ColumnDef[] = [ { accessorKey: 'email', header: 'Email', }, { accessorKey: 'role', header: () => { return
Role
}, cell: ({ row }) => { const role = capitalize(row.original.role) if (!ownerMode) { return
{role}
} return ( ) }, }, ] if (ownerMode) { const extraColumns: ColumnDef[] = [ { accessorKey: 'assignedRoles', header: () => { return
Assignments
}, cell: ({ row }) => { if (row.original.role === OrganizationUserRoleEnum.OWNER) { return
Full Access
} const roleCount = row.original.assignedRoles?.length || 0 const roleText = roleCount === 1 ? '1 role' : `${roleCount} roles` return ( ) }, }, { id: 'actions', cell: ({ row }) => { return (
onUpdateMemberRole(row.original)}> Change Role {row.original.role !== OrganizationUserRoleEnum.OWNER && ( onUpdateAssignedRoles(row.original)}> Manage Assignments )} onRemove(row.original.userId)} > Remove
) }, }, ] columns.push(...extraColumns) } return columns } ================================================ FILE: apps/dashboard/src/components/OrganizationMembers/RemoveOrganizationMemberDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React from 'react' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Button } from '@/components/ui/button' interface RemoveOrganizationMemberDialogProps { open: boolean onOpenChange: (open: boolean) => void onRemoveMember: () => Promise loading: boolean } export const RemoveOrganizationMemberDialog: React.FC = ({ open, onOpenChange, onRemoveMember, loading, }) => { const handleRemoveMember = async () => { const success = await onRemoveMember() if (success) { onOpenChange(false) } } return ( Remove Member Are you sure you want to remove this member from the organization? Any API keys they have created will become ineffective. {loading ? ( ) : ( )} ) } ================================================ FILE: apps/dashboard/src/components/OrganizationMembers/UpdateOrganizationInvitationDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import React, { useState } from 'react' import { UpdateOrganizationInvitationRoleEnum, OrganizationRole, OrganizationInvitation, OrganizationInvitationRoleEnum, } from '@daytonaio/api-client' import { Button } from '@/components/ui/button' import { Checkbox } from '@/components/ui/checkbox' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Input } from '@/components/ui/input' import { Label } from '@/components/ui/label' import { RadioGroup, RadioGroupItem } from '@/components/ui/radio-group' import { ViewerOrganizationRoleCheckbox } from '@/components/OrganizationMembers/ViewerOrganizationRoleCheckbox' interface UpdateOrganizationInvitationDialogProps { open: boolean onOpenChange: (open: boolean) => void invitation: OrganizationInvitation availableRoles: OrganizationRole[] loadingAvailableRoles: boolean onUpdateInvitation: (role: UpdateOrganizationInvitationRoleEnum, assignedRoleIds: string[]) => Promise } export const UpdateOrganizationInvitationDialog: React.FC = ({ open, onOpenChange, invitation, availableRoles, loadingAvailableRoles, onUpdateInvitation, }) => { const [role, setRole] = useState(invitation.role) const [assignedRoleIds, setAssignedRoleIds] = useState(invitation.assignedRoles.map((role) => role.id)) const [loading, setLoading] = useState(false) const handleRoleAssignmentToggle = (roleId: string) => { setAssignedRoleIds((current) => { if (current.includes(roleId)) { return current.filter((p) => p !== roleId) } else { return [...current, roleId] } }) } const handleUpdateInvitation = async () => { setLoading(true) const success = await onUpdateInvitation(role, role === OrganizationInvitationRoleEnum.OWNER ? 
[] : assignedRoleIds) if (success) { onOpenChange(false) setRole(invitation.role) setAssignedRoleIds(invitation.assignedRoles.map((role) => role.id)) } setLoading(false) } return ( { onOpenChange(isOpen) if (!isOpen) { setRole(invitation.role) setRole(invitation.role) setAssignedRoleIds(invitation.assignedRoles.map((role) => role.id)) } }} > Update Invitation Modify organization access for the invited member.
setRole(value)} >

Full administrative access to the organization and its resources

Access to organization resources is based on assignments

{role === OrganizationInvitationRoleEnum.MEMBER && !loadingAvailableRoles && (
{availableRoles.map((availableRole) => (
handleRoleAssignmentToggle(availableRole.id)} />
{availableRole.description && (

{availableRole.description}

)}
))}
)}
{loading ? ( ) : ( )}
) } ================================================ FILE: apps/dashboard/src/components/OrganizationMembers/UpdateOrganizationMemberAccessDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React, { useState } from 'react' import { CreateOrganizationInvitationRoleEnum, OrganizationRole, OrganizationUserRoleEnum } from '@daytonaio/api-client' import { Button } from '@/components/ui/button' import { Checkbox } from '@/components/ui/checkbox' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Label } from '@/components/ui/label' import { RadioGroup, RadioGroupItem } from '@/components/ui/radio-group' import { ViewerOrganizationRoleCheckbox } from '@/components/OrganizationMembers/ViewerOrganizationRoleCheckbox' interface UpdateOrganizationMemberAccessProps { open: boolean onOpenChange: (open: boolean) => void initialRole: OrganizationUserRoleEnum initialAssignments: OrganizationRole[] availableAssignments: OrganizationRole[] loadingAvailableAssignments: boolean onUpdateAccess: (role: OrganizationUserRoleEnum, assignedRoleIds: string[]) => Promise processingUpdateAccess: boolean } export const UpdateOrganizationMemberAccess: React.FC = ({ open, onOpenChange, initialRole, initialAssignments, availableAssignments, loadingAvailableAssignments, onUpdateAccess, processingUpdateAccess, }) => { const [role, setRole] = useState(initialRole) const [assignedRoleIds, setAssignedRoleIds] = useState(initialAssignments.map((a) => a.id)) const handleRoleAssignmentToggle = (roleId: string) => { setAssignedRoleIds((current) => { if (current.includes(roleId)) { return current.filter((p) => p !== roleId) } else { return [...current, roleId] } }) } const handleUpdateAccess = async () => { const success = await onUpdateAccess(role, role === OrganizationUserRoleEnum.OWNER ? 
[] : assignedRoleIds) if (success) { onOpenChange(false) setRole(initialRole) setAssignedRoleIds(initialAssignments.map((a) => a.id)) } } return ( { onOpenChange(isOpen) if (!isOpen) { setRole(initialRole) setAssignedRoleIds(initialAssignments.map((a) => a.id)) } }} > Update Access Manage access to the organization with an appropriate role and assignments. {role !== OrganizationUserRoleEnum.OWNER && ( Removing assignments will automatically revoke any API keys this member created using permissions granted from those assignments. )}
{ e.preventDefault() await handleUpdateAccess() }} >
setRole(value)} >

Full administrative access to the organization and its resources

Access to organization resources is based on assignments

{role === CreateOrganizationInvitationRoleEnum.MEMBER && !loadingAvailableAssignments && availableAssignments.length > 0 && (
{availableAssignments.map((assignment) => (
handleRoleAssignmentToggle(assignment.id)} />
{assignment.description &&

{assignment.description}

}
))}
)}
{processingUpdateAccess ? ( ) : ( )}
) } ================================================ FILE: apps/dashboard/src/components/OrganizationMembers/ViewerOrganizationRoleCheckbox.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React from 'react' import { Checkbox } from '@/components/ui/checkbox' import { Label } from '@/components/ui/label' export const ViewerOrganizationRoleCheckbox: React.FC = () => { return (

Grants read access to sandboxes, snapshots, and registries in the organization

) } ================================================ FILE: apps/dashboard/src/components/OrganizationRoles/CreateOrganizationRoleDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Button } from '@/components/ui/button' import { Checkbox } from '@/components/ui/checkbox' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger, } from '@/components/ui/dialog' import { Input } from '@/components/ui/input' import { Label } from '@/components/ui/label' import { ORGANIZATION_ROLE_PERMISSIONS_GROUPS } from '@/constants/OrganizationPermissionsGroups' import { OrganizationRolePermissionGroup } from '@/types/OrganizationRolePermissionGroup' import { OrganizationRolePermissionsEnum } from '@daytonaio/api-client' import { Plus } from 'lucide-react' import React, { useState } from 'react' interface CreateOrganizationRoleDialogProps { onCreateRole: (name: string, description: string, permissions: OrganizationRolePermissionsEnum[]) => Promise className?: string } export const CreateOrganizationRoleDialog: React.FC = ({ onCreateRole, className, }) => { const [open, setOpen] = useState(false) const [name, setName] = useState('') const [description, setDescription] = useState('') const [permissions, setPermissions] = useState([]) const [loading, setLoading] = useState(false) const handleCreateRole = async () => { setLoading(true) const success = await onCreateRole(name, description, permissions) if (success) { setOpen(false) setName('') setDescription('') setPermissions([]) } setLoading(false) } const isGroupChecked = (group: OrganizationRolePermissionGroup) => { return group.permissions.every((permission) => permissions.includes(permission)) } // Toggle all permissions in a group const handleGroupToggle = (group: OrganizationRolePermissionGroup) => { if (isGroupChecked(group)) { // If all checked, uncheck all 
setPermissions((current) => current.filter((p) => !group.permissions.includes(p))) } else { // If not all checked, check all setPermissions((current) => { const newPermissions = [...current] group.permissions.forEach((key) => { if (!newPermissions.includes(key)) { newPermissions.push(key) } }) return newPermissions }) } } // Toggle a single permission const handlePermissionToggle = (permission: OrganizationRolePermissionsEnum) => { setPermissions((current) => { if (current.includes(permission)) { return current.filter((p) => p !== permission) } else { return [...current, permission] } }) } return ( { setOpen(isOpen) if (!isOpen) { setName('') setDescription('') setPermissions([]) } }} > Create Role Define a custom role for managing access to the organization.
{ e.preventDefault() await handleCreateRole() }} >
setName(e.target.value)} placeholder="Name" />
setDescription(e.target.value)} placeholder="Description" />
{ORGANIZATION_ROLE_PERMISSIONS_GROUPS.map((group) => { const groupIsChecked = isGroupChecked(group) return (
handleGroupToggle(group)} />
{group.permissions.map((permission) => (
handlePermissionToggle(permission)} disabled={groupIsChecked} className={`${groupIsChecked ? 'pointer-events-none' : ''}`} />
))}
) })}
{loading ? ( ) : ( )}
) } ================================================ FILE: apps/dashboard/src/components/OrganizationRoles/DeleteOrganizationRoleDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React from 'react' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Button } from '@/components/ui/button' interface DeleteOrganizationRoleDialogProps { open: boolean onOpenChange: (open: boolean) => void onDeleteRole: () => Promise loading: boolean } export const DeleteOrganizationRoleDialog: React.FC = ({ open, onOpenChange, onDeleteRole, loading, }) => { const handleDeleteRole = async () => { const success = await onDeleteRole() if (success) { onOpenChange(false) } } return ( Delete Role Are you sure you want to delete this role? {loading ? ( ) : ( )} ) } ================================================ FILE: apps/dashboard/src/components/OrganizationRoles/OrganizationRoleTable.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { DeleteOrganizationRoleDialog } from '@/components/OrganizationRoles/DeleteOrganizationRoleDialog' import { UpdateOrganizationRoleDialog } from '@/components/OrganizationRoles/UpdateOrganizationRoleDialog' import { Pagination } from '@/components/Pagination' import { Button } from '@/components/ui/button' import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from '@/components/ui/dropdown-menu' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '@/components/ui/table' import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip' import { DEFAULT_PAGE_SIZE } from '@/constants/Pagination' import { OrganizationRole, OrganizationRolePermissionsEnum } from '@daytonaio/api-client' import { ColumnDef, flexRender, getCoreRowModel, getPaginationRowModel, getSortedRowModel, SortingState, useReactTable, } from '@tanstack/react-table' import { MoreHorizontal } from 'lucide-react' import { useState } from 'react' import { TableEmptyState } from '../TableEmptyState' interface DataTableProps { data: OrganizationRole[] loadingData: boolean onUpdateRole: ( roleId: string, name: string, description: string, permissions: OrganizationRolePermissionsEnum[], ) => Promise onDeleteRole: (roleId: string) => Promise loadingRoleAction: Record } export function OrganizationRoleTable({ data, loadingData, onUpdateRole, onDeleteRole, loadingRoleAction, }: DataTableProps) { const [sorting, setSorting] = useState([]) const [roleToDelete, setRoleToDelete] = useState(null) const [isDeleteDialogOpen, setIsDeleteDialogOpen] = useState(false) const [roleToUpdate, setRoleToUpdate] = useState(null) const [isUpdateDialogOpen, setIsUpdateDialogOpen] = useState(false) const columns = getColumns({ onUpdate: (role) => { setRoleToUpdate(role) setIsUpdateDialogOpen(true) }, onDelete: (userId: string) => { setRoleToDelete(userId) setIsDeleteDialogOpen(true) }, }) const table = 
useReactTable({ data, columns, getCoreRowModel: getCoreRowModel(), getPaginationRowModel: getPaginationRowModel(), onSortingChange: setSorting, getSortedRowModel: getSortedRowModel(), state: { sorting, }, initialState: { pagination: { pageSize: DEFAULT_PAGE_SIZE, }, }, }) const handleUpdateRole = async ( name: string, description: string, permissions: OrganizationRolePermissionsEnum[], ) => { if (roleToUpdate) { const success = await onUpdateRole(roleToUpdate.id, name, description, permissions) if (success) { setRoleToUpdate(null) setIsUpdateDialogOpen(false) } return success } return false } const handleConfirmDeleteRole = async () => { if (roleToDelete) { const success = await onDeleteRole(roleToDelete) if (success) { setRoleToDelete(null) setIsDeleteDialogOpen(false) } return success } return false } return ( <>
{table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => { return ( {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ) })} ))} {loadingData ? ( Loading... ) : table.getRowModel().rows?.length ? ( table.getRowModel().rows.map((row) => ( {row.getVisibleCells().map((cell) => ( {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} )) ) : ( )}
{roleToUpdate && ( { setIsUpdateDialogOpen(open) if (!open) { setRoleToUpdate(null) } }} initialData={roleToUpdate} onUpdateRole={handleUpdateRole} /> )} {roleToDelete && ( { setIsDeleteDialogOpen(open) if (!open) { setRoleToDelete(null) } }} onDeleteRole={handleConfirmDeleteRole} loading={loadingRoleAction[roleToDelete]} /> )} ) } const getColumns = ({ onUpdate, onDelete, }: { onUpdate: (role: OrganizationRole) => void onDelete: (roleId: string) => void }): ColumnDef[] => { const columns: ColumnDef[] = [ { accessorKey: 'name', header: 'Name', cell: ({ row }) => { return
{row.original.name}
}, }, { accessorKey: 'description', header: 'Description', cell: ({ row }) => { return (
{row.original.description}

{row.original.description}

) }, }, { accessorKey: 'permissions', header: () => { return
Permissions
}, cell: ({ row }) => { const permissions = row.original.permissions.join(', ') return (
{permissions || '-'}
{permissions && (

{permissions}

)}
) }, }, { id: 'actions', cell: ({ row }) => { if (row.original.isGlobal) { return null } return (
onUpdate(row.original)}> Edit onDelete(row.original.id)} > Delete
) }, }, ] return columns } ================================================ FILE: apps/dashboard/src/components/OrganizationRoles/UpdateOrganizationRoleDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React, { useState } from 'react' import { OrganizationRole, OrganizationRolePermissionsEnum } from '@daytonaio/api-client' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Button } from '@/components/ui/button' import { Checkbox } from '@/components/ui/checkbox' import { Input } from '@/components/ui/input' import { Label } from '@/components/ui/label' import { ORGANIZATION_ROLE_PERMISSIONS_GROUPS } from '@/constants/OrganizationPermissionsGroups' import { OrganizationRolePermissionGroup } from '@/types/OrganizationRolePermissionGroup' interface UpdateOrganizationRoleDialogProps { open: boolean onOpenChange: (open: boolean) => void initialData: OrganizationRole onUpdateRole: (name: string, description: string, permissions: OrganizationRolePermissionsEnum[]) => Promise } export const UpdateOrganizationRoleDialog: React.FC = ({ open, onOpenChange, initialData, onUpdateRole, }) => { const [name, setName] = useState(initialData.name) const [description, setDescription] = useState(initialData.description) const [permissions, setPermissions] = useState(initialData.permissions) const [loading, setLoading] = useState(false) const handleUpdateRole = async () => { setLoading(true) const success = await onUpdateRole(name, description, permissions) if (success) { onOpenChange(false) setName('') setDescription('') setPermissions([]) } setLoading(false) } const isGroupChecked = (group: OrganizationRolePermissionGroup) => { return group.permissions.every((permission) => permissions.includes(permission)) } // Toggle all permissions in a group const handleGroupToggle = (group: 
OrganizationRolePermissionGroup) => { if (isGroupChecked(group)) { // If all checked, uncheck all setPermissions((current) => current.filter((p) => !group.permissions.includes(p))) } else { // If not all checked, check all setPermissions((current) => { const newPermissions = [...current] group.permissions.forEach((key) => { if (!newPermissions.includes(key)) { newPermissions.push(key) } }) return newPermissions }) } } // Toggle a single permission const handlePermissionToggle = (permission: OrganizationRolePermissionsEnum) => { setPermissions((current) => { if (current.includes(permission)) { return current.filter((p) => p !== permission) } else { return [...current, permission] } }) } return ( { onOpenChange(isOpen) if (!isOpen) { setName('') setDescription('') setPermissions([]) } }} > Edit Role Modify permissions for the custom organization role.
{ e.preventDefault() await handleUpdateRole() }} >
setName(e.target.value)} placeholder="Name" />
setDescription(e.target.value)} placeholder="Description" />
{ORGANIZATION_ROLE_PERMISSIONS_GROUPS.map((group) => { const groupIsChecked = isGroupChecked(group) return (
handleGroupToggle(group)} />
{group.permissions.map((permission) => (
handlePermissionToggle(permission)} disabled={groupIsChecked} className={`${groupIsChecked ? 'pointer-events-none' : ''}`} />
))}
) })}
{loading ? ( ) : ( )}
) } ================================================ FILE: apps/dashboard/src/components/Organizations/CreateOrganizationDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { useState } from 'react' import { Check, Copy } from 'lucide-react' import { Organization, Region } from '@daytonaio/api-client' import { Button } from '@/components/ui/button' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Input } from '@/components/ui/input' import { Link } from 'react-router-dom' import { Label } from '@/components/ui/label' import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@/components/ui/select' import { RoutePath } from '@/enums/RoutePath' interface CreateOrganizationDialogProps { open: boolean billingApiUrl?: string regions: Region[] loadingRegions: boolean getRegionName: (regionId: string) => string | undefined onOpenChange: (open: boolean) => void onCreateOrganization: (name: string, defaultRegionId: string) => Promise } export const CreateOrganizationDialog: React.FC = ({ open, billingApiUrl, regions, loadingRegions, getRegionName, onOpenChange, onCreateOrganization, }) => { const [name, setName] = useState('') const [defaultRegionId, setDefaultRegionId] = useState(undefined) const [loading, setLoading] = useState(false) const [createdOrg, setCreatedOrg] = useState(null) const [copied, setCopied] = useState(null) const handleCreateOrganization = async () => { if (!name.trim() || !defaultRegionId) { return } setLoading(true) const org = await onCreateOrganization(name.trim(), defaultRegionId) if (org) { // TODO: Return when we fix the selected org states // setCreatedOrg(org) // setName('') // setDefaultRegionId(undefined) // setLoading(false) } else { setLoading(false) } } const copyToClipboard = async (text: string, label: string) => { try { 
await navigator.clipboard.writeText(text) setCopied(label) setTimeout(() => setCopied(null), 2000) } catch (err) { console.error('Failed to copy text:', err) } } return ( { onOpenChange(isOpen) if (!isOpen) { setName('') setDefaultRegionId(undefined) setCreatedOrg(null) setCopied(null) } }} > {createdOrg ? 'New Organization' : 'Create New Organization'} {createdOrg ? 'You can switch between organizations in the top left corner of the sidebar.' : 'Create a new organization to share resources and collaborate with others.'} {createdOrg ? (
{createdOrg.id} {(copied === 'Organization ID' && ) || ( copyToClipboard(createdOrg.id, 'Organization ID')} /> )}
{createdOrg.defaultRegionId && (
)}

Your organization is created.

{billingApiUrl ? ( <> To get started, add a payment method on the{' '} { onOpenChange(false) }} > wallet page . ) : null}

) : !loadingRegions && regions.length === 0 ? (

No regions available

Organization cannot be created because no regions are available.

) : (
{ e.preventDefault() await handleCreateOrganization() }} >
setName(e.target.value)} placeholder="Name" />

The region that will be used as the default target for creating sandboxes in this organization.

)} {!createdOrg && (loading ? ( ) : ( ))}
) } ================================================ FILE: apps/dashboard/src/components/Organizations/DeleteOrganizationDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React, { useState } from 'react' import { Button } from '@/components/ui/button' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger, } from '@/components/ui/dialog' import { Input } from '@/components/ui/input' import { Label } from '@/components/ui/label' interface DeleteOrganizationDialogProps { organizationName: string onDeleteOrganization: () => Promise loading: boolean } export const DeleteOrganizationDialog: React.FC = ({ organizationName, onDeleteOrganization, loading, }) => { const [open, setOpen] = useState(false) const [confirmName, setConfirmName] = useState('') const handleDeleteOrganization = async () => { const success = await onDeleteOrganization() if (success) { setOpen(false) setConfirmName('') } } return ( { setOpen(isOpen) if (!isOpen) { setConfirmName('') } }} > Delete Organization This will permanently delete all associated data. This action cannot be undone.
{ e.preventDefault() await handleDeleteOrganization() }} >
setConfirmName(e.target.value)} placeholder={organizationName} />
{loading ? ( ) : ( )}
) } ================================================ FILE: apps/dashboard/src/components/Organizations/LeaveOrganizationDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React, { useState } from 'react' import { Button } from '@/components/ui/button' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger, } from '@/components/ui/dialog' interface LeaveOrganizationDialogProps { onLeaveOrganization: () => Promise loading: boolean } export const LeaveOrganizationDialog: React.FC = ({ onLeaveOrganization, loading }) => { const [open, setOpen] = useState(false) const handleLeaveOrganization = async () => { const success = await onLeaveOrganization() if (success) { setOpen(false) } } return ( Leave Organization Are you sure you want to leave this organization? {loading ? ( ) : ( )} ) } ================================================ FILE: apps/dashboard/src/components/Organizations/OrganizationPicker.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuSeparator, DropdownMenuTrigger, } from '@/components/ui/dropdown-menu' import { SidebarMenuButton, SidebarMenuItem } from '@/components/ui/sidebar' import { useApi } from '@/hooks/useApi' import { useOrganizations } from '@/hooks/useOrganizations' import { useRegions } from '@/hooks/useRegions' import { useSelectedOrganization } from '@/hooks/useSelectedOrganization' import { handleApiError } from '@/lib/error-handling' import { Organization } from '@daytonaio/api-client' import { Building2, ChevronsUpDown, Copy, PlusCircle, SquareUserRound } from 'lucide-react' import { useEffect, useMemo, useState } from 'react' import { toast } from 'sonner' import { useCopyToClipboard } from 'usehooks-ts' import { CommandHighlight, useRegisterCommands, type CommandConfig } from '../CommandPalette' import { CreateOrganizationDialog } from './CreateOrganizationDialog' function useOrganizationCommands() { const { organizations } = useOrganizations() const { selectedOrganization, onSelectOrganization } = useSelectedOrganization() const [, copyToClipboard] = useCopyToClipboard() const commands: CommandConfig[] = useMemo(() => { const cmds: CommandConfig[] = [] if (selectedOrganization) { cmds.push({ id: 'copy-org-id', label: 'Copy Organization ID', icon: , onSelect: () => { copyToClipboard(selectedOrganization.id) toast.success('Organization ID copied to clipboard') }, }) } for (const org of organizations) { if (org.id === selectedOrganization?.id) continue cmds.push({ id: `switch-org-${org.id}`, label: ( <> Switch to {org.name} ), value: `switch to organization ${org.name}`, icon: , onSelect: () => onSelectOrganization(org.id), }) } return cmds }, [organizations, selectedOrganization, copyToClipboard, onSelectOrganization]) useRegisterCommands(commands, { groupId: 'organization', groupLabel: 'Organization', groupOrder: 5 }) } export const OrganizationPicker: React.FC = 
() => { const { organizationsApi } = useApi() const { organizations, refreshOrganizations } = useOrganizations() const { selectedOrganization, onSelectOrganization } = useSelectedOrganization() const { sharedRegions: regions, loadingSharedRegions: loadingRegions, getRegionName } = useRegions() const [optimisticSelectedOrganization, setOptimisticSelectedOrganization] = useState(selectedOrganization) const [loadingSelectOrganization, setLoadingSelectOrganization] = useState(false) useOrganizationCommands() useEffect(() => { setOptimisticSelectedOrganization(selectedOrganization) }, [selectedOrganization]) const handleSelectOrganization = async (organizationId: string) => { const organization = organizations.find((org) => org.id === organizationId) if (!organization) { return } setOptimisticSelectedOrganization(organization) setLoadingSelectOrganization(true) const success = await onSelectOrganization(organizationId) if (!success) { setOptimisticSelectedOrganization(selectedOrganization) } setLoadingSelectOrganization(false) } const [showCreateOrganizationDialog, setShowCreateOrganizationDialog] = useState(false) const handleCreateOrganization = async (name: string, defaultRegionId: string) => { try { const organization = ( await organizationsApi.createOrganization({ name: name.trim(), defaultRegionId, }) ).data toast.success('Organization created successfully') await refreshOrganizations(organization.id) return organization } catch (error) { handleApiError(error, 'Failed to create organization') return null } } const getOrganizationIcon = (organization: Organization) => { if (organization.personal) { return } return } // personal first, then alphabetical const sortedOrganizations = useMemo(() => { return organizations.sort((a, b) => { if (a.personal && !b.personal) { return -1 } else if (!a.personal && b.personal) { return 1 } else { return a.name.localeCompare(b.name) } }) }, [organizations]) if (!optimisticSelectedOrganization) { return null } return (
{optimisticSelectedOrganization.name[0].toUpperCase()}
{optimisticSelectedOrganization.name}
{sortedOrganizations.map((org) => ( handleSelectOrganization(org.id)} className="cursor-pointer flex items-center gap-2" > {getOrganizationIcon(org)} {org.name} ))}
setShowCreateOrganizationDialog(true)} > Create Organization
) } ================================================ FILE: apps/dashboard/src/components/Organizations/SetDefaultRegionDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { useState } from 'react' import { Region } from '@daytonaio/api-client' import { Button } from '@/components/ui/button' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Label } from '@/components/ui/label' import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@/components/ui/select' interface SetDefaultRegionDialogProps { open: boolean onOpenChange: (open: boolean) => void regions: Region[] loadingRegions: boolean onSetDefaultRegion: (defaultRegionId: string) => Promise } export const SetDefaultRegionDialog: React.FC = ({ open, onOpenChange, regions, loadingRegions, onSetDefaultRegion, }) => { const [defaultRegionId, setDefaultRegionId] = useState(undefined) const [loading, setLoading] = useState(false) const handleSetDefaultRegion = async () => { if (!defaultRegionId) { return } setLoading(true) const success = await onSetDefaultRegion(defaultRegionId) // TODO: Return when we fix the selected org states // if (success) { // onOpenChange(false) // } // setLoading(false) } return ( { onOpenChange(isOpen) if (!isOpen) { setDefaultRegionId(undefined) } }} > Set Default Region Your organization needs a default region to create sandboxes and manage resources. {!loadingRegions && regions.length === 0 ? (

No regions available

Default region cannot be set because no regions are available.

) : (
{ e.preventDefault() await handleSetDefaultRegion() }} >
)} {loading ? ( ) : ( )}
) } ================================================ FILE: apps/dashboard/src/components/PageLayout.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { cn } from '@/lib/utils' import { type ComponentProps } from 'react' import { BannerStack } from './Banner' import { SidebarTrigger } from './ui/sidebar' function PageLayout({ className, ...props }: ComponentProps<'div'>) { return
} function PageHeader({ className, children, ...props }: ComponentProps<'header'>) { return (
{children}
) } function PageTitle({ className, children, ...props }: ComponentProps<'h1'>) { return (

{children}

) } function PageDescription({ className, ...props }: ComponentProps<'p'>) { return

} function PageBanner({ className, children, ...props }: ComponentProps<'div'>) { return (

{children}
) } function PageContent({ className, size = 'default', ...props }: ComponentProps<'main'> & { size?: 'default' | 'full' }) { return ( <>
) } export { PageContent, PageDescription, PageHeader, PageLayout, PageTitle } ================================================ FILE: apps/dashboard/src/components/Pagination.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Table } from '@tanstack/react-table' import { ChevronLeft, ChevronRight, ChevronsLeft, ChevronsRight } from 'lucide-react' import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from './ui/select' import { Button } from './ui/button' import { PAGE_SIZE_OPTIONS } from '../constants/Pagination' interface PaginationProps { table: Table selectionEnabled?: boolean className?: string entityName?: string totalItems?: number } export function Pagination({ table, selectionEnabled, className, entityName, totalItems, }: PaginationProps) { return (
{selectionEnabled ? (
{table.getFilteredSelectedRowModel().rows.length} of {totalItems ?? table.getFilteredRowModel().rows.length}{' '} item(s) selected.
) : (
{totalItems ?? table.getFilteredRowModel().rows.length} total item(s)
)}
Page {table.getState().pagination.pageIndex + 1} of {table.getPageCount() || 1}
) } ================================================ FILE: apps/dashboard/src/components/Playground/ActionForm.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Label } from '@/components/ui/label' import { PlaygroundActionFormDataBasic } from '@/contexts/PlaygroundContext' import { PlaygroundActions } from '@/enums/Playground' import { usePlayground } from '@/hooks/usePlayground' import PlaygroundActionRunButton from './ActionRunButton' type PlaygroundActionFormProps = { actionFormItem: PlaygroundActionFormDataBasic onRunActionClick?: () => Promise disable?: boolean hideRunActionButton?: boolean } function PlaygroundActionForm({ actionFormItem, onRunActionClick, disable, hideRunActionButton, }: PlaygroundActionFormProps) { const { runningActionMethod, actionRuntimeError } = usePlayground() return ( <>

{actionFormItem.description}

{!hideRunActionButton && ( )}
{actionRuntimeError[actionFormItem.methodName] && (

{actionRuntimeError[actionFormItem.methodName]}

)}
) } export default PlaygroundActionForm ================================================ FILE: apps/dashboard/src/components/Playground/ActionRunButton.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { cn } from '@/lib/utils' import { Loader2, Play } from 'lucide-react' import TooltipButton from '../TooltipButton' type PlaygroundActionRunButtonProps = { isDisabled: boolean isRunning: boolean onRunActionClick?: () => Promise className?: string } const PlaygroundActionRunButton: React.FC = ({ isDisabled, isRunning, onRunActionClick, className, }) => { return ( {isRunning ? : } ) } export default PlaygroundActionRunButton ================================================ FILE: apps/dashboard/src/components/Playground/Inputs/CheckboxInput.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Checkbox } from '@/components/ui/checkbox' import { ParameterFormItem } from '@/contexts/PlaygroundContext' type FormCheckboxInputProps = { checkedValue: boolean | undefined formItem: ParameterFormItem onChangeHandler: (checked: boolean) => void } const FormCheckboxInput: React.FC = ({ checkedValue, formItem, onChangeHandler }) => { return (
onChangeHandler(!!value)} />
) } export default FormCheckboxInput ================================================ FILE: apps/dashboard/src/components/Playground/Inputs/InlineInputFormControl.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React, { ReactNode } from 'react' import { ParameterFormItem } from '@/contexts/PlaygroundContext' import InputLabel from './Label' type InlineInputFormControlProps = { formItem: ParameterFormItem children: ReactNode } const InlineInputFormControl: React.FC = ({ formItem, children }) => { return (
{children}
) } export default InlineInputFormControl ================================================ FILE: apps/dashboard/src/components/Playground/Inputs/Label.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Label } from '@/components/ui/label' import { ParameterFormItem } from '@/contexts/PlaygroundContext' type InputLabelProps = { formItem: ParameterFormItem } const InputLabel: React.FC = ({ formItem }) => { return ( ) } export default InputLabel ================================================ FILE: apps/dashboard/src/components/Playground/Inputs/NumberInput.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Input } from '@/components/ui/input' import { NumberParameterFormItem } from '@/contexts/PlaygroundContext' import React from 'react' type FormNumberInputProps = { numberValue: number | undefined numberFormItem: NumberParameterFormItem onChangeHandler: (value: number | undefined) => void disabled?: boolean } const FormNumberInput: React.FC = ({ numberValue, numberFormItem, onChangeHandler, disabled, }) => { return ( { const newValue = e.target.value ? Number(e.target.value) : undefined onChangeHandler(newValue) }} disabled={disabled} /> ) } export default FormNumberInput ================================================ FILE: apps/dashboard/src/components/Playground/Inputs/SelectInput.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@/components/ui/select' import { ParameterFormItem } from '@/contexts/PlaygroundContext' import { Loader2 } from 'lucide-react' type SelectOption = { value: string label: string } type FormSelectInputProps = { selectOptions: SelectOption[] selectValue: string | undefined formItem: ParameterFormItem onChangeHandler: (value: string) => void loading?: boolean } const FormSelectInput: React.FC = ({ selectOptions, selectValue, formItem, onChangeHandler, loading, }) => { return ( ) } export default FormSelectInput ================================================ FILE: apps/dashboard/src/components/Playground/Inputs/StackedInputFormControl.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React, { ReactNode } from 'react' import { ParameterFormItem } from '@/contexts/PlaygroundContext' import InputLabel from './Label' type StackedInputFormControlProps = { formItem: ParameterFormItem children: ReactNode } const StackedInputFormControl: React.FC = ({ formItem, children }) => { return (
{children}
) } export default StackedInputFormControl ================================================ FILE: apps/dashboard/src/components/Playground/Inputs/TextInput.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Input } from '@/components/ui/input' import { ParameterFormItem } from '@/contexts/PlaygroundContext' type FormTextInputProps = { textValue: string | undefined formItem: ParameterFormItem onChangeHandler: (value: string) => void } const FormTextInput: React.FC = ({ textValue, formItem, onChangeHandler }) => { return ( onChangeHandler(e.target.value)} /> ) } export default FormTextInput ================================================ FILE: apps/dashboard/src/components/Playground/PlaygroundLayout.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ScrollArea } from '@/components/ui/scroll-area' import { cn } from '@/lib/utils' function PlaygroundLayout({ children, className }: { children: React.ReactNode; className?: string }) { return (
{children}
) } function PlaygroundLayoutSidebar({ children }: { children: React.ReactNode }) { return (
{children}
) } function PlaygroundLayoutContent({ children, className }: { children: React.ReactNode; className?: string }) { return (
{children}
) } export { PlaygroundLayout, PlaygroundLayoutContent, PlaygroundLayoutSidebar } ================================================ FILE: apps/dashboard/src/components/Playground/ResponseCard.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ReactNode } from 'react' type ResponseCardProps = { responseContent: string | ReactNode } const ResponseCard: React.FC = ({ responseContent }) => { return (
        {typeof responseContent === 'string' ? {responseContent} : responseContent}
      
) } export default ResponseCard ================================================ FILE: apps/dashboard/src/components/Playground/Sandbox/CodeSnippets/index.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { CodeLanguage } from '@daytonaio/sdk' import { PythonSnippetGenerator } from './python' import { CodeSnippetGenerator } from './types' import { TypeScriptSnippetGenerator } from './typescript' export const codeSnippetGenerators: Record, CodeSnippetGenerator> = { [CodeLanguage.PYTHON]: PythonSnippetGenerator, [CodeLanguage.TYPESCRIPT]: TypeScriptSnippetGenerator, } export type { CodeSnippetActionFlags, CodeSnippetGenerator, CodeSnippetParams } from './types' ================================================ FILE: apps/dashboard/src/components/Playground/Sandbox/CodeSnippets/python.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { CodeSnippetGenerator } from './types' import { joinGroupedSections } from './utils' export const PythonSnippetGenerator: CodeSnippetGenerator = { getImports(p) { return ( [ 'from daytona import Daytona', p.actions.useConfigObject ? 'DaytonaConfig' : '', p.config.useSandboxCreateParams ? p.config.createSandboxFromSnapshot ? 'CreateSandboxFromSnapshotParams' : 'CreateSandboxFromImageParams' : '', p.config.useResources ? 'Resources' : '', p.config.createSandboxFromImage ? 'Image' : '', ] .filter(Boolean) .join(', ') + '\n' ) }, getConfig(p) { if (!p.actions.useConfigObject) return '' return ['\n# Define the configuration', 'config = DaytonaConfig()'].filter(Boolean).join('\n') + '\n' }, getClientInit(p) { return ['# Initialize the Daytona client', `daytona = Daytona(${p.actions.useConfigObject ? 
'config' : ''})`] .filter(Boolean) .join('\n') }, getResources(p) { if (!p.config.useResources) return '' const ind = '\t' return [ '\n\n# Create a Sandbox with custom resources\nresources = Resources(', p.config.useResourcesCPU ? `${ind}cpu=${p.state['resources']['cpu']}, # ${p.state['resources']['cpu']} CPU cores` : '', p.config.useResourcesMemory ? `${ind}memory=${p.state['resources']['memory']}, # ${p.state['resources']['memory']}GB RAM` : '', p.config.useResourcesDisk ? `${ind}disk=${p.state['resources']['disk']}, # ${p.state['resources']['disk']}GB disk space` : '', ')', ] .filter(Boolean) .join('\n') }, getSandboxParams(p) { if (!p.config.useSandboxCreateParams) return '' const ind = '\t' return [ `\n\nparams = ${p.config.createSandboxFromSnapshot ? 'CreateSandboxFromSnapshotParams' : 'CreateSandboxFromImageParams'}(`, p.config.useCustomSandboxSnapshotName ? `${ind}snapshot="${p.state['snapshotName']}",` : '', p.config.createSandboxFromImage ? `${ind}image=Image.debian_slim("3.13"),` : '', p.config.useResources ? `${ind}resources=resources,` : '', p.config.useLanguageParam ? `${ind}language="${p.state['language']}",` : '', ...(p.config.createSandboxParamsExist ? [ p.config.useAutoStopInterval ? `${ind}auto_stop_interval=${p.state['createSandboxBaseParams']['autoStopInterval']}, # ${p.state['createSandboxBaseParams']['autoStopInterval'] == 0 ? 'Disables the auto-stop feature' : `Sandbox will be stopped after ${p.state['createSandboxBaseParams']['autoStopInterval']} minute${(p.state['createSandboxBaseParams']['autoStopInterval'] as number) > 1 ? 's' : ''}`}` : '', p.config.useAutoArchiveInterval ? `${ind}auto_archive_interval=${p.state['createSandboxBaseParams']['autoArchiveInterval']}, # Auto-archive after a Sandbox has been stopped for ${p.state['createSandboxBaseParams']['autoArchiveInterval'] == 0 ? '30 days' : `${p.state['createSandboxBaseParams']['autoArchiveInterval']} minutes`}` : '', p.config.useAutoDeleteInterval ? 
`${ind}auto_delete_interval=${p.state['createSandboxBaseParams']['autoDeleteInterval']}, # ${p.state['createSandboxBaseParams']['autoDeleteInterval'] == 0 ? 'Sandbox will be deleted immediately after stopping' : p.state['createSandboxBaseParams']['autoDeleteInterval'] == -1 ? 'Auto-delete functionality disabled' : `Auto-delete after a Sandbox has been stopped for ${p.state['createSandboxBaseParams']['autoDeleteInterval']} minutes`}` : '', ] : []), ')', ] .filter(Boolean) .join('\n') }, getSandboxCreate(p) { return [ '\n# Create the Sandbox instance', `sandbox = daytona.create(${p.config.useSandboxCreateParams ? 'params' : ''})`, 'print(f"Sandbox created:{sandbox.id}")', ].join('\n') }, getCodeRun(p) { if (!p.actions.codeToRunExists) return '' const ind = '\t' return [ '\n\n# Run code securely inside the Sandbox', 'codeRunResponse = sandbox.process.code_run(', `'''${p.state['codeRunParams'].languageCode}'''`, ')', 'if codeRunResponse.exit_code != 0:', `${ind}print(f"Error: {codeRunResponse.exit_code} {codeRunResponse.result}")`, 'else:', `${ind}print(codeRunResponse.result)`, ].join('\n') }, getShellRun(p) { if (!p.actions.shellCommandExists) return '' return [ '\n\n# Execute shell commands', `shellRunResponse = sandbox.process.exec("${p.state['shellCommandRunParams'].shellCommand}")`, 'print(shellRunResponse.result)', ].join('\n') }, getFileSystemOps(p) { const sections: string[] = [] const ind = '\t' if (p.actions.fileSystemCreateFolderParamsSet) { sections.push( [ '# Create folder with specific permissions', `sandbox.fs.create_folder("${p.state['createFolderParams'].folderDestinationPath}", "${p.state['createFolderParams'].permissions}")`, ].join('\n'), ) } if (p.actions.fileSystemListFilesLocationSet) { sections.push( [ '# List files in a directory', `files = sandbox.fs.list_files("${p.state['listFilesParams'].directoryPath}")`, 'for file in files:', `${ind}print(f"Name: {file.name}")`, `${ind}print(f"Is directory: {file.is_dir}")`, `${ind}print(f"Size: 
{file.size}")`, `${ind}print(f"Modified: {file.mod_time}")`, ].join('\n'), ) } if (p.actions.fileSystemDeleteFileRequiredParamsSet) { sections.push( [ `# Delete ${p.actions.useFileSystemDeleteFileRecursive ? 'directory' : 'file'}`, `sandbox.fs.delete_file("${p.state['deleteFileParams'].filePath}"${p.actions.useFileSystemDeleteFileRecursive ? ', True' : ''})`, ].join('\n'), ) } return joinGroupedSections(sections) }, getGitOps(p) { const sections: string[] = [] const ind = '\t' if (p.actions.gitCloneOperationRequiredParamsSet) { sections.push( [ '# Clone git repository', 'sandbox.git.clone(', `${ind}url="${p.state['gitCloneParams'].repositoryURL}",`, `${ind}path="${p.state['gitCloneParams'].cloneDestinationPath}",`, p.actions.useGitCloneBranch ? `${ind}branch="${p.state['gitCloneParams'].branchToClone}",` : '', p.actions.useGitCloneCommitId ? `${ind}commit_id="${p.state['gitCloneParams'].commitToClone}",` : '', p.actions.useGitCloneUsername ? `${ind}username="${p.state['gitCloneParams'].authUsername}",` : '', p.actions.useGitClonePassword ? 
`${ind}password="${p.state['gitCloneParams'].authPassword}"` : '', ')', ] .filter(Boolean) .join('\n'), ) } if (p.actions.gitStatusOperationLocationSet) { sections.push( [ '# Get repository status', `status = sandbox.git.status("${p.state['gitStatusParams'].repositoryPath}")`, 'print(f"Current branch: {status.current_branch}")', 'print(f"Commits ahead: {status.ahead}")', 'print(f"Commits behind: {status.behind}")', 'for file_status in status.file_status:', '\tprint(f"File: {file_status.name}")', ].join('\n'), ) } if (p.actions.gitBranchesOperationLocationSet) { sections.push( [ '# List branches', `branchesResponse = sandbox.git.branches("${p.state['gitBranchesParams'].repositoryPath}")`, 'for branch in branchesResponse.branches:', '\tprint(f"Branch: {branch}")', ].join('\n'), ) } return joinGroupedSections(sections) }, buildFullSnippet(p) { const imports = this.getImports(p) const config = this.getConfig(p) const client = this.getClientInit(p) const resources = this.getResources(p) const params = this.getSandboxParams(p) const create = this.getSandboxCreate(p) const codeRun = this.getCodeRun(p) const shell = this.getShellRun(p) const fsOps = this.getFileSystemOps(p) const gitOps = this.getGitOps(p) return `${imports}${config}\n${client}${resources}${params}\n${create}${fsOps}${gitOps}${codeRun}${shell}` }, } ================================================ FILE: apps/dashboard/src/components/Playground/Sandbox/CodeSnippets/types.ts ================================================ /* * Copyright Daytona Platforms Inc. 
 * SPDX-License-Identifier: AGPL-3.0
 */

import { SandboxParams, SandboxParametersInfo } from '@/contexts/PlaygroundContext'

/**
 * Boolean flags derived from the playground UI state. Each flag gates one
 * optional section of the generated snippet (see getFileSystemOps/getGitOps/
 * getCodeRun/getShellRun in the per-language generators).
 */
export interface CodeSnippetActionFlags {
  // When true, generators emit an explicit config object and pass it to the client constructor.
  useConfigObject: boolean
  // File-system sections — each flag gates emission of the corresponding fs call.
  fileSystemListFilesLocationSet: boolean
  fileSystemCreateFolderParamsSet: boolean
  fileSystemDeleteFileRequiredParamsSet: boolean
  // When true, the delete-file call is emitted with its recursive argument.
  useFileSystemDeleteFileRecursive: boolean
  // Process-execution sections.
  shellCommandExists: boolean
  codeToRunExists: boolean
  // Git sections — clone plus its optional branch/commit/credential arguments.
  gitCloneOperationRequiredParamsSet: boolean
  useGitCloneBranch: boolean
  useGitCloneCommitId: boolean
  useGitCloneUsername: boolean
  useGitClonePassword: boolean
  gitStatusOperationLocationSet: boolean
  gitBranchesOperationLocationSet: boolean
}

/**
 * Full input handed to a snippet generator: raw playground form state,
 * derived parameter info, and the action flags above.
 */
export interface CodeSnippetParams {
  state: SandboxParams
  config: SandboxParametersInfo
  actions: CodeSnippetArgsAlias
}

/**
 * Contract implemented once per target language (python.ts, typescript.ts).
 * Each getter returns one snippet section as a string — '' when that section
 * is disabled — and buildFullSnippet assembles them into a complete program.
 */
export interface CodeSnippetGenerator {
  getImports(p: CodeSnippetParams): string
  getConfig(p: CodeSnippetParams): string
  getClientInit(p: CodeSnippetParams): string
  getResources(p: CodeSnippetParams): string
  getSandboxParams(p: CodeSnippetParams): string
  getSandboxCreate(p: CodeSnippetParams): string
  getCodeRun(p: CodeSnippetParams): string
  getShellRun(p: CodeSnippetParams): string
  getFileSystemOps(p: CodeSnippetParams): string
  getGitOps(p: CodeSnippetParams): string
  buildFullSnippet(p: CodeSnippetParams): string
}

================================================
FILE: apps/dashboard/src/components/Playground/Sandbox/CodeSnippets/typescript.ts
================================================
/*
 * Copyright Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import { CodeSnippetGenerator } from './types'
import { joinGroupedSections } from './utils'

export const TypeScriptSnippetGenerator: CodeSnippetGenerator = {
  getImports(p) {
    return (
      [
        'import { Daytona',
        p.actions.useConfigObject ? 'DaytonaConfig' : '',
        p.config.createSandboxFromImage ?
'Image' : '', ] .filter(Boolean) .join(', ') + " } from '@daytonaio/sdk'\n" ) }, getConfig(p) { if (!p.actions.useConfigObject) return '' return ['\n// Define the configuration', 'const config: DaytonaConfig = { }'].filter(Boolean).join('\n') + '\n' }, getClientInit(p) { return [ '\t// Initialize the Daytona client', `\tconst daytona = new Daytona(${p.actions.useConfigObject ? 'config' : ''})`, ] .filter(Boolean) .join('\n') }, getResources(p) { if (!p.config.useResources) return '' const ind = '\t\t\t\t' return [ `${ind.slice(0, -1)}resources: {`, p.config.useResourcesCPU ? `${ind}cpu: ${p.state['resources']['cpu']}, // ${p.state['resources']['cpu']} CPU cores` : '', p.config.useResourcesMemory ? `${ind}memory: ${p.state['resources']['memory']}, // ${p.state['resources']['memory']}GB RAM` : '', p.config.useResourcesDisk ? `${ind}disk: ${p.state['resources']['disk']}, // ${p.state['resources']['disk']}GB disk space` : '', `${ind.slice(0, -1)}}`, ] .filter(Boolean) .join('\n') }, getSandboxParams(p) { if (!p.config.useSandboxCreateParams) return '' const ind = '\t\t\t' return [ `{`, p.config.useCustomSandboxSnapshotName ? `${ind}snapshot: '${p.state['snapshotName']}',` : '', p.config.createSandboxFromImage ? `${ind}image: Image.debianSlim("3.13"),` : '', this.getResources(p), p.config.useLanguageParam ? `${ind}language: '${p.state['language']}',` : '', ...(p.config.createSandboxParamsExist ? [ p.config.useAutoStopInterval ? `${ind}autoStopInterval: ${p.state['createSandboxBaseParams']['autoStopInterval']}, // ${p.state['createSandboxBaseParams']['autoStopInterval'] == 0 ? 'Disables the auto-stop feature' : `Sandbox will be stopped after ${p.state['createSandboxBaseParams']['autoStopInterval']} minute${(p.state['createSandboxBaseParams']['autoStopInterval'] as number) > 1 ? 's' : ''}`}` : '', p.config.useAutoArchiveInterval ? 
`${ind}autoArchiveInterval: ${p.state['createSandboxBaseParams']['autoArchiveInterval']}, // Auto-archive after a Sandbox has been stopped for ${p.state['createSandboxBaseParams']['autoArchiveInterval'] == 0 ? '30 days' : `${p.state['createSandboxBaseParams']['autoArchiveInterval']} minutes`}` : '', p.config.useAutoDeleteInterval ? `${ind}autoDeleteInterval: ${p.state['createSandboxBaseParams']['autoDeleteInterval']}, // ${p.state['createSandboxBaseParams']['autoDeleteInterval'] == 0 ? 'Sandbox will be deleted immediately after stopping' : p.state['createSandboxBaseParams']['autoDeleteInterval'] == -1 ? 'Auto-delete functionality disabled' : `Auto-delete after a Sandbox has been stopped for ${p.state['createSandboxBaseParams']['autoDeleteInterval']} minutes`}` : '', ] : []), `${ind.slice(0, -1)}}`, ] .filter(Boolean) .join('\n') }, getSandboxCreate(p) { return [ '\t\t// Create the Sandbox instance', `\t\tconst sandbox = await daytona.create(${p.config.useSandboxCreateParams ? this.getSandboxParams(p) : ''})`, ].join('\n') }, getCodeRun(p) { if (!p.actions.codeToRunExists) return '' const ind = '\t\t' return [ `\n\n${ind}// Run code securely inside the Sandbox`, `${ind}const codeRunResponse = await sandbox.process.codeRun(\``, `${(p.state['codeRunParams'].languageCode ?? 
'').replace(/`/g, '\\`').replace(/\$\{/g, '\\${')}`, // Escape backticks and ${ to prevent breaking the template literal `${ind}\`)`, `${ind}if (codeRunResponse.exitCode !== 0) {`, `${ind + '\t'}console.error("Error running code:", codeRunResponse.exitCode, codeRunResponse.result)`, `${ind}} else {`, `${ind + '\t'}console.log(codeRunResponse.result)`, `${ind}}`, ].join('\n') }, getShellRun(p) { if (!p.actions.shellCommandExists) return '' const ind = '\t\t' return [ `\n\n${ind}// Execute shell commands`, `${ind}const shellRunResponse = await sandbox.process.executeCommand('${p.state['shellCommandRunParams'].shellCommand}')`, `${ind}console.log(shellRunResponse.result)`, ].join('\n') }, getFileSystemOps(p) { const sections: string[] = [] const ind = '\t\t\t' const base = ind.slice(0, -1) if (p.actions.fileSystemCreateFolderParamsSet) { sections.push( [ `${base}// Create folder with specific permissions`, `${base}await sandbox.fs.createFolder("${p.state['createFolderParams'].folderDestinationPath}", "${p.state['createFolderParams'].permissions}")`, ].join('\n'), ) } if (p.actions.fileSystemListFilesLocationSet) { sections.push( [ `${base}// List files in a directory`, `${base}const files = await sandbox.fs.listFiles("${p.state['listFilesParams'].directoryPath}")`, `${base}files.forEach(file => {`, `${ind}console.log(\`Name: \${file.name}\`)`, `${ind}console.log(\`Is directory: \${file.isDir}\`)`, `${ind}console.log(\`Size: \${file.size}\`)`, `${ind}console.log(\`Modified: \${file.modTime}\`)`, `${base}})`, ].join('\n'), ) } if (p.actions.fileSystemDeleteFileRequiredParamsSet) { sections.push( [ `${base}// Delete ${p.actions.useFileSystemDeleteFileRecursive ? 'directory' : 'file'}`, `${base}await sandbox.fs.deleteFile("${p.state['deleteFileParams'].filePath}"${p.actions.useFileSystemDeleteFileRecursive ? 
', true' : ''})`, ].join('\n'), ) } return joinGroupedSections(sections) }, getGitOps(p) { const sections: string[] = [] const ind = '\t\t\t' const base = ind.slice(0, -1) if (p.actions.gitCloneOperationRequiredParamsSet) { sections.push( [ `${base}// Clone git repository`, `${base}await sandbox.git.clone(`, `${ind}"${p.state['gitCloneParams'].repositoryURL}",`, `${ind}"${p.state['gitCloneParams'].cloneDestinationPath}",`, p.actions.useGitCloneBranch ? `${ind}"${p.state['gitCloneParams'].branchToClone}",` : '', p.actions.useGitCloneCommitId ? `${ind}"${p.state['gitCloneParams'].commitToClone}",` : '', p.actions.useGitCloneUsername ? `${ind}"${p.state['gitCloneParams'].authUsername}",` : '', p.actions.useGitClonePassword ? `${ind}"${p.state['gitCloneParams'].authPassword}"` : '', `${base})`, ] .filter(Boolean) .join('\n'), ) } if (p.actions.gitStatusOperationLocationSet) { sections.push( [ `${base}// Get repository status`, `${base}const status = await sandbox.git.status("${p.state['gitStatusParams'].repositoryPath}")`, `${base}console.log(\`Current branch: \${status.currentBranch}\`)`, `${base}console.log(\`Commits ahead: \${status.ahead}\`)`, `${base}console.log(\`Commits behind: \${status.behind}\`)`, `${base}status.fileStatus.forEach(file => {`, `${ind}console.log(\`File: \${file.name}\`)`, `${base}})`, ].join('\n'), ) } if (p.actions.gitBranchesOperationLocationSet) { sections.push( [ `${base}// List branches`, `${base}const branchesResponse = await sandbox.git.branches("${p.state['gitBranchesParams'].repositoryPath}")`, `${base}branchesResponse.branches.forEach(branch => {`, `${ind}console.log(\`Branch: \${branch}\`)`, `${base}})`, ].join('\n'), ) } return joinGroupedSections(sections) }, buildFullSnippet(p) { const imports = this.getImports(p) const config = this.getConfig(p) const client = this.getClientInit(p) const create = this.getSandboxCreate(p) const codeRun = this.getCodeRun(p) const shell = this.getShellRun(p) const fsOps = this.getFileSystemOps(p) 
const gitOps = this.getGitOps(p) return `${imports}${config} async function main() { ${client} \ttry { ${create}${fsOps}${gitOps}${codeRun}${shell} \t} catch (error) { \t\tconsole.error("Sandbox flow error:", error) \t} } main().catch(console.error)` }, } ================================================ FILE: apps/dashboard/src/components/Playground/Sandbox/CodeSnippets/utils.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ /** * Joins grouped code snippet sections with consistent spacing. * Each non-empty section gets a `\n\n` prefix, producing a blank line between sections. */ export function joinGroupedSections(sections: string[]): string { const nonEmpty = sections.filter(Boolean) if (nonEmpty.length === 0) return '' return nonEmpty.map((section) => '\n\n' + section).join('') } ================================================ FILE: apps/dashboard/src/components/Playground/Sandbox/CodeSnippetsResponse.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import PythonIcon from '@/assets/python.svg' import TypescriptIcon from '@/assets/typescript.svg' import CodeBlock from '@/components/CodeBlock' import { CopyButton } from '@/components/CopyButton' import TooltipButton from '@/components/TooltipButton' import { Button } from '@/components/ui/button' import { ScrollArea } from '@/components/ui/scroll-area' import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs' import { FileSystemActions, GitOperationsActions, ProcessCodeExecutionActions, SandboxParametersSections, } from '@/enums/Playground' import { usePlayground } from '@/hooks/usePlayground' import { usePlaygroundSandbox } from '@/hooks/usePlaygroundSandbox' import { createErrorMessageOutput } from '@/lib/playground' import { cn } from '@/lib/utils' import { CodeLanguage, Sandbox } from '@daytonaio/sdk' import { ChevronUpIcon, Loader2, PanelBottom, Play, XIcon } from 'lucide-react' import { ReactNode, useCallback, useEffect, useMemo, useRef, useState } from 'react' import { Group, Panel, usePanelRef } from 'react-resizable-panels' import ResponseCard from '../ResponseCard' import { Window, WindowContent, WindowTitleBar } from '../Window' import { codeSnippetGenerators, CodeSnippetParams } from './CodeSnippets' const codeSnippetSupportedLanguages = [ { value: CodeLanguage.PYTHON, label: 'Python', icon: PythonIcon }, { value: CodeLanguage.TYPESCRIPT, label: 'TypeScript', icon: TypescriptIcon }, ] as const const SECTION_SCROLL_MARKERS: Partial> = { [SandboxParametersSections.FILE_SYSTEM]: [ '# Create folder', '# List files', '# Delete', '// Create folder', '// List files', '// Delete', ], [SandboxParametersSections.GIT_OPERATIONS]: [ '# Clone git', '# Get repository', '# List branches', '// Clone git', '// Get repository', '// List branches', ], [SandboxParametersSections.PROCESS_CODE_EXECUTION]: [ '# Run code securely', '# Execute shell', '// Run code securely', '// Execute shell', ], } const 
SandboxCodeSnippetsResponse = ({ className }: { className?: string }) => { const [codeSnippetLanguage, setCodeSnippetLanguage] = useState(CodeLanguage.PYTHON) const [codeSnippetOutput, setCodeSnippetOutput] = useState('') const [isCodeSnippetRunning, setIsCodeSnippetRunning] = useState(false) const { sandboxParametersState, actionRuntimeError, getSandboxParametersInfo, enabledSections, pendingScrollSection, clearPendingScrollSection, } = usePlayground() const { sandbox: { create: createSandbox }, } = usePlaygroundSandbox() const useConfigObject = false // Currently not needed, we use jwtToken for client config const fsOn = enabledSections.includes(SandboxParametersSections.FILE_SYSTEM) const gitOn = enabledSections.includes(SandboxParametersSections.GIT_OPERATIONS) const procOn = enabledSections.includes(SandboxParametersSections.PROCESS_CODE_EXECUTION) const fileSystemListFilesLocationSet = fsOn && !actionRuntimeError[FileSystemActions.LIST_FILES] const fileSystemCreateFolderParamsSet = fsOn && !actionRuntimeError[FileSystemActions.CREATE_FOLDER] const fileSystemDeleteFileRequiredParamsSet = fsOn && !actionRuntimeError[FileSystemActions.DELETE_FILE] const useFileSystemDeleteFileRecursive = fileSystemDeleteFileRequiredParamsSet && sandboxParametersState['deleteFileParams'].recursive === true const shellCommandExists = procOn && !actionRuntimeError[ProcessCodeExecutionActions.SHELL_COMMANDS_RUN] const codeToRunExists = procOn && !actionRuntimeError[ProcessCodeExecutionActions.CODE_RUN] const gitCloneOperationRequiredParamsSet = gitOn && !actionRuntimeError[GitOperationsActions.GIT_CLONE] const useGitCloneBranch = !!sandboxParametersState['gitCloneParams'].branchToClone const useGitCloneCommitId = !!sandboxParametersState['gitCloneParams'].commitToClone const useGitCloneUsername = !!sandboxParametersState['gitCloneParams'].authUsername const useGitClonePassword = !!sandboxParametersState['gitCloneParams'].authPassword const gitStatusOperationLocationSet = gitOn && 
!actionRuntimeError[GitOperationsActions.GIT_STATUS] const gitBranchesOperationLocationSet = gitOn && !actionRuntimeError[GitOperationsActions.GIT_BRANCHES_LIST] const codeScrollRef = useRef(null) const highlightTimersRef = useRef[]>([]) const scrollToSection = useCallback((section: SandboxParametersSections) => { const viewport = codeScrollRef.current?.querySelector('[data-slot=scroll-area-viewport]') if (!viewport) return const markers = SECTION_SCROLL_MARKERS[section] if (!markers?.length) return const walker = document.createTreeWalker(viewport, NodeFilter.SHOW_TEXT) let node: Text | null while ((node = walker.nextNode() as Text | null)) { const text = node.textContent?.trim() ?? '' if (!markers.some((m) => text.startsWith(m))) continue const span = node.parentElement if (!span) continue const el = (span.closest('[class*="line"]') as HTMLElement | null) ?? span const viewportRect = viewport.getBoundingClientRect() viewport.scrollTo({ top: viewport.scrollTop + el.getBoundingClientRect().top - viewportRect.top - 32, behavior: 'smooth', }) highlightTimersRef.current.forEach(clearTimeout) el.style.backgroundColor = 'rgba(34, 197, 94, 0.2)' el.style.borderRadius = '3px' highlightTimersRef.current = [ setTimeout(() => { el.style.transition = 'background-color 1.5s ease-out' el.style.backgroundColor = 'rgba(34, 197, 94, 0)' }, 500), setTimeout(() => { el.style.backgroundColor = '' el.style.transition = '' el.style.borderRadius = '' }, 2100), ] return } }, []) useEffect(() => { if (!pendingScrollSection) return requestAnimationFrame(() => { scrollToSection(pendingScrollSection) clearPendingScrollSection() }) }, [pendingScrollSection, scrollToSection, clearPendingScrollSection]) const codeSnippetParams = useMemo( () => ({ state: sandboxParametersState, config: getSandboxParametersInfo(), actions: { useConfigObject, fileSystemListFilesLocationSet, fileSystemCreateFolderParamsSet, fileSystemDeleteFileRequiredParamsSet, useFileSystemDeleteFileRecursive, shellCommandExists, 
codeToRunExists, gitCloneOperationRequiredParamsSet, useGitCloneBranch, useGitCloneCommitId, useGitCloneUsername, useGitClonePassword, gitStatusOperationLocationSet, gitBranchesOperationLocationSet, }, }), [ sandboxParametersState, getSandboxParametersInfo, useConfigObject, fileSystemListFilesLocationSet, fileSystemCreateFolderParamsSet, fileSystemDeleteFileRequiredParamsSet, useFileSystemDeleteFileRecursive, shellCommandExists, codeToRunExists, gitCloneOperationRequiredParamsSet, useGitCloneBranch, useGitCloneCommitId, useGitCloneUsername, useGitClonePassword, gitStatusOperationLocationSet, gitBranchesOperationLocationSet, ], ) const sandboxCodeSnippetsData = useMemo( () => ({ [CodeLanguage.PYTHON]: { code: codeSnippetGenerators[CodeLanguage.PYTHON].buildFullSnippet(codeSnippetParams) }, [CodeLanguage.TYPESCRIPT]: { code: codeSnippetGenerators[CodeLanguage.TYPESCRIPT].buildFullSnippet(codeSnippetParams), }, }), [codeSnippetParams], ) const runCodeSnippet = async () => { setIsCodeSnippetRunning(true) let codeSnippetOutput = 'Creating sandbox...\n' setCodeSnippetOutput(codeSnippetOutput) let sandbox: Sandbox | undefined try { sandbox = await createSandbox() codeSnippetOutput = `Sandbox successfully created: ${sandbox.id}\n` setCodeSnippetOutput(codeSnippetOutput) if (codeToRunExists) { setCodeSnippetOutput(codeSnippetOutput + '\nRunning code...') const codeRunResponse = await sandbox.process.codeRun( sandboxParametersState['codeRunParams'].languageCode as string, ) // codeToRunExists guarantees that value isn't undefined so we put as string to silence TS compiler codeSnippetOutput += `\nCode run result: ${codeRunResponse.result}` setCodeSnippetOutput(codeSnippetOutput) } if (shellCommandExists) { setCodeSnippetOutput(codeSnippetOutput + '\nRunning shell command...') const shellCommandResponse = await sandbox.process.executeCommand( sandboxParametersState['shellCommandRunParams'].shellCommand as string, // shellCommandExists guarantees that value isn't undefined so 
we put as string to silence TS compiler ) codeSnippetOutput += `\nShell command result: ${shellCommandResponse.result}` setCodeSnippetOutput(codeSnippetOutput) } let codeRunShellCommandFinishedMessage = '\n' if (codeToRunExists && shellCommandExists) { codeRunShellCommandFinishedMessage += '🎉 Code and shell command executed successfully.' } else if (codeToRunExists) { codeRunShellCommandFinishedMessage += '🎉 Code executed successfully.' } else if (shellCommandExists) { codeRunShellCommandFinishedMessage += '🎉 Shell command executed successfully.' } codeSnippetOutput += codeRunShellCommandFinishedMessage + '\n' setCodeSnippetOutput(codeSnippetOutput) if (fileSystemCreateFolderParamsSet) { setCodeSnippetOutput(codeSnippetOutput + '\nCreating directory...') await sandbox.fs.createFolder( sandboxParametersState['createFolderParams'].folderDestinationPath, sandboxParametersState['createFolderParams'].permissions, ) codeSnippetOutput += '\n🎉 Directory created successfully.\n' setCodeSnippetOutput(codeSnippetOutput) } if (fileSystemListFilesLocationSet) { setCodeSnippetOutput(codeSnippetOutput + '\nListing directory files...') const files = await sandbox.fs.listFiles(sandboxParametersState['listFilesParams'].directoryPath) codeSnippetOutput += '\nDirectory content:' codeSnippetOutput += '\n' files.forEach((file) => { codeSnippetOutput += `Name: ${file.name}\n` codeSnippetOutput += `Is directory: ${file.isDir}\n` codeSnippetOutput += `Size: ${file.size}\n` codeSnippetOutput += `Modified: ${file.modTime}\n` }) setCodeSnippetOutput(codeSnippetOutput) } if (fileSystemDeleteFileRequiredParamsSet) { setCodeSnippetOutput( codeSnippetOutput + `\nDeleting ${useFileSystemDeleteFileRecursive ? 'directory' : 'file'}...`, ) await sandbox.fs.deleteFile( sandboxParametersState['deleteFileParams'].filePath, useFileSystemDeleteFileRecursive || false, ) codeSnippetOutput += `\n🎉 ${useFileSystemDeleteFileRecursive ? 
'Directory' : 'File'} deleted successfully.\n` setCodeSnippetOutput(codeSnippetOutput) } if (gitCloneOperationRequiredParamsSet) { setCodeSnippetOutput(codeSnippetOutput + '\nCloning repo...') await sandbox.git.clone( sandboxParametersState['gitCloneParams'].repositoryURL, sandboxParametersState['gitCloneParams'].cloneDestinationPath, useGitCloneBranch ? sandboxParametersState['gitCloneParams'].branchToClone : undefined, useGitCloneCommitId ? sandboxParametersState['gitCloneParams'].commitToClone : undefined, useGitCloneUsername ? sandboxParametersState['gitCloneParams'].authUsername : undefined, useGitClonePassword ? sandboxParametersState['gitCloneParams'].authPassword : undefined, ) codeSnippetOutput += '\n🎉 Repository cloned successfully.\n' setCodeSnippetOutput(codeSnippetOutput) } if (gitStatusOperationLocationSet) { setCodeSnippetOutput(codeSnippetOutput + '\nFetching repository status...') const status = await sandbox.git.status(sandboxParametersState['gitStatusParams'].repositoryPath) codeSnippetOutput += `\nCurrent branch: ${status.currentBranch}\n` codeSnippetOutput += `Commits ahead: ${status.ahead}\n` codeSnippetOutput += `Commits behind: ${status.behind}\n` status.fileStatus.forEach((file) => (codeSnippetOutput += `File: ${file.name}\n`)) setCodeSnippetOutput(codeSnippetOutput) } if (gitBranchesOperationLocationSet) { setCodeSnippetOutput(codeSnippetOutput + '\nFetching repository branches...') const response = await sandbox.git.branches(sandboxParametersState['gitBranchesParams'].repositoryPath) codeSnippetOutput += '\n' response.branches.forEach((branch) => (codeSnippetOutput += `Branch: ${branch}\n`)) setCodeSnippetOutput(codeSnippetOutput) } setCodeSnippetOutput(codeSnippetOutput + '\nSandbox session finished.') } catch (error) { console.error(error) setCodeSnippetOutput( <> {codeSnippetOutput}
{createErrorMessageOutput(error)} , ) } finally { setIsCodeSnippetRunning(false) } } const resultPanelRef = usePanelRef() return ( Sandbox Code setCodeSnippetLanguage(languageValue as CodeLanguage)} >
{codeSnippetSupportedLanguages.map((language) => (
{`${language.label} {language.label}
))}
{ if (resultPanelRef.current?.isCollapsed()) { resultPanelRef.current.resize('20%') } else { resultPanelRef.current?.collapse() } }} >
{codeSnippetSupportedLanguages.map((language) => ( ))}
Result
resultPanelRef.current?.resize('80%')} tooltipText="Maximize" className="h-6 w-6" size="sm" variant="ghost" > resultPanelRef.current?.collapse()} >
Code output will be shown here...
) } />
) } export default SandboxCodeSnippetsResponse

================================================
FILE: apps/dashboard/src/components/Playground/Sandbox/Parameters/FileSystem.tsx
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import {
  CreateFolderParams,
  DeleteFileParams,
  FileSystemActionFormData,
  ListFilesParams,
  ParameterFormData,
  ParameterFormItem,
} from '@/contexts/PlaygroundContext'
import { FileSystemActions } from '@/enums/Playground'
import { usePlayground } from '@/hooks/usePlayground'
import PlaygroundActionForm from '../../ActionForm'
import FormCheckboxInput from '../../Inputs/CheckboxInput'
import InlineInputFormControl from '../../Inputs/InlineInputFormControl'
import FormTextInput from '../../Inputs/TextInput'

// Renders the File System parameter forms (createFolder / listFiles / deleteFile)
// for the Playground's sandbox configuration panel.
const SandboxFileSystem: React.FC = () => {
  const { sandboxParametersState, playgroundActionParamValueSetter } = usePlayground()

  // Current parameter values for each file-system action, read from playground state.
  const createFolderParams = sandboxParametersState['createFolderParams']
  const listFilesParams = sandboxParametersState['listFilesParams']
  const deleteFileParams = sandboxParametersState['deleteFileParams']

  // Form metadata (label / placeholder / required) for each input field below.
  const listFilesDirectoryFormData: ParameterFormItem & { key: 'directoryPath' } = {
    label: 'Directory location',
    key: 'directoryPath',
    placeholder: 'Directory path to list',
    required: true,
  }
  const createFolderParamsFormData: ParameterFormData = [
    {
      label: 'Folder location',
      key: 'folderDestinationPath',
      placeholder: 'Path where the directory should be created',
      required: true,
    },
    {
      label: 'Permissions',
      key: 'permissions',
      placeholder: 'Directory permissions in octal format (e.g. "755")',
      required: true,
    },
  ]
  const deleteFileLocationFormData: ParameterFormItem & { key: 'filePath' } = {
    label: 'File location',
    key: 'filePath',
    placeholder: 'Path to the file or directory to delete',
    required: true,
  }
  const deleteFileRecursiveFormData: ParameterFormItem & { key: 'recursive' } = {
    label: 'Delete directory',
    key: 'recursive',
    placeholder: 'If the file is a directory, this must be true to delete it.',
  }

  // One entry per file-system SDK method shown in the action form list.
  // NOTE(review): a generic type argument may have been stripped from
  // FileSystemActionFormData by extraction — compare with GitOperations.tsx.
  const fileSystemActionsFormData: FileSystemActionFormData[] = [
    {
      methodName: FileSystemActions.CREATE_FOLDER,
      label: 'createFolder()',
      description: 'Creates a new directory in the Sandbox at the specified path with the given permissions',
      parametersFormItems: createFolderParamsFormData,
      parametersState: createFolderParams,
    },
    {
      methodName: FileSystemActions.LIST_FILES,
      label: 'listFiles()',
      description: 'Lists files and directories in a given path and returns their information',
      parametersFormItems: [listFilesDirectoryFormData],
      parametersState: listFilesParams,
    },
    {
      methodName: FileSystemActions.DELETE_FILE,
      label: 'deleteFile()',
      description: 'Deletes a file from the Sandbox',
      parametersFormItems: [deleteFileLocationFormData, deleteFileRecursiveFormData],
      parametersState: deleteFileParams,
    },
  ]

  return (
{fileSystemActionsFormData.map((fileSystemAction) => (
actionFormItem={fileSystemAction} hideRunActionButton />
{fileSystemAction.methodName === FileSystemActions.LIST_FILES && ( playgroundActionParamValueSetter( fileSystemAction, listFilesDirectoryFormData, 'listFilesParams', value, ) } /> )} {fileSystemAction.methodName === FileSystemActions.CREATE_FOLDER && ( <> {createFolderParamsFormData.map((createFolderParamFormItem) => ( playgroundActionParamValueSetter( fileSystemAction, createFolderParamFormItem, 'createFolderParams', value, ) } /> ))} )} {fileSystemAction.methodName === FileSystemActions.DELETE_FILE && ( <> playgroundActionParamValueSetter( fileSystemAction, deleteFileLocationFormData, 'deleteFileParams', value, ) } /> playgroundActionParamValueSetter( fileSystemAction, deleteFileRecursiveFormData, 'deleteFileParams', checked, ) } /> )}
))}
) } export default SandboxFileSystem

================================================
FILE: apps/dashboard/src/components/Playground/Sandbox/Parameters/GitOperations.tsx
================================================

/*
 * Copyright 2025 Daytona Platforms Inc.
 * SPDX-License-Identifier: AGPL-3.0
 */

import {
  GitBranchesParams,
  GitCloneParams,
  GitOperationsActionFormData,
  GitStatusParams,
  ParameterFormData,
  ParameterFormItem,
} from '@/contexts/PlaygroundContext'
import { GitOperationsActions } from '@/enums/Playground'
import { usePlayground } from '@/hooks/usePlayground'
import PlaygroundActionForm from '../../ActionForm'
import InlineInputFormControl from '../../Inputs/InlineInputFormControl'
import FormTextInput from '../../Inputs/TextInput'

// Renders the Git Operations parameter forms (clone / status / branches)
// for the Playground's sandbox configuration panel.
const SandboxGitOperations: React.FC = () => {
  const { sandboxParametersState, playgroundActionParamValueSetter } = usePlayground()

  // Current parameter values for each git action, read from playground state.
  const gitCloneParams = sandboxParametersState['gitCloneParams']
  const gitStatusParams = sandboxParametersState['gitStatusParams']
  const gitBranchesParams = sandboxParametersState['gitBranchesParams']

  // Form metadata for clone(): URL and destination are required, the rest are optional.
  const gitCloneParamsFormData: ParameterFormData = [
    { label: 'URL', key: 'repositoryURL', placeholder: 'Repository URL to clone from', required: true },
    {
      label: 'Destination',
      key: 'cloneDestinationPath',
      placeholder: 'Path where the repository should be cloned',
      required: true,
    },
    { label: 'Branch', key: 'branchToClone', placeholder: 'Specific branch to clone' },
    { label: 'Commit', key: 'commitToClone', placeholder: 'Specific commit to clone' },
    { label: 'Username', key: 'authUsername', placeholder: 'Git username for authentication' },
    { label: 'Password', key: 'authPassword', placeholder: 'Git password or token for authentication' },
  ]
  // Shared repo-path field reused by both status() and branches().
  const gitRepoLocationFormData: ParameterFormItem & { key: 'repositoryPath' } = {
    label: 'Repo location',
    key: 'repositoryPath',
    placeholder: 'Path to the Git repository root',
    required: true,
  }

  // One entry per git SDK method shown in the action form list.
  const gitOperationsActionsFormData: GitOperationsActionFormData<
    GitCloneParams | GitStatusParams | GitBranchesParams
  >[] = [
    {
      methodName: GitOperationsActions.GIT_CLONE,
      label: 'clone()',
      description: 'Clones a Git repository into the specified path',
      parametersFormItems: gitCloneParamsFormData,
      parametersState: gitCloneParams,
    },
    {
      methodName: GitOperationsActions.GIT_STATUS,
      label: 'status()',
      description: 'Gets the current Git repository status',
      parametersFormItems: [gitRepoLocationFormData],
      parametersState: gitStatusParams,
    },
    {
      methodName: GitOperationsActions.GIT_BRANCHES_LIST,
      label: 'branches()',
      description: 'Lists branches in the repository',
      parametersFormItems: [gitRepoLocationFormData],
      parametersState: gitBranchesParams,
    },
  ]

  return (
{gitOperationsActionsFormData.map((gitOperationsAction) => (
actionFormItem={gitOperationsAction} hideRunActionButton />
{gitOperationsAction.methodName === GitOperationsActions.GIT_CLONE && ( <> {gitCloneParamsFormData.map((gitCloneParamFormItem) => ( playgroundActionParamValueSetter( gitOperationsAction, gitCloneParamFormItem, 'gitCloneParams', value, ) } /> ))} )} {gitOperationsAction.methodName === GitOperationsActions.GIT_STATUS && ( playgroundActionParamValueSetter( gitOperationsAction, gitRepoLocationFormData, 'gitStatusParams', value, ) } /> )} {gitOperationsAction.methodName === GitOperationsActions.GIT_BRANCHES_LIST && ( playgroundActionParamValueSetter( gitOperationsAction, gitRepoLocationFormData, 'gitBranchesParams', value, ) } /> )}
))}
) } export default SandboxGitOperations ================================================ FILE: apps/dashboard/src/components/Playground/Sandbox/Parameters/Management.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Tooltip } from '@/components/Tooltip' import { Label } from '@/components/ui/label' import { SANDBOX_SNAPSHOT_DEFAULT_VALUE } from '@/constants/Playground' import { NumberParameterFormItem, ParameterFormItem } from '@/contexts/PlaygroundContext' import { usePlayground } from '@/hooks/usePlayground' import { getLanguageCodeToRun } from '@/lib/playground' import { SnapshotDto } from '@daytonaio/api-client' import { CodeLanguage, Resources } from '@daytonaio/sdk' import { HelpCircleIcon } from 'lucide-react' import InlineInputFormControl from '../../Inputs/InlineInputFormControl' import FormNumberInput from '../../Inputs/NumberInput' import FormSelectInput from '../../Inputs/SelectInput' import StackedInputFormControl from '../../Inputs/StackedInputFormControl' import { useEffect } from 'react' // TODO - Currently, snapshot selection is not supported in the Playground, so props are hardcoded to an empty array and false for loading. We keep snapshot parts commented to enable it in future if requested by users. Also, sandbox creation and code snippet generation suppoort snapshot selection, so they will work when snapshot selection is enabled in the UI without requiring any additional changes. 
Currently, the snapshot value is fixed to 'Default' type SandboxManagementParametersProps = { snapshotsData: Array snapshotsLoading: boolean } const SandboxManagementParameters: React.FC = ({ snapshotsData, snapshotsLoading, }) => { const { sandboxParametersState, setSandboxParameterValue } = usePlayground() const sandboxLanguage = sandboxParametersState['language'] const sandboxSnapshotName = sandboxParametersState['snapshotName'] const resources = sandboxParametersState['resources'] const sandboxFromImageParams = sandboxParametersState['createSandboxBaseParams'] const languageFormData: ParameterFormItem = { label: 'Language', key: 'language', placeholder: 'Select sandbox language', } // const sandboxSnapshotFormData: ParameterFormItem = { // label: 'Snapshot', // key: 'snapshotName', // placeholder: 'Select sandbox snapshot', // } // Available languages const languageOptions = [ { value: CodeLanguage.PYTHON, label: 'Python (default)', }, { value: CodeLanguage.TYPESCRIPT, label: 'TypeScript', }, { value: CodeLanguage.JAVASCRIPT, label: 'JavaScript', }, ] const resourcesFormData: (NumberParameterFormItem & { key: keyof Resources })[] = [ { label: 'Compute (vCPU)', key: 'cpu', min: 1, max: Infinity, placeholder: '1' }, { label: 'Memory (GiB)', key: 'memory', min: 1, max: Infinity, placeholder: '1' }, { label: 'Storage (GiB)', key: 'disk', min: 1, max: Infinity, placeholder: '3' }, ] const lifecycleParamsFormData: (NumberParameterFormItem & { key: 'autoStopInterval' | 'autoArchiveInterval' | 'autoDeleteInterval' })[] = [ { label: 'Stop (min)', key: 'autoStopInterval', min: 0, max: Infinity, placeholder: '15' }, { label: 'Archive (min)', key: 'autoArchiveInterval', min: 0, max: Infinity, placeholder: '7' }, { label: 'Delete (min)', key: 'autoDeleteInterval', min: -1, max: Infinity, placeholder: '' }, ] // Change code to run based on selected sandbox language useEffect(() => { setSandboxParameterValue('codeRunParams', { languageCode: 
getLanguageCodeToRun(sandboxParametersState.language), }) }, [sandboxParametersState.language, setSandboxParameterValue]) const nonDefaultSnapshotSelected = sandboxSnapshotName && sandboxSnapshotName !== SANDBOX_SNAPSHOT_DEFAULT_VALUE return ( <> { setSandboxParameterValue(languageFormData.key as 'language', value as CodeLanguage) }} /> {/* ({ value: snapshot.name, label: snapshot.name, })), ]} loading={snapshotsLoading} selectValue={sandboxSnapshotName} formItem={sandboxSnapshotFormData} onChangeHandler={(snapshotName) => { setSandboxParameterValue(sandboxSnapshotFormData.key as 'snapshotName', snapshotName) }} /> */}
{nonDefaultSnapshotSelected && ( Resources cannot be modified when a non-default snapshot is selected.
} label={ } /> )}
{resourcesFormData.map((resourceParamFormItem) => ( { setSandboxParameterValue('resources', { ...resources, [resourceParamFormItem.key]: value }) }} /> ))}
{lifecycleParamsFormData.map((lifecycleParamFormItem) => ( { setSandboxParameterValue('createSandboxBaseParams', { ...sandboxFromImageParams, [lifecycleParamFormItem.key]: value, }) }} /> ))}
) } export default SandboxManagementParameters ================================================ FILE: apps/dashboard/src/components/Playground/Sandbox/Parameters/ProcessCodeExecution.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import CodeBlock from '@/components/CodeBlock' import { CodeRunParams, ParameterFormItem, ProcessCodeExecutionOperationsActionFormData, ShellCommandRunParams, } from '@/contexts/PlaygroundContext' import { ProcessCodeExecutionActions } from '@/enums/Playground' import { usePlayground } from '@/hooks/usePlayground' import { CodeLanguage } from '@daytonaio/sdk' import PlaygroundActionForm from '../../ActionForm' import StackedInputFormControl from '../../Inputs/StackedInputFormControl' const SandboxProcessCodeExecution: React.FC = () => { const { sandboxParametersState, setSandboxParameterValue } = usePlayground() const codeRunParams = sandboxParametersState['codeRunParams'] const shellCommandRunParams = sandboxParametersState['shellCommandRunParams'] const codeRunLanguageCodeFormData: ParameterFormItem & { key: 'languageCode' } = { label: 'Code to execute', key: 'languageCode', placeholder: 'Write the code you want to execute inside the sandbox', required: true, } const shellCommandFormData: ParameterFormItem & { key: 'shellCommand' } = { label: 'Shell command', key: 'shellCommand', placeholder: 'Enter a shell command to run inside the sandbox', required: true, } const processCodeExecutionActionsFormData: ProcessCodeExecutionOperationsActionFormData< CodeRunParams | ShellCommandRunParams >[] = [ { methodName: ProcessCodeExecutionActions.CODE_RUN, label: 'codeRun()', description: 'Executes code in the Sandbox using the appropriate language runtime', parametersFormItems: [codeRunLanguageCodeFormData], parametersState: codeRunParams, }, { methodName: ProcessCodeExecutionActions.SHELL_COMMANDS_RUN, label: 'executeCommand()', description: 'Executes a shell 
command in the Sandbox', parametersFormItems: [shellCommandFormData], parametersState: shellCommandRunParams, }, ] //TODO -> Currently codeRun and executeCommand values are fixed -> when we enable user to define them implement onChange handlers with validatePlaygroundActionWithParams logic return (
{processCodeExecutionActionsFormData.map((processCodeExecutionAction) => (
actionFormItem={processCodeExecutionAction} hideRunActionButton />
{processCodeExecutionAction.methodName === ProcessCodeExecutionActions.CODE_RUN && ( )} {processCodeExecutionAction.methodName === ProcessCodeExecutionActions.SHELL_COMMANDS_RUN && ( )}
))}
) } export default SandboxProcessCodeExecution ================================================ FILE: apps/dashboard/src/components/Playground/Sandbox/Parameters/index.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Accordion, AccordionContent, AccordionItem, AccordionTrigger } from '@/components/ui/accordion' import { Switch } from '@/components/ui/switch' import { SandboxParametersSections } from '@/enums/Playground' import { usePlayground } from '@/hooks/usePlayground' import { cn } from '@/lib/utils' import { BoltIcon, FolderIcon, GitBranchIcon, SquareTerminalIcon } from 'lucide-react' import SandboxFileSystem from './FileSystem' import SandboxGitOperations from './GitOperations' import SandboxManagementParameters from './Management' import SandboxProcessCodeExecution from './ProcessCodeExecution' const sandboxParametersSectionsData = [ { value: SandboxParametersSections.SANDBOX_MANAGEMENT, label: 'Management' }, { value: SandboxParametersSections.FILE_SYSTEM, label: 'File System' }, { value: SandboxParametersSections.GIT_OPERATIONS, label: 'Git Operations' }, { value: SandboxParametersSections.PROCESS_CODE_EXECUTION, label: 'Process & Code Execution' }, ] const sectionIcons = { [SandboxParametersSections.SANDBOX_MANAGEMENT]: , [SandboxParametersSections.GIT_OPERATIONS]: , [SandboxParametersSections.FILE_SYSTEM]: , [SandboxParametersSections.PROCESS_CODE_EXECUTION]: , } const SandboxParameters = ({ className }: { className?: string }) => { const { openedParametersSections, setOpenedParametersSections, enabledSections, enableSection, disableSection } = usePlayground() // TODO - Currently, snapshot selection is not supported in the Playground, so we are using empty array and false for loading. We keep to code commented to enable it in future if requested by users. 
// const { snapshotApi } = useApi() // const { selectedOrganization } = useSelectedOrganization() // const { data: snapshotsData = [], isLoading: snapshotsLoading } = useQuery({ // queryKey: ['snapshots', selectedOrganization?.id, 'all'], // queryFn: async () => { // if (!selectedOrganization) return [] // const response = await snapshotApi.getAllSnapshots(selectedOrganization.id) // return response.data.items // }, // enabled: !!selectedOrganization, // }) return (

Sandbox Configuration

Manage resources, lifecycle policies, and file systems.

setOpenedParametersSections(sections as SandboxParametersSections[])} > {sandboxParametersSectionsData.map((section) => { const isManagement = section.value === SandboxParametersSections.SANDBOX_MANAGEMENT const isEnabled = enabledSections.includes(section.value as SandboxParametersSections) const isExpanded = openedParametersSections.includes(section.value as SandboxParametersSections) return ( checked ? enableSection(section.value as SandboxParametersSections) : disableSection(section.value as SandboxParametersSections) } size="sm" className="ml-3" /> ) : undefined } >
{sectionIcons[section.value]} {section.label}
{isExpanded && (
{section.value === SandboxParametersSections.FILE_SYSTEM && } {section.value === SandboxParametersSections.GIT_OPERATIONS && } {section.value === SandboxParametersSections.SANDBOX_MANAGEMENT && ( )} {section.value === SandboxParametersSections.PROCESS_CODE_EXECUTION && ( )}
)}
) })}
) } export default SandboxParameters ================================================ FILE: apps/dashboard/src/components/Playground/Terminal/Description.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ 'use client' import { CopyButton } from '@/components/CopyButton' import { TerminalIcon } from 'lucide-react' const sampleCommands = ['ls -la', 'top', 'ps aux', 'df -h'] const TerminalCommand = ({ value }: { value: string }) => { return (
$ {value}
) } const TerminalDescription: React.FC = () => { return (

Web Terminal

Run commands, view files, and debug directly in the browser.

Common Commands

    {sampleCommands.map((cmd) => (
  • ))}
) } export default TerminalDescription ================================================ FILE: apps/dashboard/src/components/Playground/Terminal/WebTerminal.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Button } from '@/components/ui/button' import { Spinner } from '@/components/ui/spinner' import { usePlaygroundSandbox } from '@/hooks/usePlaygroundSandbox' import { AnimatePresence, motion } from 'framer-motion' import { RefreshCcw } from 'lucide-react' import { Window, WindowContent, WindowTitleBar } from '../Window' const motionLoadingProps = { initial: { opacity: 0, y: 10 }, animate: { opacity: 1, y: 0 }, exit: { opacity: 0, y: -10 }, transition: { duration: 0.175 }, } const WebTerminal: React.FC<{ className?: string }> = ({ className }) => { const { sandbox, terminal } = usePlaygroundSandbox() const loadingTerminalUrl = terminal.loading || (!sandbox.instance && !sandbox.error) return ( Sandbox Terminal
{loadingTerminalUrl || !terminal.url ? (
{loadingTerminalUrl ? ( Loading terminal... ) : ( There was an error loading the terminal. {sandbox.instance ? ( ) : ( sandbox.error && {sandbox.error} )} )}
) : ( {spendingTabAvailable && ( )} ) } export default SandboxDetailsSheet ================================================ FILE: apps/dashboard/src/components/SandboxTable/BulkActionAlertDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { AlertDialog, AlertDialogAction, AlertDialogCancel, AlertDialogContent, AlertDialogDescription, AlertDialogFooter, AlertDialogHeader, AlertDialogTitle, } from '../ui/alert-dialog' export enum BulkAction { Delete = 'delete', Start = 'start', Stop = 'stop', Archive = 'archive', } interface BulkActionData { title: string description: string buttonLabel: string buttonVariant?: 'destructive' } function getBulkActionData(action: BulkAction, count: number): BulkActionData { const countText = count === 1 ? 'this sandbox' : `these ${count} selected sandboxes` switch (action) { case BulkAction.Delete: return { title: 'Delete Sandboxes', description: `Are you sure you want to delete ${countText}? This action cannot be undone.`, buttonLabel: 'Delete', buttonVariant: 'destructive', } case BulkAction.Start: return { title: 'Start Sandboxes', description: `Are you sure you want to start ${countText}?`, buttonLabel: 'Start', } case BulkAction.Stop: return { title: 'Stop Sandboxes', description: `Are you sure you want to stop ${countText}?`, buttonLabel: 'Stop', } case BulkAction.Archive: return { title: 'Archive Sandboxes', description: `Are you sure you want to archive ${countText}? Archived sandboxes can be restored later.`, buttonLabel: 'Archive', } } } interface BulkActionAlertDialogProps { action: BulkAction | null count: number onConfirm: () => void onCancel: () => void } export function BulkActionAlertDialog({ action, count, onConfirm, onCancel }: BulkActionAlertDialogProps) { const data = action ? 
getBulkActionData(action, count) : null if (!data) return null return ( !open && onCancel()}> <> {data.title} {data.description} Cancel {data.buttonLabel} ) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/SandboxState.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { SandboxState as SandboxStateType } from '@daytonaio/api-client' import { Tooltip, TooltipContent, TooltipTrigger } from '../ui/tooltip' import { getStateLabel } from './constants' import { STATE_ICONS } from './state-icons' interface SandboxStateProps { state?: SandboxStateType errorReason?: string recoverable?: boolean } export function SandboxState({ state, errorReason, recoverable }: SandboxStateProps) { if (!state) return null const stateIcon = recoverable ? STATE_ICONS['RECOVERY'] : STATE_ICONS[state] || STATE_ICONS[SandboxStateType.UNKNOWN] const label = getStateLabel(state) if (state === SandboxStateType.ERROR || state === SandboxStateType.BUILD_FAILED) { const errorColor = recoverable ? 'text-yellow-600 dark:text-yellow-400' : 'text-red-600 dark:text-red-400' const errorContent = (
{stateIcon}
{label}
) if (!errorReason) { return errorContent } return ( {errorContent}

{errorReason}

) } return (
{stateIcon}
{label}
) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/SandboxTableActions.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { RoutePath } from '@/enums/RoutePath' import { SandboxState } from '@daytonaio/api-client' import { Terminal, MoreVertical, Play, Square, Loader2, Wrench } from 'lucide-react' import { generatePath, useNavigate } from 'react-router-dom' import { Button } from '../ui/button' import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuSeparator, DropdownMenuTrigger, } from '../ui/dropdown-menu' import { SandboxTableActionsProps } from './types' import { useMemo } from 'react' export function SandboxTableActions({ sandbox, writePermitted, deletePermitted, isLoading, onStart, onStop, onDelete, onArchive, onVnc, onOpenWebTerminal, onCreateSshAccess, onRevokeSshAccess, onRecover, onScreenRecordings, }: SandboxTableActionsProps) { const navigate = useNavigate() const menuItems = useMemo(() => { const items = [] items.push({ key: 'open', label: 'Open', onClick: () => navigate(generatePath(RoutePath.SANDBOX_DETAILS, { sandboxId: sandbox.id })), disabled: isLoading, }) if (writePermitted) { if (sandbox.state === SandboxState.STARTED) { items.push({ key: 'vnc', label: 'VNC', onClick: () => onVnc(sandbox.id), disabled: isLoading, }) items.push({ key: 'screen-recordings', label: 'Screen Recordings', onClick: () => onScreenRecordings(sandbox.id), disabled: isLoading, }) items.push({ key: 'stop', label: 'Stop', onClick: () => onStop(sandbox.id), disabled: isLoading, }) } else if (sandbox.state === SandboxState.STOPPED || sandbox.state === SandboxState.ARCHIVED) { items.push({ key: 'start', label: 'Start', onClick: () => onStart(sandbox.id), disabled: isLoading, }) } else if (sandbox.state === SandboxState.ERROR && sandbox.recoverable) { items.push({ key: 'recover', label: 'Recover', onClick: () => 
onRecover(sandbox.id), disabled: isLoading, }) } if (sandbox.state === SandboxState.STOPPED) { items.push({ key: 'archive', label: 'Archive', onClick: () => onArchive(sandbox.id), disabled: isLoading, }) } // Add SSH access options items.push({ key: 'create-ssh', label: 'Create SSH Access', onClick: () => onCreateSshAccess(sandbox.id), disabled: isLoading, }) items.push({ key: 'revoke-ssh', label: 'Revoke SSH Access', onClick: () => onRevokeSshAccess(sandbox.id), disabled: isLoading, }) } if (deletePermitted) { if (items.length > 0 && (sandbox.state === SandboxState.STOPPED || sandbox.state === SandboxState.STARTED)) { items.push({ key: 'separator', type: 'separator' }) } items.push({ key: 'delete', label: 'Delete', onClick: () => onDelete(sandbox.id), disabled: isLoading, className: 'text-red-600 dark:text-red-400', }) } return items }, [ writePermitted, deletePermitted, sandbox.state, sandbox.id, isLoading, sandbox.recoverable, onStart, onStop, onDelete, onArchive, onVnc, onCreateSshAccess, onRevokeSshAccess, onRecover, onScreenRecordings, navigate, ]) if (!writePermitted && !deletePermitted) { return null } return (
{sandbox.state === SandboxState.STARTED ? ( ) : ( )} {menuItems.map((item) => { if (item.type === 'separator') { return } return ( { e.stopPropagation() item.onClick?.() }} className={`cursor-pointer ${item.className || ''}`} disabled={item.disabled} > {item.label} ) })}
) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/SandboxTableHeader.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { cn } from '@/lib/utils' import { ArrowUpDown, Calendar, Camera, Check, Columns, Cpu, Globe, HardDrive, ListFilter, MemoryStick, RefreshCw, Square, Tag, } from 'lucide-react' import * as React from 'react' import { DebouncedInput } from '../DebouncedInput' import { TableColumnVisibilityToggle } from '../TableColumnVisibilityToggle' import { Button } from '../ui/button' import { Command, CommandEmpty, CommandGroup, CommandInput, CommandInputButton, CommandItem, CommandList, } from '../ui/command' import { DropdownMenu, DropdownMenuContent, DropdownMenuPortal, DropdownMenuSub, DropdownMenuSubContent, DropdownMenuSubTrigger, DropdownMenuTrigger, } from '../ui/dropdown-menu' import { Popover, PopoverContent, PopoverTrigger } from '../ui/popover' import { LabelFilter, LabelFilterIndicator } from './filters/LabelFilter' import { LastEventFilter, LastEventFilterIndicator } from './filters/LastEventFilter' import { RegionFilter, RegionFilterIndicator } from './filters/RegionFilter' import { ResourceFilter, ResourceFilterIndicator, ResourceFilterValue } from './filters/ResourceFilter' import { SnapshotFilter, SnapshotFilterIndicator } from './filters/SnapshotFilter' import { StateFilter, StateFilterIndicator } from './filters/StateFilter' import { SandboxTableHeaderProps } from './types' const RESOURCE_FILTERS = [ { type: 'cpu' as const, label: 'CPU', icon: Cpu }, { type: 'memory' as const, label: 'Memory', icon: MemoryStick }, { type: 'disk' as const, label: 'Disk', icon: HardDrive }, ] export function SandboxTableHeader({ table, regionOptions, regionsDataIsLoading, snapshots, snapshotsDataIsLoading, snapshotsDataHasMore, onChangeSnapshotSearchValue, onRefresh, isRefreshing = false, }: SandboxTableHeaderProps) { 
const [open, setOpen] = React.useState(false) const currentSort = table.getState().sorting[0]?.id || '' const sortableColumns = [ { id: 'name', label: 'Name' }, { id: 'state', label: 'State' }, { id: 'snapshot', label: 'Snapshot' }, { id: 'region', label: 'Region' }, { id: 'lastEvent', label: 'Last Event' }, ] return (
table.getColumn('name')?.setFilterValue(value)} placeholder="Search by Name or UUID" className="w-[240px]" /> ['name', 'id', 'labels'].includes(column.id))} getColumnLabel={(id: string) => { switch (id) { case 'name': return 'Name' case 'id': return 'UUID' case 'labels': return 'Labels' default: return id } }} /> { table.resetSorting() setOpen(false) }} > Reset No column found. {sortableColumns.map((column) => ( { const col = table.getColumn(currentValue) if (col) { col.toggleSorting(false) } setOpen(false) }} > {column.label} ))} State table.getColumn('state')?.setFilterValue(value)} /> Snapshot table.getColumn('snapshot')?.setFilterValue(value)} snapshots={snapshots} isLoading={snapshotsDataIsLoading} hasMore={snapshotsDataHasMore} onChangeSnapshotSearchValue={onChangeSnapshotSearchValue} /> Region table.getColumn('region')?.setFilterValue(value)} options={regionOptions} isLoading={regionsDataIsLoading} /> {RESOURCE_FILTERS.map(({ type, label, icon: Icon }) => ( {label} table.getColumn('resources')?.setFilterValue(value)} resourceType={type} /> ))} Labels table.getColumn('labels')?.setFilterValue(value)} /> Last Event table.getColumn('lastEvent')?.setFilterValue(value)} value={(table.getColumn('lastEvent')?.getFilterValue() as Date[]) || []} />
{(table.getColumn('state')?.getFilterValue() as string[])?.length > 0 && ( table.getColumn('state')?.setFilterValue(value)} /> )} {(table.getColumn('snapshot')?.getFilterValue() as string[])?.length > 0 && ( table.getColumn('snapshot')?.setFilterValue(value)} snapshots={snapshots} isLoading={snapshotsDataIsLoading} hasMore={snapshotsDataHasMore} onChangeSnapshotSearchValue={onChangeSnapshotSearchValue} /> )} {(table.getColumn('region')?.getFilterValue() as string[])?.length > 0 && ( table.getColumn('region')?.setFilterValue(value)} options={regionOptions} isLoading={regionsDataIsLoading} /> )} {RESOURCE_FILTERS.map(({ type }) => { const resourceValue = (table.getColumn('resources')?.getFilterValue() as ResourceFilterValue)?.[type] return resourceValue ? ( table.getColumn('resources')?.setFilterValue(value)} resourceType={type} /> ) : null })} {(table.getColumn('labels')?.getFilterValue() as string[])?.length > 0 && ( table.getColumn('labels')?.setFilterValue(value)} /> )} {(table.getColumn('lastEvent')?.getFilterValue() as Date[])?.length > 0 && ( table.getColumn('lastEvent')?.setFilterValue(value)} /> )}
) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/columns.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { formatTimestamp, getRelativeTimeString } from '@/lib/utils' import { Sandbox, SandboxDesiredState, SandboxState } from '@daytonaio/api-client' import { ColumnDef } from '@tanstack/react-table' import { ArrowDown, ArrowUp } from 'lucide-react' import React from 'react' import { EllipsisWithTooltip } from '../EllipsisWithTooltip' import { Checkbox } from '../ui/checkbox' import { Tooltip, TooltipContent, TooltipTrigger } from '../ui/tooltip' import { SandboxState as SandboxStateComponent } from './SandboxState' import { SandboxTableActions } from './SandboxTableActions' interface SortableHeaderProps { column: any label: string dataState?: string } const SortableHeader: React.FC = ({ column, label, dataState }) => { return (
column.toggleSorting(column.getIsSorted() === 'asc')} className="flex items-center" {...(dataState && { 'data-state': dataState })} > {label} {column.getIsSorted() === 'asc' ? ( ) : column.getIsSorted() === 'desc' ? ( ) : (
)}
) } interface GetColumnsProps { handleStart: (id: string) => void handleStop: (id: string) => void handleDelete: (id: string) => void handleArchive: (id: string) => void handleVnc: (id: string) => void getWebTerminalUrl: (id: string) => Promise sandboxIsLoading: Record writePermitted: boolean deletePermitted: boolean handleCreateSshAccess: (id: string) => void handleRevokeSshAccess: (id: string) => void handleRecover: (id: string) => void getRegionName: (regionId: string) => string | undefined handleScreenRecordings: (id: string) => void } export function getColumns({ handleStart, handleStop, handleDelete, handleArchive, handleVnc, getWebTerminalUrl, sandboxIsLoading, writePermitted, deletePermitted, handleCreateSshAccess, handleRevokeSshAccess, handleRecover, getRegionName, handleScreenRecordings, }: GetColumnsProps): ColumnDef[] { const handleOpenWebTerminal = async (sandboxId: string) => { const url = await getWebTerminalUrl(sandboxId) if (url) { window.open(url, '_blank') } } const columns: ColumnDef[] = [ { id: 'select', size: 30, header: ({ table }) => ( { for (const row of table.getRowModel().rows) { if (sandboxIsLoading[row.original.id] || row.original.state === SandboxState.DESTROYED) { row.toggleSelected(false) } else { row.toggleSelected(!!value) } } }} aria-label="Select all" className="translate-y-[2px]" /> ), cell: ({ row }) => { return (
row.toggleSelected(!!value)} aria-label="Select row" onClick={(e) => e.stopPropagation()} className="translate-y-[1px]" />
) }, enableSorting: false, enableHiding: false, }, { id: 'name', size: 320, enableSorting: true, enableHiding: true, header: ({ column }) => { return }, accessorKey: 'name', cell: ({ row }) => { const displayName = getDisplayName(row.original) return (
{displayName}
) }, }, { id: 'id', size: 320, enableSorting: false, enableHiding: true, header: () => { return UUID }, accessorKey: 'id', cell: ({ row }) => { return (
{row.original.id}
) }, }, { id: 'state', size: 140, enableSorting: true, enableHiding: false, header: ({ column }) => { return }, cell: ({ row }) => (
), accessorKey: 'state', }, { id: 'snapshot', size: 150, enableSorting: true, enableHiding: false, header: ({ column }) => { return }, cell: ({ row }) => { return (
{row.original.snapshot ? ( {row.original.snapshot} ) : (
-
)}
) }, accessorKey: 'snapshot', }, { id: 'region', size: 100, enableSorting: true, enableHiding: false, header: ({ column }) => { return }, cell: ({ row }) => { return (
{getRegionName(row.original.target) ?? row.original.target}
) }, accessorKey: 'target', }, { id: 'resources', size: 190, enableSorting: false, enableHiding: false, header: () => { return Resources }, cell: ({ row }) => { return (
{row.original.cpu} vCPU
{row.original.memory} GiB
{row.original.disk} GiB
) }, }, { id: 'labels', size: 110, enableSorting: false, enableHiding: true, header: () => { return Labels }, cell: ({ row }) => { const labels = Object.entries(row.original.labels ?? {}) .map(([key, value]) => `${key}: ${value}`) .join(', ') const labelCount = Object.keys(row.original.labels ?? {}).length return ( {labelCount > 0 ? (
{labelCount > 0 ? (labelCount === 1 ? '1 label' : `${labelCount} labels`) : '/'}
) : (
-
)}
{labels && (

{labels}

)}
) }, accessorFn: (row) => Object.entries(row.labels ?? {}).map(([key, value]) => `${key}: ${value}`), }, { id: 'lastEvent', size: 120, enableSorting: true, enableHiding: false, header: ({ column }) => { return }, accessorFn: (row) => getLastEvent(row).date, cell: ({ row }) => { const lastEvent = getLastEvent(row.original) return (
{lastEvent.relativeTimeString}
) }, }, { id: 'createdAt', size: 200, enableSorting: true, enableHiding: false, header: ({ column }) => { return }, cell: ({ row }) => { const timestamp = formatTimestamp(row.original.createdAt) return (
{timestamp}
) }, }, { id: 'actions', size: 100, enableHiding: false, cell: ({ row }) => (
), }, ] return columns } function getDisplayName(sandbox: Sandbox): string { // If the sandbox is destroying and the name starts with "DESTROYED_", trim the prefix and timestamp if (sandbox.desiredState === SandboxDesiredState.DESTROYED && sandbox.name.startsWith('DESTROYED_')) { // Remove "DESTROYED_" prefix and everything after the last underscore (timestamp) const withoutPrefix = sandbox.name.substring(10) // Remove "DESTROYED_" const lastUnderscoreIndex = withoutPrefix.lastIndexOf('_') if (lastUnderscoreIndex !== -1) { return withoutPrefix.substring(0, lastUnderscoreIndex) } return withoutPrefix } return sandbox.name } function getLastEvent(sandbox: Sandbox): { date: Date; relativeTimeString: string } { return getRelativeTimeString(sandbox.updatedAt) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/constants.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { SandboxState } from '@daytonaio/api-client' import { CheckCircle, Circle, AlertTriangle, Timer, Archive } from 'lucide-react' import { FacetedFilterOption } from './types' const STATE_LABEL_MAPPING: Record = { [SandboxState.STARTED]: 'Started', [SandboxState.STOPPED]: 'Stopped', [SandboxState.ERROR]: 'Error', [SandboxState.BUILD_FAILED]: 'Build Failed', [SandboxState.BUILDING_SNAPSHOT]: 'Building Snapshot', [SandboxState.PENDING_BUILD]: 'Pending Build', [SandboxState.RESTORING]: 'Restoring', [SandboxState.ARCHIVED]: 'Archived', [SandboxState.CREATING]: 'Creating', [SandboxState.STARTING]: 'Starting', [SandboxState.STOPPING]: 'Stopping', [SandboxState.DESTROYING]: 'Deleting', [SandboxState.DESTROYED]: 'Deleted', [SandboxState.PULLING_SNAPSHOT]: 'Pulling Snapshot', [SandboxState.UNKNOWN]: 'Unknown', [SandboxState.ARCHIVING]: 'Archiving', [SandboxState.RESIZING]: 'Resizing', } export const STATUSES: FacetedFilterOption[] = [ { label: getStateLabel(SandboxState.STARTED), 
value: SandboxState.STARTED, icon: CheckCircle, }, { label: getStateLabel(SandboxState.STOPPED), value: SandboxState.STOPPED, icon: Circle }, { label: getStateLabel(SandboxState.ERROR), value: SandboxState.ERROR, icon: AlertTriangle }, { label: getStateLabel(SandboxState.BUILD_FAILED), value: SandboxState.BUILD_FAILED, icon: AlertTriangle }, { label: getStateLabel(SandboxState.STARTING), value: SandboxState.STARTING, icon: Timer }, { label: getStateLabel(SandboxState.STOPPING), value: SandboxState.STOPPING, icon: Timer }, { label: getStateLabel(SandboxState.DESTROYING), value: SandboxState.DESTROYING, icon: Timer }, { label: getStateLabel(SandboxState.ARCHIVED), value: SandboxState.ARCHIVED, icon: Archive }, { label: getStateLabel(SandboxState.ARCHIVING), value: SandboxState.ARCHIVING, icon: Timer }, ] export function getStateLabel(state?: SandboxState): string { if (!state) { return 'Unknown' } return STATE_LABEL_MAPPING[state] } ================================================ FILE: apps/dashboard/src/components/SandboxTable/filters/LabelFilter.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Button } from '@/components/ui/button' import { Input } from '@/components/ui/input' import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover' import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip' import { Plus, Trash2, X } from 'lucide-react' import { useState } from 'react' interface LabelFilterProps { value: string[] onFilterChange: (value: string[] | undefined) => void } export function LabelFilterIndicator({ value, onFilterChange }: Pick) { return (
Labels: {value.length} selected
) } export function LabelFilter({ value, onFilterChange }: LabelFilterProps) { const [newKey, setNewKey] = useState('') const [newValue, setNewValue] = useState('') // Convert string array back to key-value pairs for display const labelPairs = value.map((labelString) => { const [key, ...valueParts] = labelString.split(': ') return { key: key || '', value: valueParts.join(': ') || '' } }) const addKeyValuePair = () => { if (newKey.trim() && newValue.trim()) { const newLabelString = `${newKey.trim()}: ${newValue.trim()}` const updatedValue = [...value, newLabelString] onFilterChange(updatedValue) setNewKey('') setNewValue('') } } const removeKeyValuePair = (index: number) => { const updatedValue = value.filter((_, i) => i !== index) onFilterChange(updatedValue.length > 0 ? updatedValue : undefined) } const clearAll = () => { onFilterChange(undefined) } return (
{/* Header */}

Labels

{/* Current key-value pairs */} {labelPairs.length > 0 && (
{labelPairs.map((pair, index) => (
{pair.key}

{pair.key}

{pair.value}

{pair.value}

))}
)} {/* Add new key-value pair */}
setNewKey(e.target.value)} className="h-8" onKeyDown={(e) => { if (e.key === 'Enter' && newKey.trim() && newValue.trim()) { addKeyValuePair() } }} /> setNewValue(e.target.value)} className="h-8" onKeyDown={(e) => { if (e.key === 'Enter' && newKey.trim() && newValue.trim()) { addKeyValuePair() } }} />
) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/filters/LastEventFilter.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Button } from '@/components/ui/button' import { Popover, PopoverTrigger, PopoverContent } from '@/components/ui/popover' import { cn } from '@/lib/utils' import { Calendar } from '@/components/ui/calendar' import { Label } from '@/components/ui/label' import { useState } from 'react' import { format } from 'date-fns' import { CalendarIcon, X } from 'lucide-react' interface LastEventFilterProps { value: (Date | undefined)[] onFilterChange: (value: (Date | undefined)[] | undefined) => void } export function LastEventFilterIndicator({ value, onFilterChange }: LastEventFilterProps) { return (
Last Event:{' '} {value.some((d) => d !== undefined) ? `${value .filter((d): d is Date => d !== undefined) .map((d) => format(d, 'PPP')) .join(' - ')}` : ''}
) } interface LastEventFilterContentProps { onFilterChange: (value: (Date | undefined)[] | undefined) => void value: (Date | undefined)[] } export function LastEventFilter({ onFilterChange, value }: LastEventFilterContentProps) { const [fromDate, setFromDate] = useState(value[0]) const [toDate, setToDate] = useState(value[1]) const handleFromDateSelect = (selectedDate: Date | undefined) => { setFromDate(selectedDate) const dates = [selectedDate, toDate] const hasAnyDate = dates.some((date) => date !== undefined) onFilterChange(hasAnyDate ? dates : undefined) } const handleToDateSelect = (selectedDate: Date | undefined) => { setToDate(selectedDate) const dates = [fromDate, selectedDate] const hasAnyDate = dates.some((date) => date !== undefined) onFilterChange(hasAnyDate ? dates : undefined) } const handleClear = () => { setFromDate(undefined) setToDate(undefined) onFilterChange(undefined) } return (
) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/filters/RegionFilter.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Command, CommandCheckboxItem, CommandEmpty, CommandGroup, CommandInput, CommandInputButton, CommandList, } from '@/components/ui/command' import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover' import { Loader2, X } from 'lucide-react' import { FacetedFilterOption } from '../types' interface RegionFilterProps { value: string[] onFilterChange: (value: string[] | undefined) => void options?: FacetedFilterOption[] isLoading?: boolean } export function RegionFilterIndicator({ value, onFilterChange, options, isLoading }: RegionFilterProps) { const selectedRegionLabels = value .map((v) => options?.find((r) => r.value === v)?.label) .filter(Boolean) .join(', ') return (
Region:{' '} {selectedRegionLabels.length > 0 ? selectedRegionLabels : 'All'}
) } export function RegionFilter({ value, onFilterChange, options, isLoading }: RegionFilterProps) { return ( onFilterChange(undefined)}>Clear {isLoading ? (
Loading regions...
) : ( <> No regions found. {options?.map((region) => ( { const newValue = value.includes(region.value) ? value.filter((v) => v !== region.value) : [...value, region.value] onFilterChange(newValue.length > 0 ? newValue : undefined) }} > {region.label} ))} )}
) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/filters/ResourceFilter.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Popover, PopoverTrigger, PopoverContent } from '@/components/ui/popover' import { Input } from '@/components/ui/input' import { Label } from '@/components/ui/label' import { useMemo } from 'react' import { X } from 'lucide-react' export interface ResourceFilterValue { cpu?: { min?: number; max?: number } memory?: { min?: number; max?: number } disk?: { min?: number; max?: number } } interface ResourceFilterProps { value: ResourceFilterValue onFilterChange: (value: ResourceFilterValue | undefined) => void resourceType?: 'cpu' | 'memory' | 'disk' } const RESOURCE_CONFIG = { cpu: { label: 'vCPU', displayLabel: 'CPU' }, memory: { label: 'Memory (GiB)', displayLabel: 'Memory' }, disk: { label: 'Disk (GiB)', displayLabel: 'Disk' }, } as const export function ResourceFilterIndicator({ value, onFilterChange, resourceType }: ResourceFilterProps) { const { title, label } = useMemo(() => { let title = 'All' let label = 'Resources' if (resourceType) { const resourceValue = value[resourceType] if (resourceValue?.min || resourceValue?.max) { const config = RESOURCE_CONFIG[resourceType] const unit = resourceType === 'cpu' ? 'vCPU' : 'GiB' title = `${resourceValue.min ?? 'Any'} - ${resourceValue.max ?? 'Any'} ${unit}` label = config.displayLabel } } else { const filters: string[] = [] Object.entries(RESOURCE_CONFIG).forEach(([type, config]) => { const resourceValue = value[type as keyof ResourceFilterValue] if (resourceValue?.min || resourceValue?.max) { const unit = type === 'cpu' ? 'vCPU' : 'GiB' filters.push(`${config.displayLabel}: ${resourceValue.min ?? 'any'}-${resourceValue.max ?? 'any'} ${unit}`) } }) title = filters.length > 0 ? 
filters.join('; ') : 'All' } return { title, label } }, [value, resourceType]) return (
{label}: {title}
) } export function ResourceFilter({ value, onFilterChange, resourceType }: ResourceFilterProps) { const handleValueChange = ( resource: keyof ResourceFilterValue, field: 'min' | 'max', newValue: number | undefined, ) => { const currentResourceValue = value[resource] || {} const updatedResourceValue = { ...currentResourceValue, [field]: newValue } if (newValue === undefined && !currentResourceValue.min && !currentResourceValue.max) { const newFilterValue = { ...value } delete newFilterValue[resource] onFilterChange(Object.keys(newFilterValue).length > 0 ? newFilterValue : undefined) return } const newFilterValue = { ...value, [resource]: updatedResourceValue } onFilterChange(newFilterValue) } const handleClear = (resource: keyof ResourceFilterValue) => { const newFilterValue = { ...value } delete newFilterValue[resource] onFilterChange(Object.keys(newFilterValue).length > 0 ? newFilterValue : undefined) } if (resourceType) { const config = RESOURCE_CONFIG[resourceType] const currentValues = value[resourceType] || {} return (
{ const newValue = e.target.value ? Number(e.target.value) : undefined handleValueChange(resourceType, 'min', newValue) }} className="w-full" />
{ const newValue = e.target.value ? Number(e.target.value) : undefined handleValueChange(resourceType, 'max', newValue) }} className="w-full" />
) } return null } ================================================ FILE: apps/dashboard/src/components/SandboxTable/filters/SnapshotFilter.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Command, CommandCheckboxItem, CommandEmpty, CommandGroup, CommandInput, CommandInputButton, CommandList, } from '@/components/ui/command' import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover' import { SnapshotDto } from '@daytonaio/api-client' import { Loader2, X } from 'lucide-react' import { useState } from 'react' interface SnapshotFilterProps { value: string[] onFilterChange: (value: string[] | undefined) => void snapshots: SnapshotDto[] isLoading: boolean hasMore?: boolean onChangeSnapshotSearchValue: (name?: string) => void } export function SnapshotFilterIndicator({ value, onFilterChange, snapshots, isLoading, hasMore, onChangeSnapshotSearchValue, }: SnapshotFilterProps) { return (
Snapshot: {value.length} selected
) } export function SnapshotFilter({ value, onFilterChange, snapshots, isLoading, hasMore, onChangeSnapshotSearchValue, }: SnapshotFilterProps) { const [searchValue, setSearchValue] = useState('') const handleSelect = (snapshotName: string) => { const newValue = value.includes(snapshotName) ? value.filter((name) => name !== snapshotName) : [...value, snapshotName] onFilterChange(newValue.length > 0 ? newValue : undefined) } const handleSearchChange = (search: string | number) => { const searchStr = String(search) setSearchValue(searchStr) if (onChangeSnapshotSearchValue) { onChangeSnapshotSearchValue(searchStr || undefined) } } return ( { onFilterChange(undefined) setSearchValue('') if (onChangeSnapshotSearchValue) { onChangeSnapshotSearchValue(undefined) } }} > Clear {hasMore && (
Please refine your search to see more Snapshots.
)} {isLoading ? (
Loading Snapshots...
) : ( <> No Snapshots found. {snapshots.map((snapshot) => ( handleSelect(snapshot.name ?? '')} value={snapshot.name} className="cursor-pointer" checked={value.includes(snapshot.name ?? '')} > {snapshot.name} ))} )}
) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/filters/StateFilter.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Command, CommandCheckboxItem, CommandGroup, CommandInput, CommandInputButton, CommandList, } from '@/components/ui/command' import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover' import { SandboxState } from '@daytonaio/api-client' import { X } from 'lucide-react' import { STATUSES, getStateLabel } from '../constants' interface StateFilterProps { value: string[] onFilterChange: (value: string[] | undefined) => void } export function StateFilterIndicator({ value, onFilterChange }: StateFilterProps) { const selectedStates = value.map((v) => getStateLabel(v as SandboxState)) return (
States:{' '} {selectedStates.length > 0 ? selectedStates.length > 2 ? `${selectedStates[0]}, ${selectedStates[1]}, +${selectedStates.length - 2}` : `${selectedStates.join(', ')}` : ''}
) } export function StateFilter({ value, onFilterChange }: StateFilterProps) { return ( onFilterChange(undefined)} > Clear {STATUSES.map((status) => ( { const newValue = value.includes(status.value) ? value.filter((v) => v !== status.value) : [...value, status.value] onFilterChange(newValue.length > 0 ? newValue : undefined) }} > {status.label} ))} ) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/index.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { RoutePath } from '@/enums/RoutePath' import { useSelectedOrganization } from '@/hooks/useSelectedOrganization' import { cn } from '@/lib/utils' import { filterArchivable, filterDeletable, filterStartable, filterStoppable, getBulkActionCounts, } from '@/lib/utils/sandbox' import { OrganizationRolePermissionsEnum, Sandbox, SandboxState } from '@daytonaio/api-client' import { flexRender } from '@tanstack/react-table' import { Container } from 'lucide-react' import { AnimatePresence } from 'motion/react' import { useCallback, useMemo, useState } from 'react' import { useNavigate } from 'react-router-dom' import { useCommandPaletteActions } from '../CommandPalette' import { Pagination } from '../Pagination' import { SelectionToast } from '../SelectionToast' import { TableEmptyState } from '../TableEmptyState' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '../ui/table' import { BulkAction, BulkActionAlertDialog } from './BulkActionAlertDialog' import { SandboxTableHeader } from './SandboxTableHeader' import { SandboxTableProps } from './types' import { useSandboxCommands } from './useSandboxCommands' import { useSandboxTable } from './useSandboxTable' export function SandboxTable({ data, sandboxIsLoading, sandboxStateIsTransitioning, loading, snapshots, snapshotsDataIsLoading, snapshotsDataHasMore, onChangeSnapshotSearchValue, regionsData, 
regionsDataIsLoading, getRegionName, handleStart, handleStop, handleDelete, handleBulkDelete, handleBulkStart, handleBulkStop, handleBulkArchive, handleArchive, handleVnc, getWebTerminalUrl, handleCreateSshAccess, handleRevokeSshAccess, handleScreenRecordings, handleRefresh, isRefreshing, onRowClick, pagination, pageCount, totalItems, onPaginationChange, sorting, onSortingChange, filters, onFiltersChange, handleRecover, }: SandboxTableProps) { const navigate = useNavigate() const { authenticatedUserHasPermission } = useSelectedOrganization() const writePermitted = authenticatedUserHasPermission(OrganizationRolePermissionsEnum.WRITE_SANDBOXES) const deletePermitted = authenticatedUserHasPermission(OrganizationRolePermissionsEnum.DELETE_SANDBOXES) const { table, regionOptions } = useSandboxTable({ data, sandboxIsLoading, writePermitted, deletePermitted, handleStart, handleStop, handleDelete, handleArchive, handleVnc, getWebTerminalUrl, handleCreateSshAccess, handleRevokeSshAccess, handleScreenRecordings, pagination, pageCount, onPaginationChange, sorting, onSortingChange, filters, onFiltersChange, regionsData, handleRecover, getRegionName, }) const [pendingBulkAction, setPendingBulkAction] = useState(null) const selectedRows = table.getRowModel().rows.filter((row) => row.getIsSelected()) const hasSelection = selectedRows.length > 0 const selectedCount = selectedRows.length const totalCount = table.getRowModel().rows.length const selectedSandboxes: Sandbox[] = selectedRows.map((row) => row.original) const bulkActionCounts = useMemo(() => getBulkActionCounts(selectedSandboxes), [selectedSandboxes]) const handleBulkActionConfirm = () => { if (!pendingBulkAction) return const handlers: Record void> = { [BulkAction.Delete]: () => handleBulkDelete(filterDeletable(selectedSandboxes).map((s) => s.id)), [BulkAction.Start]: () => handleBulkStart(filterStartable(selectedSandboxes).map((s) => s.id)), [BulkAction.Stop]: () => 
handleBulkStop(filterStoppable(selectedSandboxes).map((s) => s.id)), [BulkAction.Archive]: () => handleBulkArchive(filterArchivable(selectedSandboxes).map((s) => s.id)), } handlers[pendingBulkAction]() setPendingBulkAction(null) table.toggleAllRowsSelected(false) } const toggleAllRowsSelected = useCallback( (selected: boolean) => { if (selected) { for (const row of table.getRowModel().rows) { const selectDisabled = sandboxIsLoading[row.original.id] || row.original.state === SandboxState.DESTROYED if (!selectDisabled) { row.toggleSelected(true) } } } else { table.toggleAllRowsSelected(selected) } }, [sandboxIsLoading, table], ) const selectableCount = useMemo(() => { return data.filter((sandbox) => !sandboxIsLoading[sandbox.id] && sandbox.state !== SandboxState.DESTROYED).length }, [sandboxIsLoading, data]) useSandboxCommands({ writePermitted, deletePermitted, selectedCount, totalCount, selectableCount, toggleAllRowsSelected, bulkActionCounts, onDelete: () => setPendingBulkAction(BulkAction.Delete), onStart: () => setPendingBulkAction(BulkAction.Start), onStop: () => setPendingBulkAction(BulkAction.Stop), onArchive: () => setPendingBulkAction(BulkAction.Archive), }) const { setIsOpen } = useCommandPaletteActions() const handleOpenCommandPalette = () => { setIsOpen(true) } return ( <> {table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => { return ( header.column.getCanSort() && header.column.toggleSorting(header.column.getIsSorted() === 'asc') } className={cn( 'sticky top-0 z-[3] border-b border-border', header.column.getCanSort() ? 'hover:bg-muted cursor-pointer' : '', )} style={{ width: `${header.column.getSize()}px`, }} > {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ) })} ))} {loading ? ( Loading... ) : table.getRowModel().rows?.length ? 
( table.getRowModel().rows.map((row) => ( onRowClick?.(row.original)} > {row.getVisibleCells().map((cell) => ( { if (cell.column.id === 'select' || cell.column.id === 'actions') { e.stopPropagation() } }} className={cn('border-b border-border', { 'group-hover/table-row:underline': cell.column.id === 'name', })} style={{ width: `${cell.column.getSize()}px`, }} sticky={cell.column.id === 'actions' ? 'right' : undefined} > {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} )) ) : ( } description={

Spin up a Sandbox to run code in an isolated environment.

Use the Daytona SDK or CLI to create one.

{' '} to learn more.

} /> )}
{hasSelection && ( table.resetRowSelection()} onActionClick={handleOpenCommandPalette} /> )}
setPendingBulkAction(null)} /> ) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/state-icons.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { SandboxState } from '@daytonaio/api-client' import { Loader2 } from 'lucide-react' interface SquareProps { color: string } function Square({ color }: SquareProps) { return (
) } export const STATE_ICONS: Record = { [SandboxState.UNKNOWN]: , [SandboxState.CREATING]: , [SandboxState.STARTING]: , [SandboxState.STARTED]: , [SandboxState.STOPPING]: , [SandboxState.STOPPED]: , [SandboxState.DESTROYING]: , [SandboxState.DESTROYED]: , [SandboxState.ERROR]: , [SandboxState.BUILD_FAILED]: , [SandboxState.BUILDING_SNAPSHOT]: , [SandboxState.PULLING_SNAPSHOT]: , [SandboxState.PENDING_BUILD]: , [SandboxState.ARCHIVED]: , [SandboxState.ARCHIVING]: , [SandboxState.RESTORING]: , [SandboxState.RESIZING]: , RECOVERY: , } ================================================ FILE: apps/dashboard/src/components/SandboxTable/types.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { DEFAULT_SANDBOX_SORTING, SandboxFilters, SandboxSorting } from '@/hooks/useSandboxes' import { ListSandboxesPaginatedOrderEnum, ListSandboxesPaginatedSortEnum, ListSandboxesPaginatedStatesEnum, Region, Sandbox, SandboxState, SnapshotDto, } from '@daytonaio/api-client' import { ColumnFiltersState, SortingState, Table } from '@tanstack/react-table' export interface SandboxTableProps { data: Sandbox[] sandboxIsLoading: Record sandboxStateIsTransitioning: Record loading: boolean snapshots: SnapshotDto[] snapshotsDataIsLoading: boolean snapshotsDataHasMore?: boolean onChangeSnapshotSearchValue: (name?: string) => void regionsData: Region[] regionsDataIsLoading: boolean getRegionName: (regionId: string) => string | undefined handleStart: (id: string) => void handleStop: (id: string) => void handleDelete: (id: string) => void handleBulkDelete: (ids: string[]) => void handleBulkStart: (ids: string[]) => void handleBulkStop: (ids: string[]) => void handleBulkArchive: (ids: string[]) => void handleArchive: (id: string) => void handleVnc: (id: string) => void getWebTerminalUrl: (id: string) => Promise handleCreateSshAccess: (id: string) => void handleRevokeSshAccess: (id: string) => void handleRefresh: () 
=> void isRefreshing?: boolean onRowClick?: (sandbox: Sandbox) => void pagination: { pageIndex: number pageSize: number } pageCount: number totalItems: number onPaginationChange: (pagination: { pageIndex: number; pageSize: number }) => void sorting: SandboxSorting onSortingChange: (sorting: SandboxSorting) => void filters: SandboxFilters onFiltersChange: (filters: SandboxFilters) => void handleRecover: (id: string) => void handleScreenRecordings: (id: string) => void } export interface SandboxTableActionsProps { sandbox: Sandbox writePermitted: boolean deletePermitted: boolean isLoading: boolean onStart: (id: string) => void onStop: (id: string) => void onDelete: (id: string) => void onArchive: (id: string) => void onVnc: (id: string) => void onOpenWebTerminal: (id: string) => void onCreateSshAccess: (id: string) => void onRevokeSshAccess: (id: string) => void onRecover: (id: string) => void onScreenRecordings: (id: string) => void } export interface SandboxTableHeaderProps { table: Table regionOptions: FacetedFilterOption[] regionsDataIsLoading: boolean snapshots: SnapshotDto[] snapshotsDataIsLoading: boolean snapshotsDataHasMore?: boolean onChangeSnapshotSearchValue: (name?: string) => void onRefresh: () => void isRefreshing?: boolean } export interface FacetedFilterOption { label: string value: string | SandboxState icon?: any } export const convertTableSortingToApiSorting = (sorting: SortingState): SandboxSorting => { if (!sorting.length) { return DEFAULT_SANDBOX_SORTING } const sort = sorting[0] let field: ListSandboxesPaginatedSortEnum switch (sort.id) { case 'name': field = ListSandboxesPaginatedSortEnum.NAME break case 'state': field = ListSandboxesPaginatedSortEnum.STATE break case 'snapshot': field = ListSandboxesPaginatedSortEnum.SNAPSHOT break case 'region': case 'target': field = ListSandboxesPaginatedSortEnum.REGION break case 'lastEvent': case 'updatedAt': field = ListSandboxesPaginatedSortEnum.UPDATED_AT break case 'createdAt': default: field = 
ListSandboxesPaginatedSortEnum.CREATED_AT break } return { field, direction: sort.desc ? ListSandboxesPaginatedOrderEnum.DESC : ListSandboxesPaginatedOrderEnum.ASC, } } export const convertTableFiltersToApiFilters = (columnFilters: ColumnFiltersState): SandboxFilters => { const filters: SandboxFilters = {} columnFilters.forEach((filter) => { switch (filter.id) { case 'name': if (filter.value && typeof filter.value === 'string') { filters.idOrName = filter.value } break case 'state': if (Array.isArray(filter.value) && filter.value.length > 0) { filters.states = filter.value as ListSandboxesPaginatedStatesEnum[] } break case 'snapshot': if (Array.isArray(filter.value) && filter.value.length > 0) { filters.snapshots = filter.value as string[] } break case 'region': case 'target': if (Array.isArray(filter.value) && filter.value.length > 0) { filters.regions = filter.value as string[] } break case 'labels': if (Array.isArray(filter.value) && filter.value.length > 0) { const labelObj: Record = {} filter.value.forEach((label: string) => { const [key, value] = label.split(': ') if (key && value) { labelObj[key] = value } }) if (Object.keys(labelObj).length > 0) { filters.labels = labelObj } } break case 'resources': if (filter.value && typeof filter.value === 'object') { const resourceValue = filter.value as { cpu?: { min?: number; max?: number } memory?: { min?: number; max?: number } disk?: { min?: number; max?: number } } if (resourceValue.cpu?.min !== undefined) { filters.minCpu = resourceValue.cpu.min } if (resourceValue.cpu?.max !== undefined) { filters.maxCpu = resourceValue.cpu.max } if (resourceValue.memory?.min !== undefined) { filters.minMemoryGiB = resourceValue.memory.min } if (resourceValue.memory?.max !== undefined) { filters.maxMemoryGiB = resourceValue.memory.max } if (resourceValue.disk?.min !== undefined) { filters.minDiskGiB = resourceValue.disk.min } if (resourceValue.disk?.max !== undefined) { filters.maxDiskGiB = resourceValue.disk.max } } break case 
'lastEvent': if (Array.isArray(filter.value) && filter.value.length > 0) { const dateRange = filter.value as (Date | undefined)[] if (dateRange[0]) { filters.lastEventAfter = dateRange[0] } if (dateRange[1]) { filters.lastEventBefore = dateRange[1] } } break } }) return filters } export const convertApiSortingToTableSorting = (sorting: SandboxSorting): SortingState => { if (!sorting.field || !sorting.direction) { return [{ id: 'lastEvent', desc: true }] } let id: string switch (sorting.field) { case ListSandboxesPaginatedSortEnum.NAME: id = 'name' break case ListSandboxesPaginatedSortEnum.STATE: id = 'state' break case ListSandboxesPaginatedSortEnum.SNAPSHOT: id = 'snapshot' break case ListSandboxesPaginatedSortEnum.REGION: id = 'region' break case ListSandboxesPaginatedSortEnum.UPDATED_AT: id = 'lastEvent' break case ListSandboxesPaginatedSortEnum.CREATED_AT: default: id = 'createdAt' break } return [{ id, desc: sorting.direction === ListSandboxesPaginatedOrderEnum.DESC }] } export const convertApiFiltersToTableFilters = (filters: SandboxFilters): ColumnFiltersState => { const columnFilters: ColumnFiltersState = [] if (filters.idOrName) { columnFilters.push({ id: 'name', value: filters.idOrName }) } if (filters.states && filters.states.length > 0) { columnFilters.push({ id: 'state', value: filters.states }) } if (filters.snapshots && filters.snapshots.length > 0) { columnFilters.push({ id: 'snapshot', value: filters.snapshots }) } if (filters.regions && filters.regions.length > 0) { columnFilters.push({ id: 'region', value: filters.regions }) } if (filters.labels && Object.keys(filters.labels).length > 0) { const labelArray = Object.entries(filters.labels).map(([key, value]) => `${key}: ${value}`) columnFilters.push({ id: 'labels', value: labelArray }) } // Convert resource filters back to table format const resourceValue: { cpu?: { min?: number; max?: number } memory?: { min?: number; max?: number } disk?: { min?: number; max?: number } } = {} if (filters.minCpu 
!== undefined || filters.maxCpu !== undefined) { resourceValue.cpu = {} if (filters.minCpu !== undefined) resourceValue.cpu.min = filters.minCpu if (filters.maxCpu !== undefined) resourceValue.cpu.max = filters.maxCpu } if (filters.minMemoryGiB !== undefined || filters.maxMemoryGiB !== undefined) { resourceValue.memory = {} if (filters.minMemoryGiB !== undefined) resourceValue.memory.min = filters.minMemoryGiB if (filters.maxMemoryGiB !== undefined) resourceValue.memory.max = filters.maxMemoryGiB } if (filters.minDiskGiB !== undefined || filters.maxDiskGiB !== undefined) { resourceValue.disk = {} if (filters.minDiskGiB !== undefined) resourceValue.disk.min = filters.minDiskGiB if (filters.maxDiskGiB !== undefined) resourceValue.disk.max = filters.maxDiskGiB } if (Object.keys(resourceValue).length > 0) { columnFilters.push({ id: 'resources', value: resourceValue }) } // Convert date range filters back to table format if (filters.lastEventAfter || filters.lastEventBefore) { const dateRange: (Date | undefined)[] = [undefined, undefined] if (filters.lastEventAfter) dateRange[0] = filters.lastEventAfter if (filters.lastEventBefore) dateRange[1] = filters.lastEventBefore columnFilters.push({ id: 'lastEvent', value: dateRange }) } return columnFilters } ================================================ FILE: apps/dashboard/src/components/SandboxTable/useSandboxCommands.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { pluralize } from '@/lib/utils' import { BulkActionCounts } from '@/lib/utils/sandbox' import { ArchiveIcon, CheckSquare2Icon, MinusSquareIcon, PlayIcon, SquareIcon, TrashIcon } from 'lucide-react' import { useMemo } from 'react' import { CommandConfig, useRegisterCommands } from '../CommandPalette' interface UseSandboxCommandsProps { writePermitted: boolean deletePermitted: boolean selectedCount: number totalCount: number selectableCount: number toggleAllRowsSelected: (selected: boolean) => void bulkActionCounts: BulkActionCounts onDelete: () => void onStart: () => void onStop: () => void onArchive: () => void } export function useSandboxCommands({ writePermitted, deletePermitted, selectedCount, selectableCount, totalCount, toggleAllRowsSelected, bulkActionCounts, onDelete, onStart, onStop, onArchive, }: UseSandboxCommandsProps) { const rootCommands: CommandConfig[] = useMemo(() => { const commands: CommandConfig[] = [] if (selectableCount !== selectedCount) { commands.push({ id: 'select-all-sandboxes', label: 'Select All Sandboxes', icon: , onSelect: () => toggleAllRowsSelected(true), chainable: true, }) } if (selectedCount > 0) { commands.push({ id: 'deselect-all-sandboxes', label: 'Deselect All Sandboxes', icon: , onSelect: () => toggleAllRowsSelected(false), chainable: true, }) } if (writePermitted && bulkActionCounts.startable > 0) { commands.push({ id: 'start-sandboxes', label: `Start ${pluralize(bulkActionCounts.startable, 'Sandbox', 'Sandboxes')}`, icon: , onSelect: onStart, }) } if (writePermitted && bulkActionCounts.stoppable > 0) { commands.push({ id: 'stop-sandboxes', label: `Stop ${pluralize(bulkActionCounts.stoppable, 'Sandbox', 'Sandboxes')}`, icon: , onSelect: onStop, }) } if (writePermitted && bulkActionCounts.archivable > 0) { commands.push({ id: 'archive-sandboxes', label: `Archive ${pluralize(bulkActionCounts.archivable, 'Sandbox', 'Sandboxes')}`, icon: , onSelect: onArchive, }) } if 
(deletePermitted && bulkActionCounts.deletable > 0) { commands.push({ id: 'delete-sandboxes', label: `Delete ${pluralize(bulkActionCounts.deletable, 'Sandbox', 'Sandboxes')}`, icon: , onSelect: onDelete, }) } return commands }, [ selectedCount, selectableCount, toggleAllRowsSelected, writePermitted, deletePermitted, bulkActionCounts, onDelete, onStart, onStop, onArchive, ]) useRegisterCommands(rootCommands, { groupId: 'sandbox-actions', groupLabel: 'Sandbox actions', groupOrder: 0 }) } ================================================ FILE: apps/dashboard/src/components/SandboxTable/useSandboxTable.ts ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Sandbox, Region } from '@daytonaio/api-client' import { useReactTable, getCoreRowModel, getFacetedRowModel, getFacetedUniqueValues, getPaginationRowModel, VisibilityState, } from '@tanstack/react-table' import { useMemo, useState, useEffect } from 'react' import { FacetedFilterOption } from './types' import { getColumns } from './columns' import { convertApiSortingToTableSorting, convertApiFiltersToTableFilters, convertTableSortingToApiSorting, convertTableFiltersToApiFilters, } from './types' import { SandboxFilters, SandboxSorting } from '@/hooks/useSandboxes' import { LocalStorageKey } from '@/enums/LocalStorageKey' import { getLocalStorageItem, setLocalStorageItem } from '@/lib/local-storage' import { getRegionFullDisplayName } from '@/lib/utils' interface UseSandboxTableProps { data: Sandbox[] sandboxIsLoading: Record writePermitted: boolean deletePermitted: boolean handleStart: (id: string) => void handleStop: (id: string) => void handleDelete: (id: string) => void handleArchive: (id: string) => void handleVnc: (id: string) => void getWebTerminalUrl: (id: string) => Promise handleCreateSshAccess: (id: string) => void handleRevokeSshAccess: (id: string) => void handleScreenRecordings: (id: string) => void pagination: { 
pageIndex: number pageSize: number } pageCount: number onPaginationChange: (pagination: { pageIndex: number; pageSize: number }) => void sorting: SandboxSorting onSortingChange: (sorting: SandboxSorting) => void filters: SandboxFilters onFiltersChange: (filters: SandboxFilters) => void regionsData: Region[] handleRecover: (id: string) => void getRegionName: (regionId: string) => string | undefined } export function useSandboxTable({ data, sandboxIsLoading, writePermitted, deletePermitted, handleStart, handleStop, handleDelete, handleArchive, handleVnc, getWebTerminalUrl, handleCreateSshAccess, handleRevokeSshAccess, handleScreenRecordings, pagination, pageCount, onPaginationChange, sorting, onSortingChange, filters, onFiltersChange, regionsData, handleRecover, getRegionName, }: UseSandboxTableProps) { // Column visibility state management with persistence const [columnVisibility, setColumnVisibility] = useState(() => { const saved = getLocalStorageItem(LocalStorageKey.SandboxTableColumnVisibility) if (saved) { try { return JSON.parse(saved) } catch { return { id: false, labels: false } } } return { id: false, labels: false } }) useEffect(() => { setLocalStorageItem(LocalStorageKey.SandboxTableColumnVisibility, JSON.stringify(columnVisibility)) }, [columnVisibility]) // Convert API sorting and filters to table format for internal use const tableSorting = useMemo(() => convertApiSortingToTableSorting(sorting), [sorting]) const tableFilters = useMemo(() => convertApiFiltersToTableFilters(filters), [filters]) const regionOptions: FacetedFilterOption[] = useMemo(() => { return regionsData.map((region) => ({ label: getRegionFullDisplayName(region), value: region.id, })) }, [regionsData]) const columns = useMemo( () => getColumns({ handleStart, handleStop, handleDelete, handleArchive, handleVnc, getWebTerminalUrl, sandboxIsLoading, writePermitted, deletePermitted, handleCreateSshAccess, handleRevokeSshAccess, handleRecover, getRegionName, handleScreenRecordings, }), [ 
handleStart, handleStop, handleDelete, handleArchive, handleVnc, getWebTerminalUrl, sandboxIsLoading, writePermitted, deletePermitted, handleCreateSshAccess, handleRevokeSshAccess, handleRecover, getRegionName, handleScreenRecordings, ], ) const table = useReactTable({ data, columns, manualFiltering: true, onColumnFiltersChange: (updater) => { const newTableFilters = typeof updater === 'function' ? updater(table.getState().columnFilters) : updater const newApiFilters = convertTableFiltersToApiFilters(newTableFilters) onFiltersChange(newApiFilters) }, getCoreRowModel: getCoreRowModel(), manualSorting: true, onSortingChange: (updater) => { const newTableSorting = typeof updater === 'function' ? updater(table.getState().sorting) : updater const newApiSorting = convertTableSortingToApiSorting(newTableSorting) onSortingChange(newApiSorting) }, getFacetedRowModel: getFacetedRowModel(), getFacetedUniqueValues: getFacetedUniqueValues(), manualPagination: true, pageCount: pageCount, onPaginationChange: (updater) => { const newPagination = typeof updater === 'function' ? updater(table.getState().pagination) : updater onPaginationChange(newPagination) }, getPaginationRowModel: getPaginationRowModel(), state: { sorting: tableSorting, columnFilters: tableFilters, columnVisibility, pagination: { pageIndex: pagination.pageIndex, pageSize: pagination.pageSize, }, }, onColumnVisibilityChange: setColumnVisibility, defaultColumn: { size: 100, }, enableRowSelection: deletePermitted, getRowId: (row) => row.id, }) return { table, regionOptions, } } ================================================ FILE: apps/dashboard/src/components/SelectionToast.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Button } from '@/components/ui/button' import { Separator } from '@/components/ui/separator' import { cn, pluralize } from '@/lib/utils' import { CommandIcon, XIcon } from 'lucide-react' import { motion } from 'motion/react' export function SelectionToast({ className, selectedCount, onClearSelection, onActionClick, }: { className?: string selectedCount: number onActionClick: () => void onClearSelection: () => void }) { return (
{pluralize(selectedCount, 'item', 'items')} selected
) } ================================================ FILE: apps/dashboard/src/components/Sidebar.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Logo, LogoText } from '@/assets/Logo' import { OrganizationPicker } from '@/components/Organizations/OrganizationPicker' import { Sidebar as SidebarComponent, SidebarContent, SidebarFooter, SidebarGroup, SidebarGroupContent, SidebarHeader, SidebarMenu, SidebarMenuButton, SidebarMenuItem, SidebarSeparator, SidebarTrigger, useSidebar, } from '@/components/ui/sidebar' import { DAYTONA_DOCS_URL, DAYTONA_SLACK_URL } from '@/constants/ExternalLinks' import { useTheme } from '@/contexts/ThemeContext' import { FeatureFlags } from '@/enums/FeatureFlags' import { RoutePath } from '@/enums/RoutePath' import { useWebhookAppPortalAccessQuery } from '@/hooks/queries/useWebhookAppPortalAccessQuery' import { useConfig } from '@/hooks/useConfig' import { useSelectedOrganization } from '@/hooks/useSelectedOrganization' import { useUserOrganizationInvitations } from '@/hooks/useUserOrganizationInvitations' import { useWebhooks } from '@/hooks/useWebhooks' import { cn, getMetaKey } from '@/lib/utils' import { usePylon, usePylonCommands } from '@/vendor/pylon' import { OrganizationRolePermissionsEnum, OrganizationUserRoleEnum } from '@daytonaio/api-client' import { ArrowRightIcon, BookOpen, Box, ChartColumn, ChevronsUpDown, Container, CreditCard, FlaskConical, HardDrive, Joystick, KeyRound, LifeBuoyIcon, ListChecks, LockKeyhole, LogOut, Mail, MapPinned, MoonIcon, PackageOpen, SearchIcon, Server, Settings, Slack, SquareUserRound, SunIcon, TextSearch, Users, } from 'lucide-react' import { useFeatureFlagEnabled, usePostHog } from 'posthog-js/react' import React, { useMemo } from 'react' import { useAuth } from 'react-oidc-context' import { Link, useLocation, useNavigate } from 'react-router-dom' import { CommandConfig, useCommandPaletteActions, 
useRegisterCommands } from './CommandPalette' import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuSeparator, DropdownMenuTrigger, } from './ui/dropdown-menu' import { Kbd } from './ui/kbd' import { ScrollArea } from './ui/scroll-area' interface SidebarProps { isBannerVisible: boolean billingEnabled: boolean version: string } interface SidebarItem { icon: React.ReactElement label: string path: RoutePath | string onClick?: () => void } const useNavCommands = (items: { label: string; path: RoutePath | string; onClick?: () => void }[]) => { const { pathname } = useLocation() const navigate = useNavigate() const navCommands: CommandConfig[] = useMemo( () => items .filter((item) => item.path !== pathname) .map((item) => ({ id: `nav-${item.path}`, label: `Go to ${item.label}`, icon: , onSelect: () => navigate(item.path), })), [pathname, navigate, items], ) useRegisterCommands(navCommands, { groupId: 'navigation', groupLabel: 'Navigation', groupOrder: 1 }) } export function Sidebar({ isBannerVisible, billingEnabled, version }: SidebarProps) { const posthog = usePostHog() const config = useConfig() const { theme, setTheme } = useTheme() const { user, signoutRedirect } = useAuth() const { pathname } = useLocation() const sidebar = useSidebar() const { selectedOrganization, authenticatedUserOrganizationMember, authenticatedUserHasPermission } = useSelectedOrganization() const { count: organizationInvitationsCount } = useUserOrganizationInvitations() const { isInitialized: webhooksInitialized } = useWebhooks() const webhooksAccess = useWebhookAppPortalAccessQuery(selectedOrganization?.id) const orgInfraEnabled = useFeatureFlagEnabled(FeatureFlags.ORGANIZATION_INFRASTRUCTURE) const organizationExperimentsEnabled = useFeatureFlagEnabled(FeatureFlags.ORGANIZATION_EXPERIMENTS) const playgroundEnabled = useFeatureFlagEnabled(FeatureFlags.DASHBOARD_PLAYGROUND) const webhooksEnabled = useFeatureFlagEnabled(FeatureFlags.DASHBOARD_WEBHOOKS) const sidebarItems = 
useMemo(() => { const arr: SidebarItem[] = [ { icon: , label: 'Sandboxes', path: RoutePath.SANDBOXES, }, { icon: , label: 'Snapshots', path: RoutePath.SNAPSHOTS, }, { icon: , label: 'Registries', path: RoutePath.REGISTRIES, }, ] if (authenticatedUserHasPermission(OrganizationRolePermissionsEnum.READ_VOLUMES)) { arr.push({ icon: , label: 'Volumes', path: RoutePath.VOLUMES, }) } if (authenticatedUserHasPermission(OrganizationRolePermissionsEnum.READ_AUDIT_LOGS)) { arr.push({ icon: , label: 'Audit Logs', path: RoutePath.AUDIT_LOGS, }) } return arr }, [authenticatedUserHasPermission]) const settingsItems = useMemo(() => { const arr: SidebarItem[] = [ { icon: , label: 'Settings', path: RoutePath.SETTINGS, }, { icon: , label: 'API Keys', path: RoutePath.KEYS }, ] // Add Webhooks link if webhooks are initialized if (webhooksInitialized) { if (webhooksEnabled) { arr.push({ icon: , label: 'Webhooks', path: RoutePath.WEBHOOKS, }) } else { arr.push({ icon: , label: 'Webhooks', path: '#webhooks' as any, // This will be handled by onClick onClick: () => { window.open(webhooksAccess.data?.url, '_blank', 'noopener,noreferrer') }, }) } } if (authenticatedUserOrganizationMember?.role === OrganizationUserRoleEnum.OWNER) { arr.push({ icon: , label: 'Limits', path: RoutePath.LIMITS, }) } if (!selectedOrganization?.personal) { arr.push({ icon: , label: 'Members', path: RoutePath.MEMBERS, }) // TODO: uncomment when we allow creating custom roles // if (authenticatedUserOrganizationMember?.role === OrganizationUserRoleEnum.OWNER) { // arr.push({ icon: , label: 'Roles', path: RoutePath.ROLES }) // } } return arr }, [ authenticatedUserOrganizationMember?.role, selectedOrganization?.personal, webhooksInitialized, webhooksAccess.data?.url, webhooksEnabled, ]) const experimentalItems = useMemo(() => { const arr: SidebarItem[] = [] if ( organizationExperimentsEnabled && authenticatedUserOrganizationMember?.role === OrganizationUserRoleEnum.OWNER ) { arr.push({ icon: , label: 'Experimental', 
path: RoutePath.EXPERIMENTAL, }) } return arr }, [organizationExperimentsEnabled, authenticatedUserOrganizationMember?.role]) const billingItems = useMemo(() => { if (!billingEnabled || authenticatedUserOrganizationMember?.role !== OrganizationUserRoleEnum.OWNER) { return [] } return [ { icon: , label: 'Spending', path: RoutePath.BILLING_SPENDING, }, { icon: , label: 'Wallet', path: RoutePath.BILLING_WALLET, }, ] }, [billingEnabled, authenticatedUserOrganizationMember?.role]) const infrastructureItems = useMemo(() => { if (!orgInfraEnabled) { return [] } const arr = [ { icon: , label: 'Regions', path: RoutePath.REGIONS, }, ] if (authenticatedUserHasPermission(OrganizationRolePermissionsEnum.READ_RUNNERS)) { arr.push({ icon: , label: 'Runners', path: RoutePath.RUNNERS, }) } return arr }, [authenticatedUserHasPermission, orgInfraEnabled]) const handleSignOut = () => { posthog?.reset() signoutRedirect() } const miscItems = useMemo(() => { if (!playgroundEnabled) { return [] } return [ playgroundEnabled && { icon: , label: 'Playground', path: RoutePath.PLAYGROUND, }, ] }, [playgroundEnabled]) const sidebarGroups: { label: string; items: SidebarItem[] }[] = useMemo(() => { return [ { label: 'Sandboxes', items: sidebarItems }, { label: 'Misc', items: miscItems, }, { label: 'Settings', items: settingsItems }, { label: 'Billing', items: billingItems }, { label: 'Infrastructure', items: infrastructureItems }, { label: 'Experimental', items: experimentalItems }, ].filter((group) => group.items.length > 0) }, [sidebarItems, settingsItems, billingItems, infrastructureItems, experimentalItems, miscItems]) const commandItems = useMemo(() => { return sidebarGroups .flatMap((group) => group.items) .concat( { path: RoutePath.ACCOUNT_SETTINGS, label: 'Account Settings', icon: , }, { path: RoutePath.USER_INVITATIONS, label: 'Invitations', icon: , }, { path: RoutePath.ONBOARDING, label: 'Onboarding', icon: , }, ) }, [sidebarGroups]) const { unreadCount: pylonUnreadCount, toggle: 
togglePylon, isEnabled: pylonEnabled } = usePylon() usePylonCommands() const commandPaletteActions = useCommandPaletteActions() useNavCommands(commandItems) const metaKey = getMetaKey() return (
commandPaletteActions.setIsOpen(true)} > Search {metaKey} K
{sidebarGroups.map((group, i) => ( {i > 0 && } {group.items.map((item) => ( {item.onClick ? ( ) : ( {item.icon} {item.label} )} ))} ))} {pylonEnabled && ( { togglePylon() }} > Support {pylonUnreadCount > 0 && (
)} )}
Slack Docs {user?.profile.picture ? ( {user.profile.name ) : ( )}
{user?.profile.name || ''} {user?.profile.email || ''}
Account Settings setTheme(theme === 'dark' ? 'light' : 'dark')} > {theme === 'dark' ? : } {theme === 'dark' ? 'Light mode' : 'Dark mode'} Invitations {organizationInvitationsCount > 0 && ( {organizationInvitationsCount} )} Onboarding Sign out
{sidebar.open && Version {version}}
) } ================================================ FILE: apps/dashboard/src/components/SortIcon.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { cn } from '@/lib/utils' import { AnimatePresence, motion } from 'framer-motion' import { ArrowDownIcon, ArrowUpDownIcon, ArrowUpIcon } from 'lucide-react' interface Props { sort: 'asc' | 'desc' | null hideDefaultState?: boolean className?: string } const motionProps = { initial: { opacity: 0, y: 6 }, animate: { opacity: 1, y: 0 }, exit: { opacity: 0, y: -6 }, transition: { duration: 0.15 }, } const PlaceholderIcon = () => export const SortOrderIcon = ({ hideDefaultState = false, sort, className }: Props) => { const Icon = sort === 'asc' ? ArrowUpIcon : sort === 'desc' ? ArrowDownIcon : hideDefaultState ? PlaceholderIcon : ArrowUpDownIcon return ( ) } ================================================ FILE: apps/dashboard/src/components/TableColumnVisibilityToggle.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import type { Column } from '@tanstack/react-table' import { Command, CommandCheckboxItem, CommandGroup, CommandList } from './ui/command' interface TableColumnVisibilityToggleProps { columns: Column[] getColumnLabel: (id: string) => string } export function TableColumnVisibilityToggle({ columns, getColumnLabel }: TableColumnVisibilityToggleProps) { return ( {columns .filter((column) => column.getCanHide()) .map((column) => { return ( column.toggleVisibility()} > {getColumnLabel(column.id)} ) })} ) } ================================================ FILE: apps/dashboard/src/components/TableEmptyState.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { TableRow, TableCell } from './ui/table' interface TableEmptyStateProps { /** * The number of columns in the table (used for colSpan) */ colSpan: number /** * The message to display when no data is found */ message: string /** * Optional icon to display above the message */ icon?: React.ReactNode /** * Optional description text to display below the main message */ description?: React.ReactNode /** * Additional CSS classes for the container */ className?: string } export function TableEmptyState({ colSpan, message, icon, description, className = '' }: TableEmptyStateProps) { return (
{icon &&
{icon}
}

{message}

{description &&
{description}
}
) } ================================================ FILE: apps/dashboard/src/components/TierComparisonTable.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ComparisonSection, ComparisonTable } from './ComparisonTable' import { OrganizationTier, Tier } from '@/billing-api' import { TIER_RATE_LIMITS } from '@/constants/limits' import { Skeleton } from './ui/skeleton' export function TierComparisonTableSkeleton() { return (
{Array.from({ length: 5 }).map((_, index) => ( ))}
) } export function TierComparisonTable({ tiers, currentTier, className, }: { tiers: Tier[] currentTier?: OrganizationTier | null className?: string }) { return ( ) } function buildTierComparisonTableData(tiers: Tier[]): ComparisonSection[] { return [ { id: 'tiers', title: 'Tiers', rows: tiers .map((tier) => { return { label: {tier.tier}, values: [ `${tier.tierLimit.concurrentCPU}`, `${tier.tierLimit.concurrentRAMGiB}`, `${tier.tierLimit.concurrentDiskGiB}`, `${TIER_RATE_LIMITS[tier.tier]?.authenticatedRateLimit.toLocaleString() || '-'}`, `${TIER_RATE_LIMITS[tier.tier]?.sandboxCreateRateLimit.toLocaleString() || '-'}`, `${TIER_RATE_LIMITS[tier.tier]?.sandboxLifecycleRateLimit.toLocaleString() || '-'}`, ], } }) .concat({ label: Enterprise, values: Array(6).fill('Custom'), }), }, ] } ================================================ FILE: apps/dashboard/src/components/TierUpgradeCard.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { OrganizationTier, Tier } from '@/billing-api' import { Button } from '@/components/ui/button' import { Card, CardContent } from '@/components/ui/card' import { RoutePath } from '@/enums/RoutePath' import { useDowngradeTierMutation } from '@/hooks/mutations/useDowngradeTierMutation' import { useUpgradeTierMutation } from '@/hooks/mutations/useUpgradeTierMutation' import { handleApiError } from '@/lib/error-handling' import { cn } from '@/lib/utils' import { Organization } from '@daytonaio/api-client/src' import { CheckIcon, ExternalLinkIcon, Loader2 } from 'lucide-react' import { useMemo } from 'react' import { Link } from 'react-router-dom' import { toast } from 'sonner' interface Props { tiers: Tier[] organizationTier?: OrganizationTier | null organization: Organization requirementsState: { emailVerified: boolean creditCardLinked: boolean } } export function TierUpgradeCard({ tiers, organizationTier, requirementsState, organization }: Props) { 
const { currentTier, previousTier, nextTier } = useMemo(() => { const targetTiers: { currentTier?: Tier; previousTier?: Tier; nextTier?: Tier } = {} for (const tier of tiers) { if (tier.tier === organizationTier?.tier) { targetTiers.currentTier = tier } if (tier.tier < (organizationTier?.tier || 0)) { targetTiers.previousTier = tier } if (tier.tier > (organizationTier?.tier || 0) && !targetTiers.nextTier) { targetTiers.nextTier = tier } } return targetTiers }, [tiers, organizationTier]) const requirements = getTierRequirementItems(requirementsState, organizationTier, nextTier) const canUpgrade = requirements.length > 0 && requirements.every((requirement) => requirement.isChecked) const downgradeTier = useDowngradeTierMutation() const upgradeTier = useUpgradeTierMutation() const handleUpgradeTier = async (tier: number) => { if (!organization) { return } try { await upgradeTier.mutateAsync({ organizationId: organization.id, tier }) toast.success('Tier upgraded successfully') } catch (error) { handleApiError(error, 'Failed to upgrade organization tier') } } const handleDowngradeTier = async (tier: number) => { if (!organization) { return } try { await downgradeTier.mutateAsync({ organizationId: organization.id, tier }) toast.success('Tier downgraded successfully') } catch (error) { handleApiError(error, 'Failed to downgrade organization tier') } } return ( {nextTier && (
Upgrade to Tier {nextTier?.tier}
Unlock more resources and higher rate limits by completing the verification steps.
Requirements
    {requirements.map((requirement) => (
  • ))}
{requirements.length && !canUpgrade && (
Please complete all requirements to upgrade.
)}
)}
Enterprise
Contact sales at{' '} sales@daytona.io .
{organizationTier && (
Current Tier: {organizationTier?.tier}
{organizationTier.expiresAt && (
Tier expires on{' '} {organizationTier.expiresAt.toLocaleDateString('en-US', { month: 'short', day: 'numeric', })} .
)} {currentTier && currentTier?.topUpIntervalDays > 0 && (
Automatically charged {getDollarAmount(currentTier.minTopUpAmountCents)} every{' '} {currentTier.topUpIntervalDays} days.
)}
{previousTier && ( )}
)}
) } function getDollarAmount(cents: number) { return new Intl.NumberFormat('en-US', { style: 'currency', currency: 'USD', minimumFractionDigits: 0, maximumFractionDigits: 0, }).format(cents / 100) } function checkTopUpRequirementStatus(currentTier: OrganizationTier, nextTier: Tier) { if (!currentTier) { return false } if (currentTier.largestSuccessfulPaymentCents < nextTier.minTopUpAmountCents) { return false } if (nextTier.topUpIntervalDays && currentTier.largestSuccessfulPaymentDate) { const diffTime = Math.abs(Date.now() - (currentTier.largestSuccessfulPaymentDate?.getTime() || 0)) const diffDays = Math.ceil(diffTime / (1000 * 60 * 60 * 24)) return diffDays < nextTier.topUpIntervalDays } return true } function getTierRequirementItems( requirementsState: { emailVerified: boolean creditCardLinked: boolean }, currentTier?: OrganizationTier | null, tier?: Tier | null, ) { if (!tier || !currentTier) { return [] } if (tier.tier < 1 || tier.tier > 4) { return [] } const items = [] if (tier.tier === 1) { items.push({ label: 'Email verification', isChecked: requirementsState.emailVerified, link: RoutePath.ACCOUNT_SETTINGS, }) } if (tier.tier === 2) { items.push({ label: 'Credit card linked', isChecked: requirementsState.creditCardLinked, link: RoutePath.BILLING_WALLET, }) } if (tier.minTopUpAmountCents) { items.push({ label: `Top up ${getDollarAmount(tier.minTopUpAmountCents)} (${tier.topUpIntervalDays ? `every ${tier.topUpIntervalDays} days` : 'one time'})`, isChecked: checkTopUpRequirementStatus(currentTier, tier), link: RoutePath.BILLING_WALLET, }) } return items } interface TierRequirementItemProps { checked: boolean label: string link?: string externalLink?: boolean } function RequirementIcon({ checked, label }: { checked: boolean; label: string }) { return (
) } function TierRequirementItem({ checked, label, link, externalLink }: TierRequirementItemProps) { const content = ( {label} {!checked && externalLink && ( )} ) if (!checked && link) { return (
{content}
) } return
{content}
} ================================================ FILE: apps/dashboard/src/components/TimestampTooltip.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { format, formatDistanceToNow } from 'date-fns' import { ReactNode } from 'react' import { Separator } from './ui/separator' import { Tooltip, TooltipContent, TooltipTrigger } from './ui/tooltip' interface TimestampTooltipProps { timestamp?: string children: ReactNode time?: boolean } export const TimestampTooltip = ({ children, timestamp, time = true }: TimestampTooltipProps) => { if (!timestamp) { return children } const date = new Date(timestamp) const relativeTimeString = formatDistanceToNow(date, { addSuffix: true }) const dateFormat = 'MMM d, yyyy' const timeFormat = 'HH:mm:ss' const utcDate = new Date( date.getUTCFullYear(), date.getUTCMonth(), date.getUTCDate(), date.getUTCHours(), date.getUTCMinutes(), date.getUTCSeconds(), ) const utcDateFormatted = format(utcDate, dateFormat) const utcTimeFormatted = format(utcDate, timeFormat) const localDateFormatted = format(date, dateFormat) const localTimeFormatted = format(date, timeFormat) const timezoneFormatter = new Intl.DateTimeFormat('en-US', { timeZoneName: 'short', }) const timezoneParts = timezoneFormatter.formatToParts(date) const localTimezone = timezoneParts.find((part) => part.type === 'timeZoneName')?.value || 'Local' return ( {children}
{relativeTimeString}
{time && } {time && }
[UTC] {utcDateFormatted}{utcTimeFormatted}
[{localTimezone}] {localDateFormatted}{localTimeFormatted}
) } ================================================ FILE: apps/dashboard/src/components/Tooltip.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { TooltipContent, TooltipTrigger, Tooltip as UiTooltip } from '@/components/ui/tooltip' import React from 'react' export function Tooltip({ label, content, side = 'top', contentClassName, }: { label: React.ReactNode content: React.ReactNode side?: 'right' | 'left' | 'top' | 'bottom' contentClassName?: string }) { return ( {label} {content} ) } ================================================ FILE: apps/dashboard/src/components/TooltipButton.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { ComponentProps, ReactNode } from 'react' import { Button } from './ui/button' import { Tooltip, TooltipContent, TooltipTrigger } from './ui/tooltip' type Props = ComponentProps & { tooltipText: string tooltipContent?: ReactNode tooltipContainer?: HTMLElement side?: ComponentProps['side'] } function TooltipButton({ tooltipText, tooltipContent, side = 'top', tooltipContainer, ref, size = 'icon-sm', ...props }: Props) { return ( {loading ? ( ) : ( )} ) } ================================================ FILE: apps/dashboard/src/components/UsageOverview.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { cn } from '@/lib/utils' import { RegionUsageOverview } from '@daytonaio/api-client' import QuotaLine from './QuotaLine' import { Skeleton } from './ui/skeleton' export function UsageOverview({ usageOverview, className, }: { usageOverview: RegionUsageOverview className?: string }) { return (
*]:flex-1 flex-col lg:flex-row', className)}>
Compute
Memory
Storage
) } export function UsageOverviewSkeleton() { return (
) } const UsageLabel = ({ current, total, unit }: { current: number; total: number; unit: string }) => { const percentage = (current / total) * 100 const isHighUsage = percentage > 90 return ( {current} / {total} {unit} ) } ================================================ FILE: apps/dashboard/src/components/UsageOverviewIndicator.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { cn } from '@/lib/utils' import { RegionUsageOverview } from '@daytonaio/api-client/src' export function UsageOverviewIndicator({ usage, className, isLive, }: { usage: RegionUsageOverview className?: string isLive?: boolean }) { return (
{isLive && }
) } function ResourceLabel({ value, total, unit, name }: { value: number; total: number; unit?: string; name?: string }) { return ( {name} {value}/{total} {unit} ) } function LiveIndicatorDot({ className }: { className?: string }) { return (
) } ================================================ FILE: apps/dashboard/src/components/UserOrganizationInvitations/DeclineOrganizationInvitationDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React from 'react' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Button } from '@/components/ui/button' interface DeclineOrganizationInvitationDialogProps { open: boolean onOpenChange: (open: boolean) => void onDeclineInvitation: () => Promise loading: boolean } export const DeclineOrganizationInvitationDialog: React.FC = ({ open, onOpenChange, onDeclineInvitation, loading, }) => { const handleDeclineInvitation = async () => { const success = await onDeclineInvitation() if (success) { onOpenChange(false) } } return ( Decline Invitation Are you sure you want to decline this invitation to join the organization? {loading ? ( ) : ( )} ) } ================================================ FILE: apps/dashboard/src/components/UserOrganizationInvitations/OrganizationInvitationActionDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import React, { useState } from 'react' import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Button } from '@/components/ui/button' import { OrganizationInvitation } from '@daytonaio/api-client' interface OrganizationInvitationActionDialogProps { invitation: OrganizationInvitation open: boolean onOpenChange: (open: boolean) => void onAccept: (invitation: OrganizationInvitation) => Promise onDecline: (invitation: OrganizationInvitation) => Promise } export const OrganizationInvitationActionDialog: React.FC = ({ invitation, open, onOpenChange, onAccept, onDecline, }) => { const [loadingAccept, setLoadingAccept] = useState(false) const [loadingDecline, setLoadingDecline] = useState(false) const handleAccept = async () => { setLoadingAccept(true) const success = await onAccept(invitation) if (success) { onOpenChange(false) } setLoadingAccept(false) } const handleDecline = async () => { setLoadingDecline(true) const success = await onDecline(invitation) if (success) { onOpenChange(false) } setLoadingDecline(false) } return ( Organization Invitation Would you like to accept or decline this invitation?
Organization: {invitation.organizationName} Invited by: {invitation.invitedBy || 'Not specified'} Expires: {new Date(invitation.expiresAt).toLocaleString('default', { year: 'numeric', month: 'numeric', day: 'numeric', hour: 'numeric', minute: '2-digit', })}
{loadingDecline ? ( ) : ( )} {loadingAccept ? ( ) : ( )}
) } ================================================ FILE: apps/dashboard/src/components/UserOrganizationInvitations/UserOrganizationInvitationTable.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { useState } from 'react' import { Check, X } from 'lucide-react' import { ColumnDef, flexRender, getCoreRowModel, getPaginationRowModel, getSortedRowModel, SortingState, useReactTable, } from '@tanstack/react-table' import { OrganizationInvitation } from '@daytonaio/api-client' import { Pagination } from '@/components/Pagination' import { Button } from '@/components/ui/button' import { TableHeader, TableRow, TableHead, TableBody, TableCell, Table } from '@/components/ui/table' import { DeclineOrganizationInvitationDialog } from '@/components/UserOrganizationInvitations/DeclineOrganizationInvitationDialog' import { DEFAULT_PAGE_SIZE } from '@/constants/Pagination' import { TableEmptyState } from '../TableEmptyState' interface DataTableProps { data: OrganizationInvitation[] loadingData: boolean onAcceptInvitation: (invitation: OrganizationInvitation) => Promise onDeclineInvitation: (invitation: OrganizationInvitation) => Promise loadingInvitationAction: Record } export function UserOrganizationInvitationTable({ data, loadingData, onAcceptInvitation, onDeclineInvitation, loadingInvitationAction, }: DataTableProps) { const [sorting, setSorting] = useState([]) const [invitationToDecline, setInvitationToDecline] = useState(null) const [isDeclineDialogOpen, setIsDeclineDialogOpen] = useState(false) const handleDecline = (invitation: OrganizationInvitation) => { setInvitationToDecline(invitation) setIsDeclineDialogOpen(true) } const handleConfirmDecline = async () => { if (invitationToDecline) { const success = await onDeclineInvitation(invitationToDecline) if (success) { setInvitationToDecline(null) setIsDeclineDialogOpen(false) return success } } return false } const columns = 
getColumns({ onAccept: onAcceptInvitation, onDecline: handleDecline }) const table = useReactTable({ data, columns, getCoreRowModel: getCoreRowModel(), getPaginationRowModel: getPaginationRowModel(), onSortingChange: setSorting, getSortedRowModel: getSortedRowModel(), state: { sorting, }, initialState: { pagination: { pageSize: DEFAULT_PAGE_SIZE, }, }, }) return ( <>
{table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => { return ( {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ) })} ))} {loadingData ? ( Loading... ) : table.getRowModel().rows?.length ? ( table.getRowModel().rows.map((row) => ( {row.getVisibleCells().map((cell) => ( {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} )) ) : ( )}
{invitationToDecline && ( { setIsDeclineDialogOpen(open) if (!open) { setInvitationToDecline(null) } }} onDeclineInvitation={handleConfirmDecline} loading={loadingInvitationAction[invitationToDecline.id]} /> )} ) } const getColumns = ({ onAccept, onDecline, }: { onAccept: (invitation: OrganizationInvitation) => void onDecline: (invitation: OrganizationInvitation) => void }): ColumnDef[] => { const columns: ColumnDef[] = [ { accessorKey: 'organizationName', header: 'Organization', }, { accessorKey: 'invitedBy', header: 'Invited by', }, { accessorKey: 'expiresAt', header: 'Expires', cell: ({ row }) => { return new Date(row.original.expiresAt).toLocaleDateString() }, }, { id: 'actions', cell: ({ row }) => { return (
) }, }, ] return columns } ================================================ FILE: apps/dashboard/src/components/VerifyEmailDialog.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React from 'react' import { Button } from '@/components/ui/button' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' interface VerifyEmailDialogProps { open: boolean onOpenChange: (open: boolean) => void } export const VerifyEmailDialog: React.FC = ({ open, onOpenChange }) => { return ( Verify Your Account A verification email was sent to your registered email address. Please note that you must verify your email before you can create sandboxes or new organizations. ) } ================================================ FILE: apps/dashboard/src/components/VolumeTable.tsx ================================================ /* * Copyright 2025 Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { DebouncedInput } from '@/components/DebouncedInput' import { Pagination } from '@/components/Pagination' import { Button } from '@/components/ui/button' import { Checkbox } from '@/components/ui/checkbox' import { DataTableFacetedFilter, FacetedFilterOption } from '@/components/ui/data-table-faceted-filter' import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from '@/components/ui/dropdown-menu' import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '@/components/ui/table' import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip' import { DEFAULT_PAGE_SIZE } from '@/constants/Pagination' import { useSelectedOrganization } from '@/hooks/useSelectedOrganization' import { getRelativeTimeString } from '@/lib/utils' import { OrganizationRolePermissionsEnum, VolumeDto, VolumeState } from '@daytonaio/api-client' import { ColumnDef, ColumnFiltersState, flexRender, getCoreRowModel, getFacetedRowModel, getFacetedUniqueValues, getFilteredRowModel, getPaginationRowModel, getSortedRowModel, SortingState, useReactTable, } from '@tanstack/react-table' import { AlertTriangle, CheckCircle, HardDrive, Loader2, MoreHorizontal, Timer } from 'lucide-react' import { useMemo, useState } from 'react' import { TableEmptyState } from './TableEmptyState' interface VolumeTableProps { data: VolumeDto[] loading: boolean processingVolumeAction: Record onDelete: (volume: VolumeDto) => void onBulkDelete: (volumes: VolumeDto[]) => void } export function VolumeTable({ data, loading, processingVolumeAction, onDelete, onBulkDelete }: VolumeTableProps) { const { authenticatedUserHasPermission } = useSelectedOrganization() const deletePermitted = useMemo( () => authenticatedUserHasPermission(OrganizationRolePermissionsEnum.DELETE_VOLUMES), [authenticatedUserHasPermission], ) const [sorting, 
setSorting] = useState([]) const [columnFilters, setColumnFilters] = useState([]) const columns = getColumns({ onDelete, processingVolumeAction, deletePermitted, }) const table = useReactTable({ data, columns, onColumnFiltersChange: setColumnFilters, getCoreRowModel: getCoreRowModel(), getPaginationRowModel: getPaginationRowModel(), onSortingChange: setSorting, getSortedRowModel: getSortedRowModel(), getFacetedRowModel: getFacetedRowModel(), getFacetedUniqueValues: getFacetedUniqueValues(), getFilteredRowModel: getFilteredRowModel(), state: { sorting, columnFilters, }, enableRowSelection: true, getRowId: (row) => row.id, initialState: { pagination: { pageSize: DEFAULT_PAGE_SIZE, }, }, }) const [bulkDeleteConfirmationOpen, setBulkDeleteConfirmationOpen] = useState(false) return (
table.getColumn('name')?.setFilterValue(value)} placeholder="Search..." className="max-w-sm mr-4" /> {table.getColumn('state') && ( )}
{table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => { return ( {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ) })} ))} {loading ? ( Loading... ) : table.getRowModel().rows?.length ? ( table.getRowModel().rows.map((row) => ( {row.getVisibleCells().map((cell) => ( {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} )) ) : ( } description={

Volumes are shared, persistent directories backed by S3-compatible storage, perfect for reusing datasets, caching dependencies, or passing files across sandboxes.

Create one via the SDK or CLI.
Read the Volumes guide {' '} to learn more.

} /> )}
{table.getRowModel().rows.some((row) => row.getIsSelected()) && (

Are you sure you want to delete these Volumes?

)}
) } const getStateIcon = (state: VolumeState) => { switch (state) { case VolumeState.READY: return case VolumeState.ERROR: return default: return } } const getStateColor = (state: VolumeState) => { switch (state) { case VolumeState.READY: return 'text-green-500' case VolumeState.ERROR: return 'text-red-500' default: return 'text-gray-600 dark:text-gray-400' } } const getStateLabel = (state: VolumeState) => { return state .split('_') .map((word) => word.charAt(0).toUpperCase() + word.slice(1).toLowerCase()) .join(' ') } const statuses: FacetedFilterOption[] = [ { label: getStateLabel(VolumeState.CREATING), value: VolumeState.CREATING, icon: Timer }, { label: getStateLabel(VolumeState.READY), value: VolumeState.READY, icon: CheckCircle }, { label: getStateLabel(VolumeState.PENDING_CREATE), value: VolumeState.PENDING_CREATE, icon: Timer }, { label: getStateLabel(VolumeState.PENDING_DELETE), value: VolumeState.PENDING_DELETE, icon: Timer }, { label: getStateLabel(VolumeState.DELETING), value: VolumeState.DELETING, icon: Timer }, { label: getStateLabel(VolumeState.DELETED), value: VolumeState.DELETED, icon: Timer }, { label: getStateLabel(VolumeState.ERROR), value: VolumeState.ERROR, icon: AlertTriangle }, ] const getColumns = ({ onDelete, processingVolumeAction, deletePermitted, }: { onDelete: (volume: VolumeDto) => void processingVolumeAction: Record deletePermitted: boolean }): ColumnDef[] => { const columns: ColumnDef[] = [ { id: 'select', header: ({ table }) => ( { for (const row of table.getRowModel().rows) { if (processingVolumeAction[row.original.id]) { row.toggleSelected(false) } else { row.toggleSelected(!!value) } } }} aria-label="Select all" className="translate-y-[2px]" /> ), cell: ({ row }) => { if (processingVolumeAction[row.original.id]) { return } return ( row.toggleSelected(!!value)} aria-label="Select row" className="translate-y-[2px]" /> ) }, enableSorting: false, enableHiding: false, }, { accessorKey: 'name', header: 'Name', cell: ({ row }) => { 
return
{row.original.name}
}, }, { id: 'state', header: 'State', cell: ({ row }) => { const volume = row.original const state = row.original.state const color = getStateColor(state) if (state === VolumeState.ERROR && !!volume.errorReason) { return (
{getStateIcon(state)} {getStateLabel(state)}

{volume.errorReason}

) } return (
{getStateIcon(state)} {getStateLabel(state)}
) }, accessorKey: 'state', filterFn: (row, id, value) => { return value.includes(row.getValue(id)) }, }, { accessorKey: 'createdAt', header: 'Created', cell: ({ row }) => { return getRelativeTimeString(row.original.createdAt).relativeTimeString }, }, { accessorKey: 'lastUsedAt', header: 'Last Used', cell: ({ row }) => { return getRelativeTimeString(row.original.lastUsedAt).relativeTimeString }, }, { id: 'actions', enableHiding: false, cell: ({ row }) => { if (!deletePermitted) { return null } return ( onDelete(row.original)} > Delete ) }, }, ] return columns } ================================================ FILE: apps/dashboard/src/components/Webhooks/CreateEndpointDialog.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Badge } from '@/components/ui/badge' import { Button } from '@/components/ui/button' import { Command, CommandCheckboxItem, CommandEmpty, CommandGroup, CommandInput, CommandList, } from '@/components/ui/command' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger, } from '@/components/ui/dialog' import { Field, FieldError, FieldLabel } from '@/components/ui/field' import { Input } from '@/components/ui/input' import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover' import { Spinner } from '@/components/ui/spinner' import { WEBHOOK_EVENT_CATEGORIES, WEBHOOK_EVENTS } from '@/constants/webhook-events' import { handleApiError } from '@/lib/error-handling' import { cn } from '@/lib/utils' import { useForm } from '@tanstack/react-form' import { ChevronsUpDown, Plus } from 'lucide-react' import React, { useCallback, useEffect, useState } from 'react' import { toast } from 'sonner' import { useSvix } from 'svix-react' import { z } from 'zod' const formSchema = z.object({ url: z.string().min(1, 'URL is required').url('Must be a valid URL'), description: z.string(), filterTypes: 
z.array(z.string()).min(1, 'At least one event is required'), }) type FormValues = z.infer interface CreateEndpointDialogProps { onSuccess: () => void className?: string } export const CreateEndpointDialog: React.FC = ({ onSuccess, className }) => { const [open, setOpen] = useState(false) const [eventsPopoverOpen, setEventsPopoverOpen] = useState(false) const { svix, appId } = useSvix() const form = useForm({ defaultValues: { url: '', description: '', filterTypes: [], } as FormValues, validators: { onSubmit: formSchema, }, onSubmit: async ({ value }) => { try { await svix.endpoint.create(appId, { url: value.url.trim(), description: value.description?.trim() || undefined, filterTypes: value.filterTypes.length > 0 ? value.filterTypes : undefined, }) toast.success('Endpoint created') onSuccess() setOpen(false) } catch (error) { handleApiError(error, 'Failed to create endpoint') } }, }) const resetState = useCallback(() => { form.reset() }, [form]) useEffect(() => { if (open) { resetState() } }, [open, resetState]) const toggleEvent = (eventValue: string) => { const currentEvents = form.getFieldValue('filterTypes') if (currentEvents.includes(eventValue)) { form.setFieldValue( 'filterTypes', currentEvents.filter((e) => e !== eventValue), ) } else { form.setFieldValue('filterTypes', [...currentEvents, eventValue]) } } return ( Add Webhook Endpoint Configure a new endpoint to receive webhook events.
{ e.preventDefault() e.stopPropagation() form.handleSubmit() }} > {(field) => { const isInvalid = field.state.meta.isTouched && !field.state.meta.isValid return ( Endpoint Name field.handleChange(e.target.value)} placeholder="My Webhook Endpoint" /> {field.state.meta.errors.length > 0 && field.state.meta.isTouched && ( )} ) }} {(field) => { const isInvalid = field.state.meta.isTouched && !field.state.meta.isValid return ( Endpoint URL field.handleChange(e.target.value)} placeholder="https://example.com/webhook" /> {field.state.meta.errors.length > 0 && field.state.meta.isTouched && ( )} ) }} {(field) => { const selectedEvents = field.state.value const isInvalid = field.state.meta.isTouched && !field.state.meta.isValid return ( Events No events found. {WEBHOOK_EVENT_CATEGORIES.map((category) => ( {WEBHOOK_EVENTS.filter((event) => event.category === category).map((event) => ( toggleEvent(event.value)} > {event.label} ))} ))} {field.state.meta.errors.length > 0 && field.state.meta.isTouched && ( )} ) }}
[state.canSubmit, state.isSubmitting]} children={([canSubmit, isSubmitting]) => ( )} />
) } ================================================ FILE: apps/dashboard/src/components/Webhooks/DeliveryStatsLine.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { cn } from '@/lib/utils' import type { EndpointStats } from 'svix' import { motion } from 'framer-motion' import React from 'react' const transition = { type: 'spring', stiffness: 60, damping: 15, mass: 1, } as const const SEGMENTS = [ { key: 'success', label: 'Success', color: 'bg-green-500', dotColor: 'bg-green-500' }, { key: 'fail', label: 'Failed', color: 'bg-red-500', dotColor: 'bg-red-500' }, { key: 'pending', label: 'Pending', color: 'bg-muted-foreground/50', dotColor: 'bg-muted-foreground/50' }, { key: 'sending', label: 'Sending', color: 'bg-white', dotColor: 'bg-white border border-border' }, ] as const interface DeliveryStatsLineProps { stats: EndpointStats className?: string } const DeliveryStatsLine: React.FC = ({ stats, className }) => { const total = stats.success + stats.fail + stats.pending + stats.sending if (total === 0) { return (
) } return (
{SEGMENTS.map(({ key, color }) => { const value = stats[key] if (value === 0) return null const pct = (value / total) * 100 return ( ) })}
) } function Legend({ stats, total }: { stats: EndpointStats; total: number }) { return (
{SEGMENTS.map(({ key, label, dotColor }) => (
{label} {stats[key]} {total > 0 && ({Math.round((stats[key] / total) * 100)}%)}
))}
) } export default DeliveryStatsLine ================================================ FILE: apps/dashboard/src/components/Webhooks/EditEndpointDialog.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { Badge } from '@/components/ui/badge' import { Button } from '@/components/ui/button' import { Command, CommandCheckboxItem, CommandEmpty, CommandGroup, CommandInput, CommandList, } from '@/components/ui/command' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Field, FieldError, FieldLabel } from '@/components/ui/field' import { Input } from '@/components/ui/input' import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover' import { Spinner } from '@/components/ui/spinner' import { WEBHOOK_EVENT_CATEGORIES, WEBHOOK_EVENTS } from '@/constants/webhook-events' import { useUpdateWebhookEndpointMutation } from '@/hooks/mutations/useUpdateWebhookEndpointMutation' import { handleApiError } from '@/lib/error-handling' import { useForm } from '@tanstack/react-form' import { ChevronsUpDown } from 'lucide-react' import React, { useEffect, useState } from 'react' import { toast } from 'sonner' import { EndpointOut } from 'svix' import { z } from 'zod' const formSchema = z.object({ url: z.string().min(1, 'URL is required').url('Must be a valid URL'), description: z.string().min(1, 'Name is required'), filterTypes: z.array(z.string()).min(1, 'At least one event is required'), }) type FormValues = z.infer interface EditEndpointDialogProps { endpoint: EndpointOut | null open: boolean onOpenChange: (open: boolean) => void onSuccess: () => void } export const EditEndpointDialog: React.FC = ({ endpoint, open, onOpenChange, onSuccess }) => { const [eventsPopoverOpen, setEventsPopoverOpen] = useState(false) const updateMutation = useUpdateWebhookEndpointMutation() const form = 
useForm({ defaultValues: { url: '', description: '', filterTypes: [], } as FormValues, validators: { onSubmit: formSchema, }, onSubmit: async ({ value }) => { if (!endpoint) return try { await updateMutation.mutateAsync({ endpointId: endpoint.id, update: { url: value.url.trim(), description: value.description?.trim() || undefined, filterTypes: value.filterTypes.length > 0 ? value.filterTypes : undefined, }, }) toast.success('Endpoint updated') onSuccess() onOpenChange(false) } catch (error) { handleApiError(error, 'Failed to update endpoint') } }, }) useEffect(() => { if (endpoint && open) { form.reset({ url: endpoint.url, description: endpoint.description || '', filterTypes: endpoint.filterTypes || [], }) } }, [endpoint, open, form]) const handleOpenChange = (isOpen: boolean) => { onOpenChange(isOpen) if (!isOpen) { form.reset() } } const toggleEvent = (eventValue: string) => { const currentEvents = form.getFieldValue('filterTypes') if (currentEvents.includes(eventValue)) { form.setFieldValue( 'filterTypes', currentEvents.filter((e) => e !== eventValue), ) } else { form.setFieldValue('filterTypes', [...currentEvents, eventValue]) } } return ( Edit Webhook Endpoint Update the endpoint configuration.
{ e.preventDefault() e.stopPropagation() form.handleSubmit() }} > {(field) => { const isInvalid = field.state.meta.isTouched && !field.state.meta.isValid return ( Endpoint Name field.handleChange(e.target.value)} placeholder="My Webhook Endpoint" /> {field.state.meta.errors.length > 0 && field.state.meta.isTouched && ( )} ) }} {(field) => { const isInvalid = field.state.meta.isTouched && !field.state.meta.isValid return ( Endpoint URL field.handleChange(e.target.value)} placeholder="https://example.com/webhook" /> {field.state.meta.errors.length > 0 && field.state.meta.isTouched && ( )} ) }} {(field) => { const selectedEvents = field.state.value const isInvalid = field.state.meta.isTouched && !field.state.meta.isValid return ( Events No events found. {WEBHOOK_EVENT_CATEGORIES.map((category) => ( {WEBHOOK_EVENTS.filter((event) => event.category === category).map((event) => ( toggleEvent(event.value)} > {event.label} ))} ))} {field.state.meta.errors.length > 0 && field.state.meta.isTouched && ( )} ) }}
[state.canSubmit, state.isSubmitting]} children={([canSubmit, isSubmitting]) => ( )} />
) } ================================================ FILE: apps/dashboard/src/components/Webhooks/EndpointEventsTable/EndpointEventsTable.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { DebouncedInput } from '@/components/DebouncedInput' import { Pagination } from '@/components/Pagination' import { TableEmptyState } from '@/components/TableEmptyState' import { DataTableFacetedFilter } from '@/components/ui/data-table-faceted-filter' import { Skeleton } from '@/components/ui/skeleton' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '@/components/ui/table' import { DEFAULT_PAGE_SIZE } from '@/constants/Pagination' import { ColumnFiltersState, flexRender, getCoreRowModel, getFacetedRowModel, getFacetedUniqueValues, getFilteredRowModel, getPaginationRowModel, getSortedRowModel, SortingState, useReactTable, } from '@tanstack/react-table' import { Mail } from 'lucide-react' import { useCallback, useState } from 'react' import { EndpointMessageOut } from 'svix' import { columns, eventTypeOptions, statusOptions } from './columns' import { EventDetailsSheet } from './EventDetailsSheet' interface EndpointEventsTableProps { data: EndpointMessageOut[] loading: boolean onReplay: (msgId: string) => void } export function EndpointEventsTable({ data, loading, onReplay }: EndpointEventsTableProps) { const [sorting, setSorting] = useState([]) const [columnFilters, setColumnFilters] = useState([]) const [globalFilter, setGlobalFilter] = useState('') const [selectedEventIndex, setSelectedEventIndex] = useState(null) const [sheetOpen, setSheetOpen] = useState(false) const table = useReactTable({ data, columns, getCoreRowModel: getCoreRowModel(), getFilteredRowModel: getFilteredRowModel(), getPaginationRowModel: getPaginationRowModel(), onSortingChange: setSorting, getSortedRowModel: getSortedRowModel(), onColumnFiltersChange: setColumnFilters, getFacetedRowModel: 
getFacetedRowModel(), getFacetedUniqueValues: getFacetedUniqueValues(), onGlobalFilterChange: setGlobalFilter, globalFilterFn: (row, _columnId, filterValue) => { const event = row.original const searchValue = filterValue.toLowerCase() return ( (event.id?.toLowerCase().includes(searchValue) ?? false) || (event.eventType?.toLowerCase().includes(searchValue) ?? false) || (event.statusText?.toLowerCase().includes(searchValue) ?? false) ) }, state: { sorting, columnFilters, globalFilter, }, initialState: { pagination: { pageSize: DEFAULT_PAGE_SIZE, }, }, meta: { endpointEvents: { onReplay, }, }, }) const handleRowClick = useCallback((index: number) => { setSelectedEventIndex(index) setSheetOpen(true) }, []) const rowCount = table.getRowModel().rows.length const handleNavigate = useCallback( (direction: 'prev' | 'next') => { setSelectedEventIndex((prev) => { if (prev === null) return null if (direction === 'prev' && prev > 0) return prev - 1 if (direction === 'next' && prev < rowCount - 1) return prev + 1 return prev }) }, [rowCount], ) return (
setGlobalFilter(String(value))} placeholder="Search by Event Type, Message ID, or Status" className="max-w-sm mr-4" /> {table.getColumn('eventType') && ( )} {table.getColumn('status') && ( )}
{table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => { return ( {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ) })} ))} {loading ? ( <> {Array.from(new Array(5)).map((_, i) => ( {table.getVisibleLeafColumns().map((column) => ( ))} ))} ) : table.getRowModel().rows?.length ? ( table.getRowModel().rows.map((row, rowIndex) => ( handleRowClick(rowIndex)} onKeyDown={(e) => { if (e.key === 'Enter' || e.key === ' ') { e.preventDefault() handleRowClick(rowIndex) } }} > {row.getVisibleCells().map((cell) => ( {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} )) ) : ( } description={

Events will appear here when webhooks are triggered.

} /> )}
0} hasNext={selectedEventIndex !== null && selectedEventIndex < table.getRowModel().rows.length - 1} onReplay={onReplay} />
) } ================================================ FILE: apps/dashboard/src/components/Webhooks/EndpointEventsTable/EventDetailsSheet.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { CopyButton } from '@/components/CopyButton' import { TimestampTooltip } from '@/components/TimestampTooltip' import { Badge } from '@/components/ui/badge' import { Button } from '@/components/ui/button' import { ScrollArea } from '@/components/ui/scroll-area' import { Separator } from '@/components/ui/separator' import { Sheet, SheetContent, SheetHeader, SheetTitle } from '@/components/ui/sheet' import { getRelativeTimeString } from '@/lib/utils' import { ChevronDown, ChevronUp, RefreshCw, X } from 'lucide-react' import { useCallback, useState } from 'react' import { EndpointMessageOut } from 'svix' import { MessageAttemptsTable } from '../MessageAttemptsTable' interface EventDetailsSheetProps { event: EndpointMessageOut | null open: boolean onOpenChange: (open: boolean) => void onNavigate: (direction: 'prev' | 'next') => void hasPrev: boolean hasNext: boolean onReplay: (msgId: string) => void } export function EventDetailsSheet({ event, open, onOpenChange, onNavigate, hasPrev, hasNext, onReplay, }: EventDetailsSheetProps) { const [attemptsReloadKey, setAttemptsReloadKey] = useState(0) const handleReplay = useCallback( (msgId: string) => { onReplay(msgId) setAttemptsReloadKey((prev) => prev + 1) }, [onReplay], ) if (!event) return null const hasPayload = event.payload && Object.keys(event.payload).length > 0 const payload = hasPayload ? typeof event.payload === 'string' ? event.payload : JSON.stringify(event.payload, null, 2) : '' const { relativeTimeString } = getRelativeTimeString(event.timestamp) return ( Event Details
Overview
Message ID
{event.id}
Status {event.status === 0 ? 'Success' : event.status === 1 ? 'Pending' : 'Failed'}
Event Type {event.eventType}
Sent {relativeTimeString}
{event.nextAttempt && (
Next Attempt {getRelativeTimeString(event.nextAttempt).relativeTimeString}
)} {event.channels && event.channels.length > 0 && (
Channels
{event.channels.map((channel) => ( {channel} ))}
)} {event.tags && event.tags.length > 0 && (
Tags
{event.tags.map((tag) => ( {tag} ))}
)} {event.eventId && (
Event ID
{event.eventId}
)}
Payload {hasPayload && }
{hasPayload ? (
                {payload}
              
) : (
This event has no payload
)}
) } ================================================ FILE: apps/dashboard/src/components/Webhooks/EndpointEventsTable/columns.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { TimestampTooltip } from '@/components/TimestampTooltip' import { Badge } from '@/components/ui/badge' import { Button } from '@/components/ui/button' import { FacetedFilterOption } from '@/components/ui/data-table-faceted-filter' import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from '@/components/ui/dropdown-menu' import { WEBHOOK_EVENTS } from '@/constants/webhook-events' import { getRelativeTimeString } from '@/lib/utils' import { ColumnDef, RowData, Table } from '@tanstack/react-table' import { CheckCircle, Clock, MoreHorizontal, XCircle } from 'lucide-react' import { EndpointMessageOut } from 'svix' import { CopyButton } from '../../CopyButton' type EndpointEventsTableMeta = { onReplay: (msgId: string) => void } declare module '@tanstack/react-table' { interface TableMeta { endpointEvents?: TData extends EndpointMessageOut ? EndpointEventsTableMeta : never } } const getMeta = (table: Table) => { return table.options.meta?.endpointEvents as EndpointEventsTableMeta } const columns: ColumnDef[] = [ { accessorKey: 'id', header: 'Message ID', size: 300, cell: ({ row }) => { const msgId = row.original.id return (
{msgId ?? '-'} {msgId && ( e.stopPropagation()}> )}
) }, }, { id: 'status', accessorFn: (row) => row.statusText || 'unknown', header: 'Status', size: 100, filterFn: (row, id, value) => { return value.includes(row.getValue(id)) }, cell: ({ row }) => { const status = row.original.status const variant = status === 0 ? 'success' : status === 1 ? 'secondary' : 'destructive' return {status === 0 ? 'Success' : status === 1 ? 'Pending' : 'Failed'} }, }, { accessorKey: 'eventType', header: 'Event Type', size: 200, filterFn: (row, id, value) => { return value.includes(row.getValue(id)) }, cell: ({ row }) => { const eventType = row.original.eventType return ( {eventType} ) }, }, { accessorKey: 'nextAttempt', header: 'Next Attempt', size: 100, cell: ({ row }) => { const nextAttempt = row.original.nextAttempt if (!nextAttempt) { return - } const relativeTime = getRelativeTimeString(nextAttempt) return ( {relativeTime.relativeTimeString} ) }, }, { accessorKey: 'timestamp', header: 'Sent', size: 100, cell: ({ row }) => { const timestamp = row.original.timestamp if (!timestamp) { return - } const relativeTime = getRelativeTimeString(timestamp) return ( {relativeTime.relativeTimeString} ) }, }, { id: 'actions', maxSize: 44, enableHiding: false, cell: ({ row, table }) => { const { onReplay } = getMeta(table) const msgId = row.original.id return ( e.stopPropagation()}> e.stopPropagation()}> onReplay(msgId)}> Replay ) }, }, ] const eventTypeOptions: FacetedFilterOption[] = WEBHOOK_EVENTS.map((event) => ({ label: event.label, value: event.value, })) const statusOptions: FacetedFilterOption[] = [ { label: 'Success', value: 'success', icon: CheckCircle }, { label: 'Pending', value: 'pending', icon: Clock }, { label: 'Failed', value: 'fail', icon: XCircle }, ] export { columns, eventTypeOptions, statusOptions } export type { EndpointEventsTableMeta } ================================================ FILE: apps/dashboard/src/components/Webhooks/EndpointEventsTable/index.ts ================================================ /* * Copyright 
Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export { EndpointEventsTable } from './EndpointEventsTable' export type { EndpointEventsTableMeta } from './columns' ================================================ FILE: apps/dashboard/src/components/Webhooks/MessageAttemptsTable.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { CopyButton } from '@/components/CopyButton' import { TimestampTooltip } from '@/components/TimestampTooltip' import { Badge } from '@/components/ui/badge' import { Skeleton } from '@/components/ui/skeleton' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '@/components/ui/table' import { getRelativeTimeString } from '@/lib/utils' import { AnimatePresence, motion } from 'framer-motion' import { ChevronDown, ChevronRight, LoaderCircle } from 'lucide-react' import { Fragment, useCallback, useEffect, useRef, useState } from 'react' import { MessageAttemptOut } from 'svix' import { useMessageAttempts } from 'svix-react' const RELOAD_DELAY = 5 function AttemptStatusBadge({ status }: { status: number }) { const variant = status === 0 ? 'success' : status === 1 ? 'secondary' : 'destructive' const label = status === 0 ? 'Success' : status === 1 ? 'Pending' : status === 3 ? 'Sending' : 'Failed' return {label} } function TriggerTypeBadge({ triggerType }: { triggerType: number }) { return ( {triggerType === 1 ? 'Manual' : 'Scheduled'} ) } function AttemptExpandedRow({ attempt }: { attempt: MessageAttemptOut }) { let responseBody: string try { const parsed = JSON.parse(attempt.response) responseBody = JSON.stringify(parsed, null, 2) } catch { responseBody = attempt.response || '(empty)' } return (
Status Code {!attempt.responseStatusCode ? ( No Response ) : ( = 200 && attempt.responseStatusCode < 300 ? 'success' : 'destructive' } > {attempt.responseStatusCode} )}
Duration {attempt.responseDurationMs}ms
Trigger
Endpoint ID
{attempt.endpointId}
Response Body
          {responseBody}
        
) } export function MessageAttemptsTable({ messageId, reloadKey }: { messageId: string; reloadKey?: number }) { const attempts = useMessageAttempts(messageId) const [expandedRows, setExpandedRows] = useState>(new Set()) const [countdown, setCountdown] = useState(null) const prevReloadKey = useRef(reloadKey) const timerRef = useRef>(null) const clearTimer = useCallback(() => { if (timerRef.current) { clearInterval(timerRef.current) timerRef.current = null } }, []) useEffect(() => { if (reloadKey !== undefined && reloadKey !== prevReloadKey.current) { prevReloadKey.current = reloadKey clearTimer() setCountdown(RELOAD_DELAY) timerRef.current = setInterval(() => { setCountdown((prev) => { if (prev === null || prev <= 1) { clearTimer() attempts.reload() return null } return prev - 1 }) }, 1000) } }, [reloadKey, attempts, clearTimer]) useEffect(() => { return clearTimer }, [clearTimer]) const toggleRow = (attemptId: string) => { setExpandedRows((prev) => { const next = new Set(prev) if (next.has(attemptId)) { next.delete(attemptId) } else { next.add(attemptId) } return next }) } const header = (
Delivery Attempts {countdown !== null && ( Reloading in {countdown}... )}
) if (attempts.loading) { return (
{header}
) } if (attempts.error) { return (
{header}
Failed to load message attempts.
) } const data = attempts.data ?? [] if (data.length === 0) { return (
{header}
No delivery attempts yet.
) } return (
{header}
Status URL Timestamp {data.map((attempt: MessageAttemptOut) => { const { relativeTimeString } = getRelativeTimeString(attempt.timestamp) const isExpanded = expandedRows.has(attempt.id) return ( toggleRow(attempt.id)}> {isExpanded ? ( ) : ( )} {attempt.url} {relativeTimeString} {isExpanded && ( )} ) })}
) } ================================================ FILE: apps/dashboard/src/components/Webhooks/WebhooksEndpointTable/WebhooksEndpointTable.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { DebouncedInput } from '@/components/DebouncedInput' import { Pagination } from '@/components/Pagination' import { TableEmptyState } from '@/components/TableEmptyState' import { AlertDialog, AlertDialogAction, AlertDialogCancel, AlertDialogContent, AlertDialogDescription, AlertDialogFooter, AlertDialogHeader, AlertDialogTitle, } from '@/components/ui/alert-dialog' import { Skeleton } from '@/components/ui/skeleton' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '@/components/ui/table' import { DEFAULT_PAGE_SIZE } from '@/constants/Pagination' import { RoutePath } from '@/enums/RoutePath' import { flexRender, getCoreRowModel, getFilteredRowModel, getPaginationRowModel, getSortedRowModel, SortingState, useReactTable, } from '@tanstack/react-table' import { Mail } from 'lucide-react' import { useState } from 'react' import { useNavigate } from 'react-router-dom' import { EndpointOut } from 'svix' import { columns } from './columns' interface WebhooksEndpointTableProps { data: EndpointOut[] loading: boolean onDisable: (endpoint: EndpointOut) => void onDelete: (endpoint: EndpointOut) => void isLoadingEndpoint: (endpoint: EndpointOut) => boolean } export function WebhooksEndpointTable({ data, loading, onDisable, onDelete, isLoadingEndpoint, }: WebhooksEndpointTableProps) { const [sorting, setSorting] = useState([]) const [globalFilter, setGlobalFilter] = useState('') const [deleteEndpoint, setDeleteEndpoint] = useState(null) const [disableEndpoint, setDisableEndpoint] = useState(null) const navigate = useNavigate() const handleConfirmDelete = () => { if (deleteEndpoint) { onDelete(deleteEndpoint) setDeleteEndpoint(null) } } const handleConfirmDisable = () => { if 
(disableEndpoint) { onDisable(disableEndpoint) setDisableEndpoint(null) } } const table = useReactTable({ data, columns, getCoreRowModel: getCoreRowModel(), getFilteredRowModel: getFilteredRowModel(), getPaginationRowModel: getPaginationRowModel(), onSortingChange: setSorting, getSortedRowModel: getSortedRowModel(), onGlobalFilterChange: setGlobalFilter, globalFilterFn: (row, _columnId, filterValue) => { const endpoint = row.original const searchValue = filterValue.toLowerCase() return ( endpoint.url.toLowerCase().includes(searchValue) || (endpoint.description?.toLowerCase().includes(searchValue) ?? false) || endpoint.id.toLowerCase().includes(searchValue) ) }, state: { sorting, globalFilter, }, initialState: { pagination: { pageSize: DEFAULT_PAGE_SIZE, }, }, meta: { webhookEndpoints: { onDisable: setDisableEndpoint, onDelete: setDeleteEndpoint, isLoadingEndpoint, }, }, }) const handleRowClick = (endpoint: EndpointOut) => { navigate(RoutePath.WEBHOOK_ENDPOINT_DETAILS.replace(':endpointId', endpoint.id)) } return (
setGlobalFilter(String(value))} placeholder="Search by URL or Description" className="max-w-sm" />
{table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => { return ( {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ) })} ))} {loading ? ( <> {Array.from(new Array(5)).map((_, i) => ( {table.getVisibleLeafColumns().map((column, colIndex, arr) => colIndex === arr.length - 1 ? null : ( ), )} ))} ) : table.getRowModel().rows?.length ? ( table.getRowModel().rows.map((row) => { const isLoading = isLoadingEndpoint(row.original) return ( { if (!isLoading) { handleRowClick(row.original) } }} onKeyDown={(e) => { if (!isLoading && (e.key === 'Enter' || e.key === ' ')) { e.preventDefault() handleRowClick(row.original) } }} > {row.getVisibleCells().map((cell) => ( {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} ) }) ) : ( } description={

Create an endpoint to start receiving webhook events.

Check out the Docs {' '} to learn more.

} /> )}
!open && setDeleteEndpoint(null)}> Delete Webhook Endpoint Are you sure you want to delete this webhook endpoint? This action cannot be undone. {deleteEndpoint && (
URL: {deleteEndpoint.url}
)}
Cancel Delete
!open && setDisableEndpoint(null)}> {disableEndpoint?.disabled ? 'Enable' : 'Disable'} Webhook Endpoint Are you sure you want to {disableEndpoint?.disabled ? 'enable' : 'disable'} this webhook endpoint? {disableEndpoint && (
URL: {disableEndpoint.url}
)}
Cancel {disableEndpoint?.disabled ? 'Enable' : 'Disable'}
) } ================================================ FILE: apps/dashboard/src/components/Webhooks/WebhooksEndpointTable/columns.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { TimestampTooltip } from '@/components/TimestampTooltip' import { Badge } from '@/components/ui/badge' import { Button } from '@/components/ui/button' import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from '@/components/ui/dropdown-menu' import { getRelativeTimeString } from '@/lib/utils' import { ColumnDef, RowData, Table } from '@tanstack/react-table' import { MoreHorizontal } from 'lucide-react' import { EndpointOut } from 'svix' import { CopyButton } from '../../CopyButton' type WebhooksEndpointTableMeta = { onDisable: (endpoint: EndpointOut) => void onDelete: (endpoint: EndpointOut) => void isLoadingEndpoint: (endpoint: EndpointOut) => boolean } declare module '@tanstack/react-table' { interface TableMeta { webhookEndpoints?: TData extends EndpointOut ? WebhooksEndpointTableMeta : never } } const getMeta = (table: Table) => { return table.options.meta?.webhookEndpoints as WebhooksEndpointTableMeta } const columns: ColumnDef[] = [ { accessorKey: 'description', header: 'Name', size: 200, cell: ({ row }) => (
{row.original.description || 'Unnamed Endpoint'}
), }, { accessorKey: 'url', header: 'URL', size: 300, cell: ({ row }) => (
{row.original.url}
), }, { accessorKey: 'disabled', header: 'Status', size: 100, cell: ({ row }) => ( {row.original.disabled ? 'Disabled' : 'Active'} ), }, { accessorKey: 'createdAt', header: 'Created', size: 150, cell: ({ row }) => { const createdAt = row.original.createdAt const relativeTime = getRelativeTimeString(createdAt).relativeTimeString return ( {relativeTime} ) }, }, { id: 'actions', header: () => null, maxSize: 44, cell: ({ row, table }) => { const { onDisable, onDelete, isLoadingEndpoint } = getMeta(table) const isLoading = isLoadingEndpoint(row.original) return (
e.stopPropagation()}> onDisable(row.original)} className="cursor-pointer" disabled={isLoading}> {row.original.disabled ? 'Enable' : 'Disable'} onDelete(row.original)} className="cursor-pointer" disabled={isLoading} > Delete
) }, }, ] export { columns } export type { WebhooksEndpointTableMeta } ================================================ FILE: apps/dashboard/src/components/Webhooks/WebhooksEndpointTable/index.ts ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ export { WebhooksEndpointTable } from './WebhooksEndpointTable' export type { WebhooksEndpointTableMeta } from './columns' ================================================ FILE: apps/dashboard/src/components/Webhooks/WebhooksMessagesTable/MessageDetailsSheet.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { CopyButton } from '@/components/CopyButton' import { TimestampTooltip } from '@/components/TimestampTooltip' import { Badge } from '@/components/ui/badge' import { Button } from '@/components/ui/button' import { Separator } from '@/components/ui/separator' import { Sheet, SheetContent, SheetHeader, SheetTitle } from '@/components/ui/sheet' import { getRelativeTimeString } from '@/lib/utils' import { ChevronDown, ChevronUp, X } from 'lucide-react' import { MessageOut } from 'svix' import { MessageAttemptsTable } from '../MessageAttemptsTable' interface MessageDetailsSheetProps { message: MessageOut | null open: boolean onOpenChange: (open: boolean) => void onNavigate: (direction: 'prev' | 'next') => void hasPrev: boolean hasNext: boolean } export function MessageDetailsSheet({ message, open, onOpenChange, onNavigate, hasPrev, hasNext, }: MessageDetailsSheetProps) { if (!message) return null const hasPayload = message.payload && Object.keys(message.payload).length > 0 const payload = hasPayload ? typeof message.payload === 'string' ? message.payload : JSON.stringify(message.payload, null, 2) : '' const { relativeTimeString } = getRelativeTimeString(message.timestamp) return ( Message Details
Overview
Message ID
{message.id}
Event Type {message.eventType}
{message.eventId && (
Event ID
{message.eventId}
)}
Timestamp {relativeTimeString}
{message.channels && message.channels.length > 0 && (
Channels
{message.channels.map((channel) => ( {channel} ))}
)} {message.tags && message.tags.length > 0 && (
Tags
{message.tags.map((tag) => ( {tag} ))}
)}
Payload {hasPayload && }
{hasPayload ? (
                {payload}
              
) : (
This message has no payload
)}
) } ================================================ FILE: apps/dashboard/src/components/Webhooks/WebhooksMessagesTable/WebhooksMessagesTable.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { DebouncedInput } from '@/components/DebouncedInput' import { Pagination } from '@/components/Pagination' import { TableEmptyState } from '@/components/TableEmptyState' import { DataTableFacetedFilter } from '@/components/ui/data-table-faceted-filter' import { Skeleton } from '@/components/ui/skeleton' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '@/components/ui/table' import { DEFAULT_PAGE_SIZE } from '@/constants/Pagination' import { ColumnFiltersState, flexRender, getCoreRowModel, getFacetedRowModel, getFacetedUniqueValues, getFilteredRowModel, getPaginationRowModel, getSortedRowModel, SortingState, useReactTable, } from '@tanstack/react-table' import { Mail, RefreshCcw } from 'lucide-react' import { useCallback, useState } from 'react' import { Button } from '@/components/ui/button' import { useMessages } from 'svix-react' import { columns, eventTypeOptions } from './columns' import { MessageDetailsSheet } from './MessageDetailsSheet' export function WebhooksMessagesTable() { const messages = useMessages() const [sorting, setSorting] = useState([]) const [columnFilters, setColumnFilters] = useState([]) const [globalFilter, setGlobalFilter] = useState('') const [selectedMessageIndex, setSelectedMessageIndex] = useState(null) const [sheetOpen, setSheetOpen] = useState(false) const data = messages.data ?? 
[] const table = useReactTable({ data, columns, getCoreRowModel: getCoreRowModel(), getFilteredRowModel: getFilteredRowModel(), getPaginationRowModel: getPaginationRowModel(), onSortingChange: setSorting, getSortedRowModel: getSortedRowModel(), onColumnFiltersChange: setColumnFilters, getFacetedRowModel: getFacetedRowModel(), getFacetedUniqueValues: getFacetedUniqueValues(), onGlobalFilterChange: setGlobalFilter, globalFilterFn: (row, _columnId, filterValue) => { const message = row.original const searchValue = filterValue.toLowerCase() return ( (message.id?.toLowerCase().includes(searchValue) ?? false) || (message.eventType?.toLowerCase().includes(searchValue) ?? false) || (message.eventId?.toLowerCase().includes(searchValue) ?? false) ) }, state: { sorting, columnFilters, globalFilter, }, initialState: { pagination: { pageSize: DEFAULT_PAGE_SIZE, }, }, }) const handleRowClick = useCallback((index: number) => { setSelectedMessageIndex(index) setSheetOpen(true) }, []) const rowCount = table.getRowModel().rows.length const handleNavigate = useCallback( (direction: 'prev' | 'next') => { setSelectedMessageIndex((prev) => { if (prev === null) return null if (direction === 'prev' && prev > 0) return prev - 1 if (direction === 'next' && prev < rowCount - 1) return prev + 1 return prev }) }, [rowCount], ) return (
setGlobalFilter(String(value))} placeholder="Search by Message ID, Event Type, or Event ID" className="max-w-sm mr-4" /> {table.getColumn('eventType') && ( )}
{table.getHeaderGroups().map((headerGroup) => ( {headerGroup.headers.map((header) => ( {header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())} ))} ))} {messages.loading ? ( <> {Array.from(new Array(5)).map((_, i) => ( {table.getVisibleLeafColumns().map((column) => ( ))} ))} ) : table.getRowModel().rows?.length ? ( table.getRowModel().rows.map((row, rowIndex) => ( handleRowClick(rowIndex)} onKeyDown={(e) => { if (e.key === 'Enter' || e.key === ' ') { e.preventDefault() handleRowClick(rowIndex) } }} > {row.getVisibleCells().map((cell) => ( {flexRender(cell.column.columnDef.cell, cell.getContext())} ))} )) ) : ( } description={

Messages will appear here when webhook events are triggered.

} /> )}
0} hasNext={selectedMessageIndex !== null && selectedMessageIndex < table.getRowModel().rows.length - 1} />
) } ================================================ FILE: apps/dashboard/src/components/Webhooks/WebhooksMessagesTable/columns.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { TimestampTooltip } from '@/components/TimestampTooltip' import { Badge } from '@/components/ui/badge' import { FacetedFilterOption } from '@/components/ui/data-table-faceted-filter' import { WEBHOOK_EVENTS } from '@/constants/webhook-events' import { getRelativeTimeString } from '@/lib/utils' import { ColumnDef } from '@tanstack/react-table' import { MessageOut } from 'svix' import { CopyButton } from '../../CopyButton' const columns: ColumnDef[] = [ { accessorKey: 'id', header: 'Message ID', size: 300, cell: ({ row }) => { const msgId = row.original.id return (
{msgId ?? '-'} {msgId && ( e.stopPropagation()}> )}
) }, }, { accessorKey: 'eventType', header: 'Event Type', size: 200, filterFn: (row, id, value) => { return value.includes(row.getValue(id)) }, cell: ({ row }) => { const eventType = row.original.eventType return ( {eventType} ) }, }, { accessorKey: 'timestamp', header: 'Timestamp', size: 200, cell: ({ row }) => { const timestamp = row.original.timestamp if (!timestamp) { return - } const relativeTime = getRelativeTimeString(timestamp) return ( {relativeTime.relativeTimeString} ) }, }, ] const eventTypeOptions: FacetedFilterOption[] = WEBHOOK_EVENTS.map((event) => ({ label: event.label, value: event.value, })) export { columns, eventTypeOptions } ================================================ FILE: apps/dashboard/src/components/sandboxes/CreateSshAccessDialog.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { useCallback, useEffect, useState } from 'react' import { SshAccessDto } from '@daytonaio/api-client' import { useForm } from '@tanstack/react-form' import { CheckIcon, CopyIcon, InfoIcon } from 'lucide-react' import { AnimatePresence, motion } from 'motion/react' import { NumericFormat } from 'react-number-format' import { z } from 'zod' import { Field, FieldError, FieldLabel } from '@/components/ui/field' import { InputGroup, InputGroupAddon, InputGroupButton, InputGroupInput, InputGroupText, } from '@/components/ui/input-group' import { Spinner } from '@/components/ui/spinner' import { Alert, AlertDescription } from '@/components/ui/alert' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Button } from '@/components/ui/button' import { useCreateSshAccessMutation } from '@/hooks/mutations/useCreateSshAccessMutation' import { useCopyToClipboard } from '@/hooks/useCopyToClipboard' import { handleApiError } from '@/lib/error-handling' interface CreateSshAccessDialogProps { 
sandboxId: string open: boolean onOpenChange: (open: boolean) => void } const MotionCopyIcon = motion(CopyIcon) const MotionCheckIcon = motion(CheckIcon) const iconProps = { initial: { opacity: 0, y: 5 }, animate: { opacity: 1, y: 0 }, exit: { opacity: 0, y: -5 }, transition: { duration: 0.1 }, } const formSchema = z.object({ expiryMinutes: z.number().int('Must be a whole number').min(1, 'Minimum 1 minute').max(1440, 'Maximum 1440 minutes'), }) type FormValues = z.infer const defaultValues: FormValues = { expiryMinutes: 60, } export function CreateSshAccessDialog({ sandboxId, open, onOpenChange }: CreateSshAccessDialogProps) { const [sshAccess, setSshAccess] = useState(null) const { reset: resetMutation, ...createMutation } = useCreateSshAccessMutation() const form = useForm({ defaultValues, validators: { onSubmit: formSchema, }, onSubmit: async ({ value }) => { try { const result = await createMutation.mutateAsync({ sandboxId, expiresInMinutes: value.expiryMinutes, }) setSshAccess(result) } catch (error) { handleApiError(error, 'Failed to create SSH access') } }, }) const resetState = useCallback(() => { form.reset(defaultValues) resetMutation() setSshAccess(null) }, [form, resetMutation]) useEffect(() => { if (open) { resetState() } }, [open, resetState]) return ( {sshAccess ? 'SSH Access Created' : 'Create SSH Access'} {sshAccess ? 'Your SSH access has been created successfully.' : 'Set the expiration time for SSH access.'} {sshAccess ? ( ) : (
{ e.preventDefault() e.stopPropagation() form.handleSubmit() }} > {(field) => { const isInvalid = field.state.meta.isTouched && !field.state.meta.isValid return ( Expiry field.handleChange(floatValue ?? 0)} /> min {field.state.meta.errors.length > 0 && field.state.meta.isTouched && ( )} ) }}
)} {!sshAccess && ( [state.canSubmit, state.isSubmitting]} children={([canSubmit, isSubmitting]) => ( )} /> )}
) } function SshAccessCreated({ sshAccess }: { sshAccess: SshAccessDto }) { const [copiedCommand, copyCommand] = useCopyToClipboard() return (
Store the token safely — you won't be able to view it again. SSH Command copyCommand(sshAccess.sshCommand)}> {copiedCommand ? ( ) : ( )}
) } ================================================ FILE: apps/dashboard/src/components/sandboxes/RevokeSshAccessDialog.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { useState } from 'react' import { toast } from 'sonner' import { Field, FieldLabel } from '@/components/ui/field' import { Input } from '@/components/ui/input' import { Spinner } from '@/components/ui/spinner' import { Dialog, DialogClose, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, } from '@/components/ui/dialog' import { Button } from '@/components/ui/button' import { useRevokeSshAccessMutation } from '@/hooks/mutations/useRevokeSshAccessMutation' import { handleApiError } from '@/lib/error-handling' interface RevokeSshAccessDialogProps { sandboxId: string open: boolean onOpenChange: (open: boolean) => void } export function RevokeSshAccessDialog({ sandboxId, open, onOpenChange }: RevokeSshAccessDialogProps) { const [token, setToken] = useState('') const revokeMutation = useRevokeSshAccessMutation() const handleOpenChange = (isOpen: boolean) => { onOpenChange(isOpen) if (!isOpen) { setToken('') revokeMutation.reset() } } const handleRevoke = async () => { if (!token.trim()) { toast.error('Please enter a token to revoke') return } try { await revokeMutation.mutateAsync({ sandboxId, token }) toast.success('SSH access revoked successfully') handleOpenChange(false) } catch (error) { handleApiError(error, 'Failed to revoke SSH access') } } return ( Revoke SSH Access Enter the SSH access token you want to revoke. SSH Token setToken(e.target.value)} placeholder="Paste token here" /> ) } ================================================ FILE: apps/dashboard/src/components/sandboxes/SandboxContentTabs.tsx ================================================ /* * Copyright Daytona Platforms Inc. 
* SPDX-License-Identifier: AGPL-3.0 */ import { Skeleton } from '@/components/ui/skeleton' import { Spinner } from '@/components/ui/spinner' import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs' import { useRegions } from '@/hooks/useRegions' import { Sandbox } from '@daytonaio/api-client' import { SandboxInfoPanel } from './SandboxInfoPanel' import { SandboxLogsTab } from './SandboxLogsTab' import { SandboxMetricsTab } from './SandboxMetricsTab' import { SandboxSpendingTab } from './SandboxSpendingTab' import { SandboxTerminalTab } from './SandboxTerminalTab' import { SandboxTracesTab } from './SandboxTracesTab' import { SandboxVncTab } from './SandboxVncTab' import { TabValue } from './SearchParams' interface SandboxContentTabsProps { sandbox: Sandbox | undefined isLoading: boolean experimentsEnabled: boolean | undefined tab: TabValue onTabChange: (tab: TabValue) => void } export function SandboxContentTabs({ sandbox, isLoading, experimentsEnabled, tab, onTabChange, }: SandboxContentTabsProps) { const { getRegionName } = useRegions() if (isLoading) { return (
) } if (!sandbox) return null return ( onTabChange(v as TabValue)} className="flex flex-col h-full gap-0"> Overview {experimentsEnabled && ( <> Logs Traces Metrics Spending )} Terminal VNC {experimentsEnabled && ( <> )} ) } ================================================ FILE: apps/dashboard/src/components/sandboxes/SandboxDetails.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { OrganizationSuspendedError } from '@/api/errors' import { PageHeader, PageLayout, PageTitle } from '@/components/PageLayout' import { AlertDialog, AlertDialogAction, AlertDialogCancel, AlertDialogContent, AlertDialogDescription, AlertDialogFooter, AlertDialogHeader, AlertDialogTitle, } from '@/components/ui/alert-dialog' import { Button } from '@/components/ui/button' import { Empty, EmptyDescription, EmptyHeader, EmptyMedia, EmptyTitle } from '@/components/ui/empty' import { ScrollArea } from '@/components/ui/scroll-area' import { FeatureFlags } from '@/enums/FeatureFlags' import { RoutePath } from '@/enums/RoutePath' import { useArchiveSandboxMutation } from '@/hooks/mutations/useArchiveSandboxMutation' import { useDeleteSandboxMutation } from '@/hooks/mutations/useDeleteSandboxMutation' import { useRecoverSandboxMutation } from '@/hooks/mutations/useRecoverSandboxMutation' import { useStartSandboxMutation } from '@/hooks/mutations/useStartSandboxMutation' import { useStopSandboxMutation } from '@/hooks/mutations/useStopSandboxMutation' import { useSandboxQuery } from '@/hooks/queries/useSandboxQuery' import { useApi } from '@/hooks/useApi' import { useConfig } from '@/hooks/useConfig' import { useMatchMedia } from '@/hooks/useMatchMedia' import { useRegions } from '@/hooks/useRegions' import { useSandboxWsSync } from '@/hooks/useSandboxWsSync' import { useSelectedOrganization } from '@/hooks/useSelectedOrganization' import { handleApiError } from '@/lib/error-handling' import { isStoppable, 
isTransitioning } from '@/lib/utils/sandbox' import { SandboxSessionProvider } from '@/providers/SandboxSessionProvider' import { OrganizationRolePermissionsEnum, OrganizationUserRoleEnum } from '@daytonaio/api-client' import { isAxiosError } from 'axios' import { Container, GripVertical, RefreshCw } from 'lucide-react' import { useQueryState } from 'nuqs' import { useFeatureFlagEnabled } from 'posthog-js/react' import { useEffect, useState } from 'react' import { Group, Panel, Separator } from 'react-resizable-panels' import { useNavigate, useParams } from 'react-router-dom' import { toast } from 'sonner' import { CreateSshAccessDialog } from './CreateSshAccessDialog' import { RevokeSshAccessDialog } from './RevokeSshAccessDialog' import { SandboxContentTabs } from './SandboxContentTabs' import { SandboxHeader } from './SandboxHeader' import { InfoPanelSkeleton, SandboxInfoPanel } from './SandboxInfoPanel' import { tabParser } from './SearchParams' export default function SandboxDetails() { const { sandboxId } = useParams<{ sandboxId: string }>() const navigate = useNavigate() const config = useConfig() const { sandboxApi } = useApi() const { authenticatedUserOrganizationMember, selectedOrganization, authenticatedUserHasPermission } = useSelectedOrganization() const { getRegionName } = useRegions() const experimentsEnabled = useFeatureFlagEnabled(FeatureFlags.ORGANIZATION_EXPERIMENTS) const [deleteDialogOpen, setDeleteDialogOpen] = useState(false) const [createSshDialogOpen, setCreateSshDialogOpen] = useState(false) const [revokeSshDialogOpen, setRevokeSshDialogOpen] = useState(false) const [tab, setTab] = useQueryState('tab', tabParser) const isDesktop = useMatchMedia('(min-width: 1024px)') // On desktop (lg+), the overview tab is hidden in the sidebar, so switch to a content tab useEffect(() => { if (isDesktop && tab === 'overview') { setTab(experimentsEnabled ? 
'logs' : 'terminal') } }, [isDesktop, tab, setTab, experimentsEnabled]) // When experiments are disabled, coerce experimental tabs back to a supported default useEffect(() => { if (!experimentsEnabled && (tab === 'logs' || tab === 'traces' || tab === 'metrics' || tab === 'spending')) { setTab('terminal') } }, [experimentsEnabled, tab, setTab]) const { data: sandbox, isLoading, isError, error, refetch, isFetching } = useSandboxQuery(sandboxId ?? '') const isNotFound = isError && isAxiosError(error.cause) && error.cause?.status === 404 useSandboxWsSync({ sandboxId }) const startMutation = useStartSandboxMutation() const stopMutation = useStopSandboxMutation() const archiveMutation = useArchiveSandboxMutation() const recoverMutation = useRecoverSandboxMutation() const deleteMutation = useDeleteSandboxMutation() const writePermitted = authenticatedUserHasPermission(OrganizationRolePermissionsEnum.WRITE_SANDBOXES) const deletePermitted = authenticatedUserHasPermission(OrganizationRolePermissionsEnum.DELETE_SANDBOXES) const transitioning = sandbox ? isTransitioning(sandbox) : false const anyMutating = startMutation.isPending || stopMutation.isPending || archiveMutation.isPending || recoverMutation.isPending || deleteMutation.isPending const actionsDisabled = anyMutating || transitioning const handleStart = async () => { if (!sandbox) return try { await startMutation.mutateAsync({ sandboxId: sandbox.id }) toast.success('Sandbox started') } catch (error) { handleApiError( error, 'Failed to start sandbox', error instanceof OrganizationSuspendedError && config.billingApiUrl && authenticatedUserOrganizationMember?.role === OrganizationUserRoleEnum.OWNER ? 
( ) : undefined, ) } } const handleStop = async () => { if (!sandbox) return try { await stopMutation.mutateAsync({ sandboxId: sandbox.id }) toast.success('Sandbox stopped') } catch (error) { handleApiError(error, 'Failed to stop sandbox') } } const handleArchive = async () => { if (!sandbox) return try { await archiveMutation.mutateAsync({ sandboxId: sandbox.id }) toast.success('Sandbox archived') } catch (error) { handleApiError(error, 'Failed to archive sandbox') } } const handleRecover = async () => { if (!sandbox) return try { await recoverMutation.mutateAsync({ sandboxId: sandbox.id }) toast.success('Sandbox recovery started') } catch (error) { handleApiError(error, 'Failed to recover sandbox') } } const handleDelete = async () => { if (!sandbox) return try { await deleteMutation.mutateAsync({ sandboxId: sandbox.id }) toast.success('Sandbox deleted') setDeleteDialogOpen(false) navigate(RoutePath.SANDBOXES) } catch (error) { handleApiError(error, 'Failed to delete sandbox') } } const handleScreenRecordings = async () => { if (!sandbox || !isStoppable(sandbox)) { toast.error('Sandbox must be started to access Screen Recordings') return } try { const response = await sandboxApi.getSignedPortPreviewUrl(sandbox.id, 33333, selectedOrganization?.id) window.open(response.data.url, '_blank', 'noopener,noreferrer') toast.success('Opening Screen Recordings dashboard...') } catch (error) { handleApiError(error, 'Failed to open Screen Recordings') } } return ( Sandboxes setDeleteDialogOpen(true)} onRefresh={() => refetch()} onBack={() => navigate(RoutePath.SANDBOXES)} onCreateSshAccess={() => setCreateSshDialogOpen(true)} onRevokeSshAccess={() => setRevokeSshDialogOpen(true)} onScreenRecordings={handleScreenRecordings} mutations={{ start: startMutation.isPending, stop: stopMutation.isPending, archive: archiveMutation.isPending, recover: recoverMutation.isPending, }} /> {isNotFound ? (
Sandbox not found Are you sure you're in the right organization?
) : ( {isDesktop && ( <>
Overview
{isLoading ? ( ) : isError || !sandbox ? (

Failed to load sandbox details.

) : ( )}
)}
)} Delete Sandbox Are you sure you want to delete this sandbox? This action cannot be undone. Cancel {deleteMutation.isPending ? 'Deleting...' : 'Delete'} {sandboxId && ( <> )}
) } function ResizableSeparator() { return (
) } ================================================ FILE: apps/dashboard/src/components/sandboxes/SandboxHeader.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { CopyButton } from '@/components/CopyButton' import { SandboxState } from '@/components/SandboxTable/SandboxState' import { Button } from '@/components/ui/button' import { ButtonGroup } from '@/components/ui/button-group' import { DropdownMenu, DropdownMenuContent, DropdownMenuGroup, DropdownMenuItem, DropdownMenuSeparator, DropdownMenuTrigger, } from '@/components/ui/dropdown-menu' import { Skeleton } from '@/components/ui/skeleton' import { Spinner } from '@/components/ui/spinner' import { isArchivable, isRecoverable, isStartable, isStoppable } from '@/lib/utils/sandbox' import { Sandbox } from '@daytonaio/api-client' import { ArrowLeft, MoreHorizontal, Play, RefreshCw, Square, Wrench } from 'lucide-react' interface SandboxHeaderProps { sandbox: Sandbox | undefined isLoading: boolean writePermitted: boolean deletePermitted: boolean actionsDisabled: boolean isFetching: boolean onStart: () => void onStop: () => void onArchive: () => void onRecover: () => void onDelete: () => void onRefresh: () => void onBack: () => void onCreateSshAccess: () => void onRevokeSshAccess: () => void onScreenRecordings: () => void mutations: { start: boolean; stop: boolean; archive: boolean; recover: boolean } } export function SandboxHeader({ sandbox, isLoading, writePermitted, deletePermitted, actionsDisabled, isFetching, onStart, onStop, onArchive, onRecover, onDelete, onRefresh, onBack, onCreateSshAccess, onRevokeSshAccess, onScreenRecordings, mutations, }: SandboxHeaderProps) { return (
{isLoading ? ( ) : sandbox ? (

{sandbox.name || sandbox.id}

UUID {sandbox.id}
) : null}
{isLoading ? (
) : sandbox ? ( <>
{writePermitted && ( {isStartable(sandbox) && !sandbox.recoverable && ( )} {isStoppable(sandbox) && ( )} {isRecoverable(sandbox) && ( )} Create SSH Access Revoke SSH Access Screen Recordings {isArchivable(sandbox) && ( <> Archive )} {deletePermitted && ( <> Delete )} )}
) : null}
) } function SandboxHeaderSkeleton() { return (
) } ================================================ FILE: apps/dashboard/src/components/sandboxes/SandboxInfoPanel.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { CopyButton } from '@/components/CopyButton' import { ResourceChip } from '@/components/ResourceChip' import { TimestampTooltip } from '@/components/TimestampTooltip' import { Alert, AlertDescription } from '@/components/ui/alert' import { Empty, EmptyDescription, EmptyHeader, EmptyMedia } from '@/components/ui/empty' import { Skeleton } from '@/components/ui/skeleton' import { cn, formatDuration, getRelativeTimeString } from '@/lib/utils' import { Sandbox } from '@daytonaio/api-client' import { AlertCircle, Tag } from 'lucide-react' import React, { useMemo } from 'react' export function InfoSection({ title, children, className, }: { title: string children: React.ReactNode className?: string }) { return (

{title}

{children}
) } export function InfoRow({ label, children, className, }: { label: string children: React.ReactNode className?: string }) { return (
{label}
{children}
) } interface SandboxInfoPanelProps { sandbox: Sandbox getRegionName: (id: string) => string | undefined } export function SandboxInfoPanel({ sandbox, getRegionName }: SandboxInfoPanelProps) { const labelEntries = useMemo(() => { return sandbox.labels ? Object.entries(sandbox.labels) : [] }, [sandbox.labels]) return (
{sandbox.errorReason && (
{sandbox.errorReason}
)}
{getRegionName(sandbox.target) ?? sandbox.target}
{sandbox.snapshot ? (
{sandbox.snapshot}
) : ( )}
{sandbox.autoStopInterval ? ( formatDuration(sandbox.autoStopInterval) ) : ( Disabled )} {sandbox.autoArchiveInterval ? ( formatDuration(sandbox.autoArchiveInterval) ) : ( Disabled )} {sandbox.autoDeleteInterval !== undefined && sandbox.autoDeleteInterval >= 0 ? ( sandbox.autoDeleteInterval === 0 ? ( 'On stop' ) : ( formatDuration(sandbox.autoDeleteInterval) ) ) : ( Disabled )} {labelEntries.length > 0 ? (
{labelEntries.map(([key, value]) => ( {key}: {value} ))}
) : ( No labels )}
{getRelativeTimeString(sandbox.createdAt).relativeTimeString} {getRelativeTimeString(sandbox.updatedAt).relativeTimeString}
) } export function InfoPanelSkeleton() { return (
) } ================================================ FILE: apps/dashboard/src/components/sandboxes/SandboxLogsTab.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { CopyButton } from '@/components/CopyButton' import { SeverityBadge } from '@/components/telemetry/SeverityBadge' import { TimeRangeSelector } from '@/components/telemetry/TimeRangeSelector' import { Button } from '@/components/ui/button' import { Empty, EmptyDescription, EmptyHeader, EmptyMedia, EmptyTitle } from '@/components/ui/empty' import { Input } from '@/components/ui/input' import { ScrollArea } from '@/components/ui/scroll-area' import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@/components/ui/select' import { Skeleton } from '@/components/ui/skeleton' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '@/components/ui/table' import { DAYTONA_DOCS_URL } from '@/constants/ExternalLinks' import { LogsQueryParams, useSandboxLogs } from '@/hooks/useSandboxLogs' import { cn } from '@/lib/utils' import { LogEntry } from '@daytonaio/api-client' import { format, subHours } from 'date-fns' import { ChevronDown, ChevronLeft, ChevronRight, FileText, RefreshCw, Search } from 'lucide-react' import { useQueryStates } from 'nuqs' import React, { useCallback, useMemo, useState } from 'react' import { logsSearchParams, SEVERITY_OPTIONS, timeRangeSearchParams } from './SearchParams' function formatTimestamp(timestamp: string) { try { return format(new Date(timestamp), 'yyyy-MM-dd HH:mm:ss.SSS') } catch { return timestamp } } const getLogKey = (log: LogEntry, index: number) => `${log.timestamp}-${index}` function LogsTableSkeleton() { return ( Timestamp Severity Message {Array.from({ length: 10 }).map((_, i) => ( ))}
) } function LogsErrorState({ onRetry }: { onRetry: () => void }) { return ( Failed to load logs Something went wrong while fetching logs. ) } function LogsEmptyState() { return ( No logs found Try adjusting your time range or filters.{' '} Learn more about observability . ) } export function SandboxLogsTab({ sandboxId }: { sandboxId: string }) { const [params, setParams] = useQueryStates(logsSearchParams) const [timeRange, setTimeRange] = useQueryStates(timeRangeSearchParams) const [searchInput, setSearchInput] = useState(params.search) const [expandedRow, setExpandedRow] = useState(null) const limit = 50 const resolvedFrom = useMemo(() => timeRange.from ?? subHours(new Date(), 1), [timeRange.from]) const resolvedTo = useMemo(() => timeRange.to ?? new Date(), [timeRange.to]) const queryParams: LogsQueryParams = useMemo( () => ({ from: resolvedFrom, to: resolvedTo, page: params.logsPage, limit, severities: params.severity.length > 0 ? [...params.severity] : undefined, search: params.search || undefined, }), [resolvedFrom, resolvedTo, params.logsPage, params.severity, params.search], ) const { data, isLoading, isError, refetch } = useSandboxLogs(sandboxId, queryParams) const handleTimeRangeChange = useCallback( (from: Date, to: Date) => { setTimeRange({ from, to }) setParams({ logsPage: 1 }) }, [setTimeRange, setParams], ) const handleSearch = useCallback(() => { setParams({ search: searchInput, logsPage: 1 }) }, [searchInput, setParams]) const handleSeverityChange = useCallback( (value: string) => { if (value === 'all' || !value) { setParams({ severity: [], logsPage: 1 }) } else { setParams({ severity: [value as (typeof SEVERITY_OPTIONS)[number]], logsPage: 1 }) } }, [setParams], ) return (
setSearchInput(e.target.value)} onKeyDown={(e) => e.key === 'Enter' && handleSearch()} className="w-48" />
{isLoading ? (
) : isError ? (
refetch()} />
) : !data?.items?.length ? (
) : ( Timestamp Severity Message {data.items.map((log: LogEntry, index: number) => ( setExpandedRow(expandedRow === index ? null : index)} > {formatTimestamp(log.timestamp)} {log.body} {expandedRow === index && (

Full Message

                              {log.body}
                            
{log.traceId && (

Trace ID

{log.traceId}
)} {log.spanId && (

Span ID

{log.spanId}
)} {Object.keys(log.logAttributes || {}).length > 0 && (

Attributes

                                  {JSON.stringify(log.logAttributes, null, 2)}
                                
)}
)}
))}
)} {data && data.totalPages > 1 && (
Page {params.logsPage} of {data.totalPages} ({data.total} total)
)}
) } ================================================ FILE: apps/dashboard/src/components/sandboxes/SandboxMetricsTab.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import React, { useState, useCallback, useMemo } from 'react' import { useQueryStates } from 'nuqs' import { useSandboxMetrics, MetricsQueryParams } from '@/hooks/useSandboxMetrics' import { TimeRangeSelector } from '@/components/telemetry/TimeRangeSelector' import { Button } from '@/components/ui/button' import { ChartContainer, ChartTooltip, ChartTooltipContent, ChartConfig } from '@/components/ui/chart' import { ScrollArea } from '@/components/ui/scroll-area' import { Skeleton } from '@/components/ui/skeleton' import { LineChart, Line, XAxis, YAxis, CartesianGrid, Legend } from 'recharts' import { Empty, EmptyDescription, EmptyHeader, EmptyMedia, EmptyTitle } from '@/components/ui/empty' import { DAYTONA_DOCS_URL } from '@/constants/ExternalLinks' import { RefreshCw, BarChart3 } from 'lucide-react' import { ToggleGroup, ToggleGroupItem } from '@/components/ui/toggle-group' import { format, subHours } from 'date-fns' import { MetricSeries } from '@daytonaio/api-client' import { getMetricDisplayName } from '@/constants/metrics' import { timeRangeSearchParams } from './SearchParams' const CHART_COLORS = [ 'hsl(var(--chart-1))', 'hsl(var(--chart-2))', 'hsl(var(--chart-3))', 'hsl(var(--chart-4))', 'hsl(var(--chart-5))', ] const BYTES_TO_GIB = 1024 * 1024 * 1024 type ViewMode = '%' | 'GiB' const METRIC_GROUPS = [ { key: 'cpu', title: 'CPU', prefix: '.cpu.', hasToggle: false }, { key: 'memory', title: 'Memory', prefix: '.memory.', hasToggle: true }, { key: 'filesystem', title: 'Filesystem', prefix: '.filesystem.', hasToggle: true }, ] function isByteMetric(metricName: string): boolean { return !metricName.endsWith('.utilization') } function buildChartData(series: MetricSeries[], convertToGiB: boolean): Record[] { const 
timestampSet = new Set() const indexed = series.map((s) => { const byTimestamp = new Map() for (const p of s.dataPoints) { timestampSet.add(p.timestamp) byTimestamp.set(p.timestamp, p.value ?? null) } return { metricName: s.metricName, byTimestamp, convertMetric: convertToGiB && isByteMetric(s.metricName) } }) const timestamps = Array.from(timestampSet).sort() return timestamps.map((timestamp) => { const point: Record = { timestamp } for (const { metricName, byTimestamp, convertMetric } of indexed) { const value = byTimestamp.get(timestamp) if (value == null) { point[metricName] = null } else if (convertMetric) { point[metricName] = Math.round((value / BYTES_TO_GIB) * 100) / 100 } else { point[metricName] = value } } return point }) } function buildChartConfig(series: MetricSeries[]): ChartConfig { const config: ChartConfig = {} series.forEach((s, index) => { config[s.metricName] = { label: getMetricDisplayName(s.metricName), color: CHART_COLORS[index % CHART_COLORS.length], } }) return config } const formatXAxis = (timestamp: string) => { try { return format(new Date(timestamp), 'HH:mm') } catch { return timestamp } } function MetricGroupChart({ title, series, convertToGiB, viewMode, onViewModeChange, }: { title: string series: MetricSeries[] convertToGiB: boolean viewMode?: ViewMode onViewModeChange?: (mode: ViewMode) => void }) { const chartData = React.useMemo(() => buildChartData(series, convertToGiB), [series, convertToGiB]) const chartConfig = React.useMemo(() => buildChartConfig(series), [series]) const displayTitle = viewMode ? `${title} (${viewMode})` : title return (

{displayTitle}

{viewMode && onViewModeChange && ( { if (value) onViewModeChange(value as ViewMode) }} variant="outline" size="sm" > % GiB )}
value.toFixed(2) : undefined} domain={viewMode === '%' ? [0, 100] : undefined} /> { try { return format(new Date(label as string), 'yyyy-MM-dd HH:mm:ss') } catch { return String(label) } }} /> } /> {series.map((s, index) => { const isLimit = s.metricName.endsWith('.limit') || s.metricName.endsWith('.total') return ( ) })}
) } function MetricsChartsSkeleton() { return (
{['CPU', 'Memory', 'Filesystem'].map((title) => (
))}
// NOTE(review): This chunk is an extraction of three TSX files (SandboxMetricsTab tail,
// SandboxSpendingTab.tsx, SandboxTerminalTab.tsx) in which the JSX element tags have been
// stripped — every `return (` below is followed only by the text/expression residue of the
// original markup, and `FILE:` separators from the extractor are fused into the code lines.
// The code is therefore not compilable as-is; comments below describe only the logic that is
// still visible. Do not attempt to reconstruct the missing JSX from this copy — recover the
// originals from the repository instead.
//
// Visible below: MetricsErrorState / MetricsEmptyState presentational helpers (markup lost),
// then SandboxMetricsTab. SandboxMetricsTab: time range kept in URL query state via
// useQueryStates(timeRangeSearchParams); from/to default to (now - 1h, now) via subHours;
// per-group view mode ('%' vs 'GiB') kept in local state (the useState generic parameter was
// truncated by the extraction — presumably Record<string, ViewMode>; TODO confirm against repo).
// groupedSeries: for each METRIC_GROUPS entry, filters data.series by metricName prefix,
// always drops 'system.memory.utilization', then in '%' mode keeps only '.utilization'
// series and in 'GiB' mode keeps only non-'.utilization' series; groups with no series
// are dropped.
) } function MetricsErrorState({ onRetry }: { onRetry: () => void }) { return ( Failed to load metrics Something went wrong while fetching metrics. ) } function MetricsEmptyState() { return ( No metrics available Metrics may take a moment to appear after the sandbox starts.{' '} Learn more about observability . ) } export function SandboxMetricsTab({ sandboxId }: { sandboxId: string }) { const [timeRange, setTimeRange] = useQueryStates(timeRangeSearchParams) const [viewModes, setViewModes] = useState>({ memory: '%', filesystem: '%' }) const resolvedFrom = useMemo(() => timeRange.from ?? subHours(new Date(), 1), [timeRange.from]) const resolvedTo = useMemo(() => timeRange.to ?? new Date(), [timeRange.to]) const queryParams: MetricsQueryParams = { from: resolvedFrom, to: resolvedTo } const { data, isLoading, isError, refetch } = useSandboxMetrics(sandboxId, queryParams) const handleTimeRangeChange = useCallback( (from: Date, to: Date) => { setTimeRange({ from, to }) }, [setTimeRange], ) const handleViewModeChange = useCallback((groupKey: string, mode: ViewMode) => { setViewModes((prev) => ({ ...prev, [groupKey]: mode })) }, []) const groupedSeries = React.useMemo(() => { if (!data?.series?.length) return [] return METRIC_GROUPS.map((group) => { const allSeries = data.series.filter((s) => s.metricName.includes(group.prefix)) const mode = group.hasToggle ? viewModes[group.key] : undefined let filteredSeries = allSeries.filter((s) => s.metricName !== 'system.memory.utilization') if (mode === '%') { filteredSeries = filteredSeries.filter((s) => s.metricName.endsWith('.utilization')) } else if (mode === 'GiB') { filteredSeries = filteredSeries.filter((s) => !s.metricName.endsWith('.utilization')) } return { key: group.key, title: group.title, series: filteredSeries, convertToGiB: mode === 'GiB', hasToggle: group.hasToggle, viewMode: mode, } }).filter((group) => group.series.length > 0) }, [data, viewModes]) return (
// Render branch chain (markup stripped): loading skeleton -> error state with retry ->
// empty state when data?.series is empty -> one chart per groupedSeries entry.
{isLoading ? (
) : isError ? (
refetch()} />
) : !data?.series?.length ? (
) : (
{groupedSeries.map((group) => ( handleViewModeChange(group.key, mode) : undefined} /> ))}
)}
// --- End of SandboxMetricsTab; next collapsed line also contains the extractor's file
// separator for SandboxSpendingTab.tsx, its imports, formatTimestamp (intact: formats a
// timestamp string as 'yyyy-MM-dd HH:mm:ss', falling back to the raw input on parse error),
// and the start of SpendingTableSkeleton (markup lost). ---
) } ================================================ FILE: apps/dashboard/src/components/sandboxes/SandboxSpendingTab.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { TimeRangeSelector } from '@/components/telemetry/TimeRangeSelector' import { Button } from '@/components/ui/button' import { Empty, EmptyDescription, EmptyHeader, EmptyMedia, EmptyTitle } from '@/components/ui/empty' import { ScrollArea } from '@/components/ui/scroll-area' import { Skeleton } from '@/components/ui/skeleton' import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from '@/components/ui/table' import { AnalyticsUsageParams, useSandboxUsagePeriods } from '@/hooks/queries/useAnalyticsUsage' import { formatMoney } from '@/lib/utils' import { format, subHours } from 'date-fns' import { DollarSign, RefreshCw } from 'lucide-react' import { useQueryStates } from 'nuqs' import { useCallback, useMemo } from 'react' import { timeRangeSearchParams } from './SearchParams' function formatTimestamp(timestamp: string) { try { return format(new Date(timestamp), 'yyyy-MM-dd HH:mm:ss') } catch { return timestamp } } function SpendingTableSkeleton() { return ( Start End CPU RAM (GB) Disk (GB) Price {Array.from({ length: 8 }).map((_, i) => ( ))}
// SandboxSpendingTab: same URL-query time-range pattern as the metrics tab, but the default
// window is (now - 24h, now) — note the deliberate 24 vs 1 hour difference from subHours above.
// Fetches usage periods via useSandboxUsagePeriods(sandboxId, { from, to }).
) } function SpendingErrorState({ onRetry }: { onRetry: () => void }) { return ( Failed to load spending Something went wrong while fetching usage periods. ) } function SpendingEmptyState() { return ( No usage periods found Try adjusting your time range. ) } export function SandboxSpendingTab({ sandboxId }: { sandboxId: string }) { const [timeRange, setTimeRange] = useQueryStates(timeRangeSearchParams) const resolvedFrom = useMemo(() => timeRange.from ?? subHours(new Date(), 24), [timeRange.from]) const resolvedTo = useMemo(() => timeRange.to ?? new Date(), [timeRange.to]) const queryParams: AnalyticsUsageParams = { from: resolvedFrom, to: resolvedTo } const { data, isLoading, isError, refetch } = useSandboxUsagePeriods(sandboxId, queryParams) const handleTimeRangeChange = useCallback( (from: Date, to: Date) => { setTimeRange({ from, to }) }, [setTimeRange], ) return (
// Render branch chain (markup stripped): skeleton -> error with retry -> empty -> usage table.
{isLoading ? (
) : isError ? (
refetch()} />
) : !data?.length ? (
// Table body: one row per usage period, keyed by `${startAt}-${endAt}` with 'unknown-*'
// fallbacks; numeric cells default to 0 via ??; price formatted by formatMoney with up to
// 8 fraction digits.
) : ( Start End CPU RAM (GB) Disk (GB) Price {data.map((period) => { const rowKey = `${period.startAt ?? 'unknown-start'}-${period.endAt ?? 'unknown-end'}` return ( {period.startAt ? formatTimestamp(period.startAt) : '-'} {period.endAt ? formatTimestamp(period.endAt) : '-'} {period.cpu ?? 0} {period.ramGB ?? 0} {period.diskGB ?? 0} {formatMoney(period.price ?? 0, { maximumFractionDigits: 8, })} ) })}
)}
// --- End of SandboxSpendingTab; the next collapsed line also contains the extractor's file
// separator for SandboxTerminalTab.tsx, its imports, and the start of SandboxTerminalTab.
// SandboxTerminalTab: `running` derived from isStoppable(sandbox); terminal activation is
// tracked both in shared session context and local state; the session query is enabled only
// when running && activated; an effect resets the query when the cached session's expiresAt
// is at or before Date.now() (auto-reconnect on expiry). ---
) } ================================================ FILE: apps/dashboard/src/components/sandboxes/SandboxTerminalTab.tsx ================================================ /* * Copyright Daytona Platforms Inc. * SPDX-License-Identifier: AGPL-3.0 */ import { useEffect, useState } from 'react' import { Button } from '@/components/ui/button' import { Empty, EmptyDescription, EmptyHeader, EmptyMedia, EmptyTitle } from '@/components/ui/empty' import { DAYTONA_DOCS_URL } from '@/constants/ExternalLinks' import { useTerminalSessionQuery } from '@/hooks/queries/useTerminalSessionQuery' import { useSandboxSessionContext } from '@/hooks/useSandboxSessionContext' import { isStoppable } from '@/lib/utils/sandbox' import { Sandbox } from '@daytonaio/api-client' import { Spinner } from '@/components/ui/spinner' import { Play, RefreshCw, TerminalSquare } from 'lucide-react' export function SandboxTerminalTab({ sandbox }: { sandbox: Sandbox }) { const running = isStoppable(sandbox) const { isTerminalActivated, activateTerminal } = useSandboxSessionContext() const [activated, setActivated] = useState(() => isTerminalActivated(sandbox.id)) const { data: session, isLoading, isError, isFetching, existingSession, reset, } = useTerminalSessionQuery(sandbox.id, running && activated) // Auto-reconnect: if activated and session is expired, refetch useEffect(() => { if (!activated || !existingSession) return if (existingSession.expiresAt <= Date.now()) { reset() } }, [activated, existingSession, reset]) const handleConnect = () => { activateTerminal(sandbox.id) setActivated(true) } if (!running) { return (
// Early-return states below (markup stripped): not running -> prompt to start the sandbox;
// not activated -> connect prompt; loading/fetching -> spinner; error/no session -> failure.
Sandbox is not running Start the sandbox to access the terminal.{' '} Learn more .
) } // Not yet activated — show connect button if (!activated) { return (
Terminal Connect to an interactive terminal session in your sandbox.{' '} Learn more .
) } // Loading / fetching if (isLoading || isFetching) { return (
Connecting...
) } // Error if (isError || !session) { return (
Failed to connect Something went wrong while connecting to the terminal.
) } // Active session return (
// NOTE(review): chunk ends mid-return of the active-session branch — the remainder of
// SandboxTerminalTab is outside this extract.