Repository: caioricciuti/ch-ui Branch: main Commit: f21fb4c40d52 Files: 274 Total size: 2.1 MB Directory structure: gitextract_j38tsz8m/ ├── .dockerignore ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── bug.yml │ │ └── feature.yml │ └── workflows/ │ └── release.yml ├── .gitignore ├── .gitpod.yml ├── Dockerfile ├── LICENSE.md ├── Makefile ├── README.md ├── VERSION ├── ch-ui.conf ├── cmd/ │ ├── connect.go │ ├── connect_detach_unix.go │ ├── connect_detach_windows.go │ ├── connect_process_unix.go │ ├── connect_process_windows.go │ ├── root.go │ ├── server.go │ ├── service.go │ ├── tunnel.go │ ├── uninstall.go │ ├── update.go │ └── version.go ├── connector/ │ ├── clickhouse.go │ ├── config/ │ │ └── config.go │ ├── connector.go │ ├── hostinfo.go │ ├── hostinfo_unix.go │ ├── hostinfo_windows.go │ ├── protocol.go │ ├── service/ │ │ ├── launchd.go │ │ ├── service.go │ │ └── systemd.go │ └── ui/ │ └── ui.go ├── docs/ │ ├── brain/ │ │ └── SKILLS.md │ ├── cant-login.md │ ├── legal/ │ │ ├── privacy-policy.md │ │ └── terms-of-service.md │ ├── license.md │ └── production-runbook.md ├── frontend.go ├── go.mod ├── go.sum ├── internal/ │ ├── alerts/ │ │ └── dispatcher.go │ ├── brain/ │ │ ├── provider.go │ │ └── provider_test.go │ ├── config/ │ │ ├── config.go │ │ ├── secret.go │ │ └── secret_test.go │ ├── crypto/ │ │ └── aes.go │ ├── database/ │ │ ├── alert_digests.go │ │ ├── alerts.go │ │ ├── audit_logs.go │ │ ├── audit_logs_test.go │ │ ├── brain.go │ │ ├── cleanup.go │ │ ├── connections.go │ │ ├── dashboards.go │ │ ├── database.go │ │ ├── migrations.go │ │ ├── migrations_guardrails_test.go │ │ ├── models.go │ │ ├── pipelines.go │ │ ├── rate_limits.go │ │ ├── saved_queries.go │ │ ├── schedules.go │ │ ├── sessions.go │ │ ├── settings.go │ │ └── user_roles.go │ ├── embedded/ │ │ └── embedded.go │ ├── governance/ │ │ ├── guardrails.go │ │ ├── guardrails_test.go │ │ ├── harvester_access.go │ │ ├── harvester_metadata.go │ │ ├── harvester_querylog.go │ │ ├── incidents.go │ │ ├── lineage.go │ 
│ ├── policy_engine.go │ │ ├── store.go │ │ ├── syncer.go │ │ └── types.go │ ├── langfuse/ │ │ └── langfuse.go │ ├── license/ │ │ ├── license.go │ │ ├── pubkey.go │ │ ├── public.pem │ │ └── tokens.go │ ├── models/ │ │ ├── dag.go │ │ ├── ref.go │ │ ├── runner.go │ │ └── scheduler.go │ ├── pipelines/ │ │ ├── clickhouse_sink.go │ │ ├── database_source.go │ │ ├── helpers.go │ │ ├── kafka.go │ │ ├── kafka_scram.go │ │ ├── registry.go │ │ ├── runner.go │ │ ├── s3_source.go │ │ ├── types.go │ │ └── webhook.go │ ├── queryproc/ │ │ ├── variables.go │ │ └── variables_test.go │ ├── scheduler/ │ │ ├── cron.go │ │ └── runner.go │ ├── server/ │ │ ├── handlers/ │ │ │ ├── admin.go │ │ │ ├── admin_brain.go │ │ │ ├── admin_governance.go │ │ │ ├── admin_langfuse.go │ │ │ ├── auth.go │ │ │ ├── auth_helpers_test.go │ │ │ ├── brain.go │ │ │ ├── connections.go │ │ │ ├── dashboards.go │ │ │ ├── governance.go │ │ │ ├── governance_alerts.go │ │ │ ├── governance_auditlog.go │ │ │ ├── governance_querylog.go │ │ │ ├── health.go │ │ │ ├── license.go │ │ │ ├── models.go │ │ │ ├── pipelines.go │ │ │ ├── query.go │ │ │ ├── query_guardrails_test.go │ │ │ ├── query_upload.go │ │ │ ├── saved_queries.go │ │ │ ├── schedules.go │ │ │ └── view_graph.go │ │ ├── middleware/ │ │ │ ├── context.go │ │ │ ├── cors.go │ │ │ ├── license.go │ │ │ ├── logging.go │ │ │ ├── ratelimit.go │ │ │ ├── ratelimit_test.go │ │ │ ├── security.go │ │ │ └── session.go │ │ └── server.go │ ├── tunnel/ │ │ ├── api.go │ │ ├── gateway.go │ │ └── protocol.go │ └── version/ │ └── version.go ├── license/ │ └── public.pem ├── main.go └── ui/ ├── .gitignore ├── README.md ├── index.html ├── package.json ├── src/ │ ├── App.svelte │ ├── app.css │ ├── lib/ │ │ ├── api/ │ │ │ ├── alerts.ts │ │ │ ├── auth.ts │ │ │ ├── brain.ts │ │ │ ├── client.ts │ │ │ ├── governance.ts │ │ │ ├── models.ts │ │ │ ├── pipelines.ts │ │ │ ├── query.ts │ │ │ └── stream.ts │ │ ├── basePath.ts │ │ ├── components/ │ │ │ ├── brain/ │ │ │ │ ├── BrainArtifactCard.svelte │ 
│ │ │ ├── BrainEmptyState.svelte │ │ │ │ ├── BrainHeader.svelte │ │ │ │ ├── BrainInput.svelte │ │ │ │ ├── BrainMentionDropdown.svelte │ │ │ │ ├── BrainMessage.svelte │ │ │ │ ├── BrainSidebar.svelte │ │ │ │ ├── BrainSqlBlock.svelte │ │ │ │ └── brain-markdown.ts │ │ │ ├── common/ │ │ │ │ ├── Button.svelte │ │ │ │ ├── Combobox.svelte │ │ │ │ ├── ConfirmDialog.svelte │ │ │ │ ├── ContextMenu.svelte │ │ │ │ ├── HelpTip.svelte │ │ │ │ ├── InputDialog.svelte │ │ │ │ ├── MiniTrendChart.svelte │ │ │ │ ├── Modal.svelte │ │ │ │ ├── ProRequired.svelte │ │ │ │ ├── Sheet.svelte │ │ │ │ ├── Spinner.svelte │ │ │ │ └── Toast.svelte │ │ │ ├── dashboard/ │ │ │ │ ├── ChartPanel.svelte │ │ │ │ ├── DashboardGrid.svelte │ │ │ │ ├── PanelEditor.svelte │ │ │ │ ├── TimeRangeSelector.svelte │ │ │ │ └── time-picker/ │ │ │ │ ├── CalendarMonth.svelte │ │ │ │ ├── DualCalendar.svelte │ │ │ │ ├── PresetList.svelte │ │ │ │ ├── TimeInput.svelte │ │ │ │ └── TimezoneSelect.svelte │ │ │ ├── editor/ │ │ │ │ ├── InsightsPanel.svelte │ │ │ │ ├── ResultFooter.svelte │ │ │ │ ├── ResultPanel.svelte │ │ │ │ ├── SchemaPanel.svelte │ │ │ │ ├── SqlEditor.svelte │ │ │ │ ├── StatsPanel.svelte │ │ │ │ └── Toolbar.svelte │ │ │ ├── explorer/ │ │ │ │ ├── DataPreview.svelte │ │ │ │ └── DatabaseTree.svelte │ │ │ ├── governance/ │ │ │ │ ├── LineageGraph.svelte │ │ │ │ └── LineageTableNode.svelte │ │ │ ├── layout/ │ │ │ │ ├── CommandPalette.svelte │ │ │ │ ├── Shell.svelte │ │ │ │ ├── Sidebar.svelte │ │ │ │ ├── TabBar.svelte │ │ │ │ ├── TabContent.svelte │ │ │ │ ├── TabGroup.svelte │ │ │ │ └── content/ │ │ │ │ ├── DatabaseContent.svelte │ │ │ │ ├── ModelContent.svelte │ │ │ │ ├── QueryContent.svelte │ │ │ │ └── TableContent.svelte │ │ │ ├── models/ │ │ │ │ └── ModelNode.svelte │ │ │ ├── pipelines/ │ │ │ │ ├── NodeConfigPanel.svelte │ │ │ │ ├── PipelineCanvas.svelte │ │ │ │ ├── PipelineEditor.svelte │ │ │ │ ├── PipelineList.svelte │ │ │ │ ├── PipelineStatusBar.svelte │ │ │ │ ├── PipelineToolbar.svelte │ │ │ │ └── nodes/ │ │ 
│ │ ├── SinkNode.svelte │ │ │ │ └── SourceNode.svelte │ │ │ └── table/ │ │ │ ├── Pagination.svelte │ │ │ ├── TableCell.svelte │ │ │ ├── TableHeader.svelte │ │ │ └── VirtualTable.svelte │ │ ├── editor/ │ │ │ └── completions.ts │ │ ├── stores/ │ │ │ ├── command-palette.svelte.ts │ │ │ ├── license.svelte.ts │ │ │ ├── number-format.svelte.ts │ │ │ ├── query-limit.svelte.ts │ │ │ ├── router.svelte.ts │ │ │ ├── schema.svelte.ts │ │ │ ├── session.svelte.ts │ │ │ ├── tabs.svelte.ts │ │ │ ├── theme.svelte.ts │ │ │ └── toast.svelte.ts │ │ ├── types/ │ │ │ ├── alerts.ts │ │ │ ├── api.ts │ │ │ ├── brain.ts │ │ │ ├── governance.ts │ │ │ ├── models.ts │ │ │ ├── pipelines.ts │ │ │ ├── query.ts │ │ │ └── schema.ts │ │ └── utils/ │ │ ├── calendar.ts │ │ ├── ch-types.ts │ │ ├── chart-transform.ts │ │ ├── dashboard-time.test.ts │ │ ├── dashboard-time.ts │ │ ├── export.ts │ │ ├── format.ts │ │ ├── grid-layout.ts │ │ ├── lineage-layout.ts │ │ ├── safe-json.ts │ │ ├── sql.ts │ │ ├── stats.ts │ │ └── uuid.ts │ ├── main.ts │ └── pages/ │ ├── Admin.svelte │ ├── Brain.svelte │ ├── Dashboards.svelte │ ├── Governance.svelte │ ├── Home.svelte │ ├── Login.svelte │ ├── Models.svelte │ ├── Pipelines.svelte │ ├── SavedQueries.svelte │ ├── Schedules.svelte │ └── Settings.svelte ├── svelte.config.js ├── tsconfig.app.json ├── tsconfig.json ├── tsconfig.node.json ├── vite.config.d.ts ├── vite.config.ts └── vitest.config.ts ================================================ FILE CONTENTS ================================================ ================================================ FILE: .dockerignore ================================================ .git .github .claude .DS_Store ch-ui ch-ui-server.pid data dist tmp node_modules ui/node_modules ui/.svelte-kit ui/dist ui/.DS_Store ================================================ FILE: .github/ISSUE_TEMPLATE/bug.yml ================================================ name: Bug Report description: Found something that doesn't work as expected? 
body: - type: dropdown id: os attributes: label: Operating System description: What OS are you running CH-UI on? options: - Linux - macOS - Windows - Other validations: required: true - type: dropdown id: arch attributes: label: Architecture description: What architecture? options: - x86_64 (amd64) - ARM64 (aarch64 / Apple Silicon) - Other validations: required: true - type: textarea id: repro attributes: label: How did you encounter the bug? description: How can this bug be reproduced? Please provide steps to reproduce. placeholder: |- 1. Start CH-UI with... 2. Go to... 3. Click on... validations: required: true - type: textarea id: expected attributes: label: What did you expect? description: What was supposed to happen? validations: required: true - type: textarea id: actual attributes: label: Actual Result description: What actually happened? validations: required: true - type: textarea id: version attributes: label: Version description: What version of CH-UI are you using? placeholder: e.g. 2.0.0 validations: required: true - type: textarea id: logs attributes: label: Logs / Error Output description: Any relevant logs or error messages from the terminal? render: shell validations: required: false - type: markdown attributes: value: |- ### All done, now, just submit the issue and I will do my best to take care of it! validations: required: false ================================================ FILE: .github/ISSUE_TEMPLATE/feature.yml ================================================ name: Feature Request description: Tell us about something ch-UI doesn't do yet, but should! body: - type: textarea id: idea attributes: label: Idea Statement description: Which is the feature you would like to see implemented? placeholder: |- I want to be able to do anything I want, whenever I want. Because my ideas are the best. 
validations: required: true - type: textarea id: expected attributes: label: Feature implementation brainstorm description: All your ideas are welcome, let's brainstorm together. placeholder: |- Create the next big feature that will solve all our problems. validations: required: false - type: markdown attributes: value: |- ## Thanks 🙏 validations: required: false ================================================ FILE: .github/workflows/release.yml ================================================ name: Release on: push: tags: - 'v*' env: REGISTRY: ghcr.io IMAGE_NAME: ${{ github.repository }} permissions: contents: write packages: write concurrency: group: release-${{ github.ref }} cancel-in-progress: false jobs: release: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Setup Go uses: actions/setup-go@v5 with: go-version: '1.24' - name: Setup Bun uses: oven-sh/setup-bun@v2 - name: Build frontend run: make build-frontend - name: Extract version id: version run: echo "version=${GITHUB_REF_NAME}" >> "$GITHUB_OUTPUT" - name: Cross-compile binaries env: VERSION: ${{ steps.version.outputs.version }} COMMIT: ${{ github.sha }} DATE: ${{ github.event.head_commit.timestamp }} run: | LDFLAGS="-s -w -X main.Version=${VERSION} -X main.Commit=${COMMIT} -X main.BuildDate=${DATE}" CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "${LDFLAGS}" -o dist/ch-ui-linux-amd64 . CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "${LDFLAGS}" -o dist/ch-ui-linux-arm64 . CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags "${LDFLAGS}" -o dist/ch-ui-darwin-amd64 . CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -ldflags "${LDFLAGS}" -o dist/ch-ui-darwin-arm64 .
- name: Alpine smoke test (linux-amd64) run: | chmod +x dist/ch-ui-linux-amd64 docker run --rm -v "$PWD/dist:/dist:ro" alpine:3.20 /dist/ch-ui-linux-amd64 version - name: Generate checksums working-directory: dist run: | sha256sum ch-ui-* > checksums.txt cat checksums.txt - name: Create or update GitHub Release env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} VERSION: ${{ steps.version.outputs.version }} run: | set -euo pipefail TAG="${VERSION}" TITLE="${TAG}" REPO="${{ github.repository }}" NOTES_FILE="$(mktemp)" cat > "${NOTES_FILE}" <<EOF CH-UI ${TAG} EOF if gh release view "${TAG}" >/dev/null 2>&1; then gh release edit "${TAG}" --title "${TITLE}" --notes-file "${NOTES_FILE}" else gh release create "${TAG}" --title "${TITLE}" --notes-file "${NOTES_FILE}" fi gh release upload "${TAG}" \ dist/ch-ui-linux-amd64 \ dist/ch-ui-linux-arm64 \ dist/ch-ui-darwin-amd64 \ dist/ch-ui-darwin-arm64 \ dist/checksums.txt \ --clobber - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Verify GHCR credentials env: GHCR_PAT: ${{ secrets.GHCR_PAT }} run: | if [ -z "${GHCR_PAT}" ]; then echo "GHCR_PAT secret is required to publish ghcr.io/${{ github.repository }} images." exit 1 fi - name: Log in to GHCR uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GHCR_PAT }} - name: Build and push Docker image uses: docker/build-push-action@v6 with: context: .
file: ./Dockerfile push: true platforms: linux/amd64,linux/arm64 tags: | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.version.outputs.version }} ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest build-args: | VERSION=${{ steps.version.outputs.version }} COMMIT=${{ github.sha }} BUILD_DATE=${{ github.event.head_commit.timestamp }} ================================================ FILE: .gitignore ================================================ # Logs *.log npm-debug.log* yarn-debug.log* yarn-error.log* pnpm-debug.log* lerna-debug.log* # Dependencies node_modules # Build output dist/ dist-ssr *.local .claude # Frontend build output (embedded into Go binary) ui/dist/ !ui/dist/.gitkeep # Editor directories and files .vscode/* !.vscode/extensions.json .idea .DS_Store *.suo *.ntvs* *.njsproj *.sln *.sw? # Environment .env .env.local !.env.example # Docker local data .clickhouse_local_data # SQLite database /data *.db *.db-shm *.db-wal # Go binary (built by Makefile) ch-ui .claude .gocache/ # License tool secrets license/private.pem license/*.json license/*.log license/go.* license/licensetool license/README.md license/main.go posts.md ch-ui-server.pid CLAUDE.md ================================================ FILE: .gitpod.yml ================================================ image: gitpod/workspace-full tasks: - name: ClickHouse init: docker pull clickhouse/clickhouse-server:latest command: | docker run -d --rm \ --name clickhouse \ -p 8123:8123 \ -p 9000:9000 \ clickhouse/clickhouse-server:latest echo "ClickHouse running on port 8123" - name: CH-UI init: | curl -L -o ch-ui https://github.com/caioricciuti/ch-ui/releases/latest/download/ch-ui-linux-amd64 chmod +x ch-ui command: | # Wait for ClickHouse to be ready echo "Waiting for ClickHouse..." while ! curl -s http://localhost:8123/ping > /dev/null 2>&1; do sleep 1; done echo "ClickHouse is up. Starting CH-UI..." 
CLICKHOUSE_URL=http://localhost:8123 ./ch-ui ports: - port: 3488 onOpen: open-browser visibility: public - port: 8123 onOpen: ignore - port: 9000 onOpen: ignore ================================================ FILE: Dockerfile ================================================ # syntax=docker/dockerfile:1.7 FROM oven/bun:1.2.23 AS ui-builder WORKDIR /src/ui COPY ui/package.json ui/bun.lock ./ RUN bun install --frozen-lockfile COPY ui/ ./ ENV CHUI_VITE_MINIFY=true \ CHUI_VITE_REPORT_COMPRESSED=false RUN bun run build FROM golang:1.25-alpine AS go-builder WORKDIR /src ARG VERSION=dev ARG COMMIT=none ARG BUILD_DATE=unknown ARG TARGETOS=linux ARG TARGETARCH=amd64 COPY go.mod go.sum ./ RUN go mod download COPY . . COPY --from=ui-builder /src/ui/dist ./ui/dist RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} \ go build -trimpath -ldflags "-s -w -X main.Version=${VERSION} -X main.Commit=${COMMIT} -X main.BuildDate=${BUILD_DATE}" -o /out/ch-ui . FROM alpine:3.20 AS runtime RUN addgroup -S chui && adduser -S -G chui chui \ && apk add --no-cache ca-certificates tzdata \ && mkdir -p /app/data \ && chown -R chui:chui /app WORKDIR /app COPY --from=go-builder /out/ch-ui /usr/local/bin/ch-ui ENV DATABASE_PATH=/app/data/ch-ui.db EXPOSE 3488 VOLUME ["/app/data"] USER chui ENTRYPOINT ["ch-ui", "server"] ================================================ FILE: LICENSE.md ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to the Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by the Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding any notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2024-2026 Caio Ricciuti Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
================================================ FILE: Makefile ================================================ # CH-UI Makefile # Single binary: server + agent + embedded frontend VERSION ?= $(shell cat VERSION 2>/dev/null || git describe --tags --always --dirty 2>/dev/null || echo "dev") COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo "none") DATE ?= $(shell date -u '+%Y-%m-%dT%H:%M:%SZ') LDFLAGS = -s -w \ -X main.Version=$(VERSION) \ -X main.Commit=$(COMMIT) \ -X main.BuildDate=$(DATE) BINARY = ch-ui .PHONY: app build rebuild from-scratch build-frontend build-go dev test clean tidy vet help ## app: Build frontend + Go binary (production-ready) app: build-frontend build-go ## build: Build everything (frontend + Go binary) build: app ## rebuild: Clean artifacts, then build everything rebuild: $(MAKE) clean $(MAKE) build ## from-scratch: Alias for rebuild from-scratch: rebuild ## build-frontend: Build the Svelte frontend build-frontend: cd ui && bun install @cd ui && (CHUI_VITE_MINIFY=true CHUI_VITE_REPORT_COMPRESSED=false bun run build || \ (echo "Frontend build was killed; retrying with low-memory profile (no minify)..." && \ CHUI_VITE_MINIFY=false CHUI_VITE_REPORT_COMPRESSED=false bun run build)) ## build-go: Build just the Go binary (skip frontend rebuild) build-go: CGO_ENABLED=0 go build -ldflags "$(LDFLAGS)" -o $(BINARY) . ## dev: Start the server in dev mode (expects Vite running on :5173) dev: go run -ldflags "$(LDFLAGS)" . server --dev ## test: Run all Go tests test: go test ./... -v -count=1 ## clean: Remove build artifacts clean: rm -f $(BINARY) rm -rf ui/dist/ ## tidy: Clean up Go modules tidy: go mod tidy ## vet: Run go vet vet: go vet ./... ## help: Show this help message help: @echo "Available targets:" @grep -E '^## ' Makefile | sed 's/## / /' ================================================ FILE: README.md ================================================

CH-UI Logo

CH-UI

The open-source ClickHouse management platform.
SQL editor, dashboards, AI copilot, data pipelines, models, and admin — all in one binary. Free.

Version License Stars Docker

--- ## Why CH-UI? Most ClickHouse tools give you a query box and call it a day. CH-UI gives you a full workspace — and almost everything is **free and open source**. Download one binary. Run it. Get: - A multi-tab **SQL editor** with formatting, profiling, and streaming results - **Dashboards** with a drag-and-drop panel builder and multiple chart types - **Brain** — an AI assistant that understands your schema (OpenAI, Ollama, or any compatible provider) - **Data pipelines** — visual builder for Webhook, S3, Kafka, and DB sources into ClickHouse - **Models** — dbt-style SQL transformations with dependency graphs and scheduling - **Admin panel** — user management, connection management, provider configuration - **Saved queries**, **schema explorer**, **connection management**, and more No Docker requirement. No external dependencies. No signup. --- ## Table of Contents - [Features (Free)](#features-free) - [Community vs Pro](#community-vs-pro) - [Quick Start](#quick-start) - [Quick Start (Docker)](#quick-start-docker) - [Architecture](#architecture) - [Remote ClickHouse (Tunnel)](#remote-clickhouse-tunnel) - [CLI Reference](#cli-reference) - [Configuration](#configuration) - [Production Checklist](#production-checklist) - [Troubleshooting](#troubleshooting) - [Development](#development) - [Upgrade](#upgrade) - [Legal](#legal) - [Contributing](#contributing) --- ## Features (Free) Everything below is included in the free Community edition under Apache 2.0. 
### SQL Editor - Multi-tab interface with persistent state - CodeMirror 6 with SQL syntax highlighting and autocomplete - Query formatting and beautification - Streaming results via SSE — no timeout on long queries - **Query cost estimation** — see estimated rows and parts to scan before running (like BigQuery's dry run) - Query profiling (pulls from `system.query_log`) with estimate vs actual accuracy comparison - Query plan analysis (EXPLAIN with parsed tree view) - Configurable max result rows and query timeout - Guardrails enforcement (query validation before execution) ### Schema Explorer - Full database/table/column tree browser - Table data preview with pagination - Column type introspection - Search across databases and tables ### Dashboards - Create unlimited dashboards - Drag-and-drop panel builder - Multiple chart types (line, bar, scatter, area, and more via uplot) - Time range selector with presets (1h, 24h, 7d, 30d, custom) - Timezone support - Auto-refresh control - Each panel runs its own SQL query against your ClickHouse ### Brain (AI Assistant) - Chat with your data using natural language - Multi-chat support with full history persistence - **Provider support:** OpenAI, OpenAI-compatible APIs (Groq, Together, etc.), Ollama (local LLMs) - Admin-controlled model and provider activation - Schema-aware context (attach up to 10 tables as context per chat) - SQL artifact generation — run generated queries directly from chat - Brain skills (configurable system prompts/instructions) - Token usage tracking - Langfuse integration for LLM observability ### Data Pipelines - Visual pipeline canvas (drag-and-drop with XyFlow) - **Source connectors:** Webhook (inbound HTTP), Database (SQL query), S3, Kafka (with SCRAM auth) - **Sink:** ClickHouse (native insert with configurable batch size) - Pipeline start/stop controls - Run history, metrics, and error tracking - Real-time monitoring (rows ingested, bytes, batches, errors) ### Models (SQL Transformations) - 
dbt-style SQL models with `table`, `view`, and `incremental` materialization - Model dependency graph (DAG visualization) - Execution with dependency ordering - Run history and results tracking - Table engine configuration per model - Can be scheduled via the scheduler (Pro) or run manually ### Saved Queries - Save queries with titles and descriptions - Sort by date, name, or query length - Filter, search, copy, and organize - Quick access from the sidebar ### Admin Panel - User management (create, delete, assign roles) - ClickHouse user management (create users, update passwords, delete) - Connection management with multi-connection support - Brain provider and model configuration - Brain skill management - Langfuse integration settings - System statistics dashboard ### Connections & Tunnel - Multi-connection support (manage multiple ClickHouse instances) - Secure WebSocket tunnel for remote ClickHouse access - Token-based agent authentication - Connection health monitoring - Install connector as OS service (`ch-ui service install`) ### Other - Dark mode - Session-based authentication with rate limiting - Security headers (CSP, X-Frame-Options, etc.) - Health check endpoint (`/health`) - Self-update (`ch-ui update`) - Shell completion generation --- ## Community vs Pro Almost everything is free. Pro adds enterprise governance and scheduling. 
| Capability | Community (Free) | Pro | |---|:---:|:---:| | SQL editor + explorer + formatting + profiling | **Yes** | Yes | | Saved queries | **Yes** | Yes | | Dashboards + panel builder | **Yes** | Yes | | Brain (AI assistant, multi-provider) | **Yes** | Yes | | Data pipelines (Webhook, S3, Kafka, DB) | **Yes** | Yes | | Models (SQL transformations, DAG) | **Yes** | Yes | | Admin panel + user management | **Yes** | Yes | | Multi-connection management | **Yes** | Yes | | Tunnel (remote ClickHouse) | **Yes** | Yes | | Scheduled query jobs + cron + history | - | **Yes** | | Governance (metadata, visual lineage graph, column-level lineage, access matrix) | - | **Yes** | | Policies + incidents + violations | - | **Yes** | | Alerting (SMTP, Resend, Brevo) | - | **Yes** | See: [`docs/license.md`](docs/license.md) --- ## Quick Start ### 1) Download Linux (amd64): ```bash curl -L -o ch-ui https://github.com/caioricciuti/ch-ui/releases/latest/download/ch-ui-linux-amd64 chmod +x ch-ui ``` Linux (arm64): ```bash curl -L -o ch-ui https://github.com/caioricciuti/ch-ui/releases/latest/download/ch-ui-linux-arm64 chmod +x ch-ui ``` macOS (Apple Silicon): ```bash curl -L -o ch-ui https://github.com/caioricciuti/ch-ui/releases/latest/download/ch-ui-darwin-arm64 chmod +x ch-ui ``` macOS (Intel): ```bash curl -L -o ch-ui https://github.com/caioricciuti/ch-ui/releases/latest/download/ch-ui-darwin-amd64 chmod +x ch-ui ``` Optional — verify checksum: ```bash curl -L -o checksums.txt https://github.com/caioricciuti/ch-ui/releases/latest/download/checksums.txt sha256sum -c checksums.txt --ignore-missing ``` ### 2) Run ```bash sudo install -m 755 ch-ui /usr/local/bin/ch-ui ch-ui ``` Or just `./ch-ui` from the download folder. Open `http://localhost:3488` and log in with your ClickHouse credentials. 
--- ## Quick Start (Docker) ```bash docker run --rm \ -p 3488:3488 \ -v ch-ui-data:/app/data \ -e CLICKHOUSE_URL=http://host.docker.internal:8123 \ ghcr.io/caioricciuti/ch-ui:latest ``` - On Linux, replace `host.docker.internal` with a host/IP reachable from the container. - Persisted state is stored in `/app/data/ch-ui.db` (volume: `ch-ui-data`). --- ## Architecture CH-UI ships as a single binary with two operating modes: - **`server`** — web app + API + WebSocket tunnel gateway (default) - **`connect`** — lightweight agent that exposes local ClickHouse over secure WebSocket ```mermaid flowchart LR U["Browser"] --> S["CH-UI Server\n(UI + API + Gateway)"] S <--> DB["SQLite\n(state, settings, chats, dashboards)"] A["ch-ui connect\n(Agent)"] <--> S A --> CH["ClickHouse"] ``` For local use, the server starts an embedded connector automatically against `localhost:8123`. **Tech stack:** Go backend (chi v5, SQLite WAL mode), Svelte 5 frontend (TypeScript, Vite, TailwindCSS), embedded at build time. --- ## Remote ClickHouse (Tunnel) Connect to ClickHouse instances running on other machines using the secure WebSocket tunnel. **Server (VM2):** ```bash ch-ui server --port 3488 ``` **Agent (VM1, where ClickHouse runs):** ```bash ch-ui connect --url wss://your-ch-ui-domain/connect --key cht_your_tunnel_token ``` ### Tunnel key management Run these on the server host: ```bash ch-ui tunnel create --name "vm1-clickhouse" # Create connection + key ch-ui tunnel list # List all connections ch-ui tunnel show # Show token + setup commands ch-ui tunnel rotate # Rotate token (old one invalidated) ch-ui tunnel delete # Delete connection ``` - Token can also be generated from the Admin UI. - Agent only needs outbound access to the server's `/connect` endpoint. - Add `--takeover` to replace a stale agent session. 
- Install as OS service: `ch-ui service install --key cht_xxx --url wss://host/connect` For full hardening guide: [`docs/production-runbook.md`](docs/production-runbook.md) --- ## CLI Reference ### Quick start commands ```bash ch-ui # Start server (local ClickHouse) ch-ui server start --detach # Start in background ch-ui server status # Check if running ch-ui server stop # Stop server ``` ### Full command map | Command | Description | |---|---| | `ch-ui` / `ch-ui server` | Start web app + API + gateway | | `ch-ui connect` | Start tunnel agent next to ClickHouse | | `ch-ui tunnel create/list/show/rotate/delete` | Manage tunnel keys (server host) | | `ch-ui service install/start/stop/status/logs/uninstall` | Manage connector as OS service | | `ch-ui update` | Update to latest release | | `ch-ui version` | Print version | | `ch-ui completion bash/zsh/fish` | Generate shell completions | | `ch-ui uninstall` | Remove CH-UI from system | ### Server flags | Flag | Default | Description | |---|---|---| | `--port, -p` | `3488` | HTTP port | | `--clickhouse-url` | `http://localhost:8123` | Local ClickHouse URL | | `--connection-name` | `Local ClickHouse` | Display name for local connection | | `--config, -c` | - | Path to `server.yaml` | | `--detach` | - | Run in background | | `--dev` | - | Development mode (proxy to Vite) | ### Connect flags | Flag | Default | Description | |---|---|---| | `--url` | - | WebSocket tunnel URL (`wss://`) | | `--key` | - | Tunnel token (`cht_...`) | | `--clickhouse-url` | `http://localhost:8123` | Local ClickHouse | | `--config, -c` | - | Path to `config.yaml` | | `--detach` | - | Run in background | | `--takeover` | - | Replace stale agent session | --- ## Configuration CH-UI works without config files. You only need them for production defaults or service-managed startup. 
### Config file locations | File | macOS | Linux | |---|---|---| | `server.yaml` | `~/.config/ch-ui/server.yaml` | `/etc/ch-ui/server.yaml` | | `config.yaml` | `~/.config/ch-ui/config.yaml` | `/etc/ch-ui/config.yaml` | **Priority:** CLI flags > environment variables > config file > built-in defaults ### Server config ```yaml port: 3488 app_url: https://ch-ui.yourcompany.com database_path: /var/lib/ch-ui/ch-ui.db clickhouse_url: http://localhost:8123 connection_name: Local ClickHouse app_secret_key: "change-this-in-production" allowed_origins: - https://ch-ui.yourcompany.com ``` | Key | Env var | Default | Description | |---|---|---|---| | `port` | `PORT` | `3488` | HTTP port | | `app_url` | `APP_URL` | `http://localhost:` | Public URL for links and tunnel inference | | `database_path` | `DATABASE_PATH` | `./data/ch-ui.db` | SQLite database location | | `clickhouse_url` | `CLICKHOUSE_URL` | `http://localhost:8123` | Embedded local connection target | | `connection_name` | `CONNECTION_NAME` | `Local ClickHouse` | Display name for local connection | | `app_secret_key` | `APP_SECRET_KEY` | auto-generated | Session encryption key | | `allowed_origins` | `ALLOWED_ORIGINS` | empty | CORS allowlist (comma-separated in env) | | `tunnel_url` | `TUNNEL_URL` | derived from port | Tunnel endpoint advertised to agents | ### Connector config ```yaml tunnel_token: "cht_your_token" clickhouse_url: "http://127.0.0.1:8123" tunnel_url: "wss://your-ch-ui-domain/connect" ``` | Key | Env var | Default | Description | |---|---|---|---| | `tunnel_token` | `TUNNEL_TOKEN` | required | Auth key from `ch-ui tunnel create` | | `clickhouse_url` | `CLICKHOUSE_URL` | `http://localhost:8123` | Local ClickHouse | | `tunnel_url` | `TUNNEL_URL` | `ws://127.0.0.1:3488/connect` | Server gateway endpoint | ### Changing the local ClickHouse URL ```bash # CLI flag ch-ui server --clickhouse-url http://127.0.0.1:8123 # Environment variable CLICKHOUSE_URL=http://127.0.0.1:8123 ch-ui server # With custom 
connection name ch-ui server --clickhouse-url http://127.0.0.1:8123 --connection-name "My ClickHouse" ``` The login page also has a **Can't login?** button that shows setup guidance. --- ## Production Checklist - [ ] Set a strong `APP_SECRET_KEY` - [ ] Set `APP_URL` to your public HTTPS URL - [ ] Configure `ALLOWED_ORIGINS` - [ ] Put CH-UI behind a TLS reverse proxy (Nginx example: [`ch-ui.conf`](ch-ui.conf)) - [ ] Ensure WebSocket upgrade support for `/connect` - [ ] Back up SQLite database regularly - [ ] Run connector as OS service on remote hosts ### Backup and restore ```bash # Backup cp /var/lib/ch-ui/ch-ui.db /var/backups/ch-ui-$(date +%F).db # Restore — stop server first, then replace the DB file ``` --- ## Troubleshooting ### Port already in use ```bash ch-ui server status # Check if already running ch-ui server stop # Stop the old process ``` ### Can't log in - **Authentication failed** — wrong ClickHouse credentials - **Connection unavailable** — wrong URL or connector offline - **Too many attempts** — wait for retry window; fix URL first if needed Click **Can't login?** on the login page for guided recovery, or restart with: ```bash ch-ui server --clickhouse-url 'http://127.0.0.1:8123' ``` Full guide: [`docs/cant-login.md`](docs/cant-login.md) ### Connector auth fails (`invalid token`) - Verify you copied the latest `cht_...` token - Check with `ch-ui tunnel list` - Rotate with `ch-ui tunnel rotate ` ### WebSocket fails behind proxy Your proxy must forward upgrades on `/connect`: - `Upgrade` and `Connection: upgrade` headers - Long read/send timeouts - Buffering disabled for tunnel path ### Health check ```bash curl http://localhost:3488/health ``` --- ## Development Requirements: Go 1.25+, Bun ```bash git clone https://github.com/caioricciuti/ch-ui.git cd ch-ui make build # Full production build (frontend + Go binary) ./ch-ui ``` Dev mode (two terminals): ```bash make dev # Terminal 1: Go server cd ui && bun install && bun run dev # Terminal 2: Vite 
dev server ``` Useful targets: `make build` | `make test` | `make vet` | `make clean` | `make rebuild` --- ## Upgrade ```bash ch-ui update ``` Downloads the latest release for your OS/arch, verifies checksum, and replaces the binary. --- ## Legal - Core license: [`LICENSE`](LICENSE) (Apache 2.0) - Licensing details: [`docs/license.md`](docs/license.md) - Terms: [`docs/legal/terms-of-service.md`](docs/legal/terms-of-service.md) - Privacy: [`docs/legal/privacy-policy.md`](docs/legal/privacy-policy.md) --- ## Contributing Issues and PRs are welcome. When contributing, please include: - Reproduction steps (for bugs) - Expected behavior - Migration notes (if schema/API changed) - Screenshots (for UI changes) # Gitpod One-Click Demo ## Try it now [![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/caioricciuti/ch-ui) > Launches a full CH-UI + ClickHouse environment in your browser. No install required. > Free tier: 50 hours/month, no credit card - Via Gitpod (https://www.gitpod.io/) ================================================ FILE: VERSION ================================================ v2.0.23 ================================================ FILE: ch-ui.conf ================================================ upstream ch-ui { server 127.0.0.1:3488; keepalive 64; } # ─── HTTP → HTTPS redirect ────────────────────────────────────────────────── server { listen 80; listen [::]:80; server_name ch-ui.example.com; # Let certbot handle ACME challenges location /.well-known/acme-challenge/ { root /var/www/certbot; } location / { return 301 https://$host$request_uri; } } # ─── HTTPS ─────────────────────────────────────────────────────────────────── server { listen 443 ssl; listen [::]:443 ssl; server_name ch-ui.example.com; # SSL certificates — managed by certbot ssl_certificate /etc/letsencrypt/live/ch-ui.example.com/fullchain.pem; ssl_certificate_key /etc/letsencrypt/live/ch-ui.example.com/privkey.pem; include 
/etc/letsencrypt/options-ssl-nginx.conf; ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # Security headers add_header X-Frame-Options "SAMEORIGIN" always; add_header X-Content-Type-Options "nosniff" always; add_header X-XSS-Protection "1; mode=block" always; add_header Referrer-Policy "strict-origin-when-cross-origin" always; # Gzip (static assets are already embedded in the Go binary) gzip on; gzip_vary on; gzip_min_length 1024; gzip_proxied any; gzip_types text/plain text/css text/xml text/javascript application/javascript application/json application/xml; # Agent tunnel WebSocket — keep alive indefinitely location = /connect { proxy_pass http://ch-ui; proxy_http_version 1.1; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; proxy_read_timeout 86400; proxy_send_timeout 86400; proxy_buffering off; } # Health check — no access log noise location = /health { proxy_pass http://ch-ui; proxy_http_version 1.1; proxy_set_header Host $host; access_log off; } # Agent binary downloads — large files location /download/ { proxy_pass http://ch-ui; proxy_http_version 1.1; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_buffering off; proxy_read_timeout 300; } # Everything else — API, frontend, install script location / { proxy_pass http://ch-ui; proxy_http_version 1.1; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_read_timeout 300; proxy_send_timeout 300; } } ================================================ FILE: cmd/connect.go ================================================ package cmd import ( "errors" "fmt" 
"io" "os" "os/exec" "os/signal" "path/filepath" "strconv" "strings" "syscall" "github.com/caioricciuti/ch-ui/connector" "github.com/caioricciuti/ch-ui/connector/config" "github.com/caioricciuti/ch-ui/connector/service" "github.com/caioricciuti/ch-ui/connector/ui" "github.com/spf13/cobra" ) var ( connectURL string connectKey string connectCHURL string connectDetach bool connectTakeover bool connectConfigPath string ) var connectCmd = &cobra.Command{ Use: "connect", Short: "Connect to a CH-UI server as a tunnel", Long: `Connect this machine's ClickHouse instance to a remote CH-UI server via a secure WebSocket tunnel. Queries executed in the CH-UI dashboard will be forwarded through this tunnel to your local ClickHouse.`, RunE: func(cmd *cobra.Command, args []string) error { u := ui.New(false, false, false, false) u.Logo("") // Build CLI config from flags cliCfg := &config.Config{} if cmd.Flags().Changed("key") { cliCfg.Token = connectKey } if cmd.Flags().Changed("url") { cliCfg.TunnelURL = connectURL } if cmd.Flags().Changed("clickhouse-url") { cliCfg.ClickHouseURL = connectCHURL } cliCfg.Takeover = connectTakeover cfg, err := config.Load(connectConfigPath, cliCfg) if err != nil { u.Error("Configuration error: %v", err) if strings.Contains(strings.ToLower(err.Error()), "tunnel token is required") { u.Info("Create a tunnel token on your CH-UI server host with:") u.Info(" ch-ui tunnel create --name ") u.Info("Then retry connect with --key (or set TUNNEL_TOKEN).") } return err } if connectDetach { pid, logPath, err := startDetached() if err != nil { return fmt.Errorf("failed to start in background: %w", err) } u.Success("Started in background (PID %d)", pid) if logPath != "" { u.Info("Logs: %s", logPath) } return nil } if !connectTakeover { if running, err := service.New().IsRunning(); err == nil && running { u.Info("CH-UI service is already running on this machine") u.Info("Use 'ch-ui service status' to inspect it") u.Info("Use 'ch-ui service stop' to stop it before 
running connect") return nil } } releasePID, err := acquirePIDLock() if err != nil { u.DiagnosticError(ui.ErrorTypeConfig, "Local host", err.Error(), []string{ "Check current state with: ch-ui service status", "If this is stale, remove it and retry: rm -f " + pidFilePath(), }, ) return err } defer releasePID() conn := connector.New(cfg, u) sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM) go func() { <-sigCh u.Info("Shutting down...") conn.Shutdown() }() if err := conn.Run(); err != nil { u.Error("Connection error: %v", err) return err } return nil }, } func init() { connectCmd.Flags().StringVar(&connectURL, "url", "", "CH-UI server WebSocket URL (ws:// or wss://)") connectCmd.Flags().StringVar(&connectKey, "key", "", "Tunnel token (cht_..., create on server with: ch-ui tunnel create --name )") connectCmd.Flags().StringVar(&connectCHURL, "clickhouse-url", "", "ClickHouse HTTP URL (default: http://localhost:8123)") connectCmd.Flags().BoolVar(&connectDetach, "detach", false, "Run in background") connectCmd.Flags().BoolVar(&connectTakeover, "takeover", false, "Replace an existing active session") connectCmd.Flags().StringVarP(&connectConfigPath, "config", "c", "", "Path to config file") rootCmd.AddCommand(connectCmd) } // ── Detach ────────────────────────────────────────────────────────────────── func startDetached() (int, string, error) { exe, err := os.Executable() if err != nil { return 0, "", err } exe, err = filepath.EvalSymlinks(exe) if err != nil { return 0, "", err } args := sanitizeDetachedArgs(os.Args[1:]) if len(args) == 0 || args[0] != "connect" { return 0, "", fmt.Errorf("detach must be started from 'connect' command") } logDir := service.GetConfigDir() if err := os.MkdirAll(logDir, 0755); err != nil { return 0, "", err } logPath := filepath.Join(logDir, "ch-ui-connect.log") logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) if err != nil { return 0, "", err } cmd := exec.Command(exe, 
args...) cmd.Env = append(os.Environ(), "CHUI_DETACHED=1") cmd.Stdout = logFile cmd.Stderr = logFile setProcessDetachedAttr(cmd) if err := cmd.Start(); err != nil { logFile.Close() return 0, "", err } _ = logFile.Close() return cmd.Process.Pid, logPath, nil } func sanitizeDetachedArgs(in []string) []string { args := make([]string, 0, len(in)) for _, a := range in { if a == "--detach" || strings.HasPrefix(a, "--detach=") { continue } args = append(args, a) } return args } // ── PID guard ─────────────────────────────────────────────────────────────── func pidFilePath() string { return filepath.Join(service.GetConfigDir(), "ch-ui.pid") } func acquirePIDLock() (func(), error) { pidPath := pidFilePath() if err := os.MkdirAll(filepath.Dir(pidPath), 0755); err != nil { return nil, fmt.Errorf("failed to create state dir: %w", err) } for attempts := 0; attempts < 2; attempts++ { f, err := os.OpenFile(pidPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600) if err == nil { pid := os.Getpid() _, writeErr := f.WriteString(strconv.Itoa(pid)) closeErr := f.Close() if writeErr != nil { _ = os.Remove(pidPath) return nil, fmt.Errorf("failed to write pid file: %w", writeErr) } if closeErr != nil { _ = os.Remove(pidPath) return nil, fmt.Errorf("failed to finalize pid file: %w", closeErr) } return func() { currentPID, readErr := readPIDFile(pidPath) if readErr == nil && currentPID != os.Getpid() { return } _ = os.Remove(pidPath) }, nil } if !errors.Is(err, os.ErrExist) { return nil, fmt.Errorf("failed to create pid file: %w", err) } existingPID, readErr := readPIDFile(pidPath) if readErr != nil { _ = os.Remove(pidPath) continue } if isProcessRunning(existingPID) { return nil, fmt.Errorf("another ch-ui connect process is already running (PID %d)", existingPID) } _ = os.Remove(pidPath) } return nil, fmt.Errorf("failed to acquire lock at %s", pidPath) } func readPIDFile(path string) (int, error) { raw, err := os.ReadFile(path) if err != nil { return 0, err } pid, err := 
strconv.Atoi(strings.TrimSpace(string(raw))) if err != nil || pid <= 0 { return 0, fmt.Errorf("invalid pid file") } return pid, nil } // ── Helpers ───────────────────────────────────────────────────────────────── func copyFile(src, dst string) error { in, err := os.Open(src) if err != nil { return err } defer in.Close() out, err := os.Create(dst) if err != nil { return err } defer out.Close() if _, err := io.Copy(out, in); err != nil { return err } return out.Close() } func fileExists(path string) bool { _, err := os.Stat(path) return err == nil } ================================================ FILE: cmd/connect_detach_unix.go ================================================ //go:build darwin || linux package cmd import ( "os/exec" "syscall" ) func setProcessDetachedAttr(cmd *exec.Cmd) { cmd.SysProcAttr = &syscall.SysProcAttr{ Setsid: true, } } ================================================ FILE: cmd/connect_detach_windows.go ================================================ //go:build windows package cmd import "os/exec" func setProcessDetachedAttr(cmd *exec.Cmd) { // No-op on windows. 
} ================================================ FILE: cmd/connect_process_unix.go ================================================ //go:build darwin || linux package cmd import ( "os" "syscall" ) func isProcessRunning(pid int) bool { if pid <= 0 { return false } p, err := os.FindProcess(pid) if err != nil { return false } return p.Signal(syscall.Signal(0)) == nil } ================================================ FILE: cmd/connect_process_windows.go ================================================ //go:build windows package cmd func isProcessRunning(pid int) bool { return pid > 0 } ================================================ FILE: cmd/root.go ================================================ package cmd import ( "bufio" "fmt" "os" "strings" "github.com/spf13/cobra" ) var rootCmd = &cobra.Command{ Use: "ch-ui", Short: "CH-UI - ClickHouse UI and management platform", Long: "CH-UI is a single binary that serves a ClickHouse management platform for local and remote deployments.", } func init() { loadEnvFile(".env") } func Execute() { if len(os.Args) == 1 { rootCmd.SetArgs([]string{"server"}) } if err := rootCmd.Execute(); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } } // loadEnvFile reads a .env file and sets environment variables. // Existing env vars are NOT overwritten (real env takes precedence). // Silently does nothing if the file doesn't exist. 
func loadEnvFile(path string) { f, err := os.Open(path) if err != nil { return } defer f.Close() scanner := bufio.NewScanner(f) for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) if line == "" || strings.HasPrefix(line, "#") { continue } key, val, ok := strings.Cut(line, "=") if !ok { continue } key = strings.TrimSpace(key) val = strings.TrimSpace(val) // Strip surrounding quotes if len(val) >= 2 && ((val[0] == '"' && val[len(val)-1] == '"') || (val[0] == '\'' && val[len(val)-1] == '\'')) { val = val[1 : len(val)-1] } // Don't overwrite existing env vars if os.Getenv(key) == "" { os.Setenv(key, val) } } } ================================================ FILE: cmd/server.go ================================================ package cmd import ( "context" "errors" "fmt" "io/fs" "log/slog" "net" "os" "os/exec" "os/signal" "path/filepath" "strconv" "strings" "syscall" "time" "github.com/caioricciuti/ch-ui/internal/config" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/embedded" "github.com/caioricciuti/ch-ui/internal/server" "github.com/caioricciuti/ch-ui/internal/version" "github.com/spf13/cobra" ) // FrontendFS holds the embedded frontend filesystem, set by main before Execute(). 
var FrontendFS fs.FS var ( serverPort int devMode bool serverClickHouse string serverConnectionName string serverDetach bool serverConfig string serverPIDFile string serverStopTimeout time.Duration restartDetach bool ) var serverCmd = &cobra.Command{ Use: "server", Short: "Start the CH-UI server", Long: "Start the CH-UI HTTP server that serves the API, frontend, and tunnel gateway.", PersistentPreRun: func(cmd *cobra.Command, args []string) { serverPIDFile = resolvePIDFile(serverPIDFile) }, RunE: func(cmd *cobra.Command, args []string) error { return runServer(cmd) }, } var serverStartCmd = &cobra.Command{ Use: "start", Short: "Start the CH-UI server", RunE: func(cmd *cobra.Command, args []string) error { return runServer(cmd) }, } var serverStopCmd = &cobra.Command{ Use: "stop", Short: "Stop the CH-UI server", RunE: func(cmd *cobra.Command, args []string) error { stopped, err := stopServer(serverPIDFile, serverStopTimeout) if err != nil { return err } if stopped { fmt.Println("CH-UI server stopped") } return nil }, } var serverStatusCmd = &cobra.Command{ Use: "status", Short: "Show CH-UI server status", RunE: func(cmd *cobra.Command, args []string) error { pid, running, err := getRunningServerPID(serverPIDFile) if err != nil { return err } if running { fmt.Printf("CH-UI server is running (PID %d)\n", pid) fmt.Printf("PID file: %s\n", serverPIDFile) return nil } addr := fmt.Sprintf("127.0.0.1:%d", serverPort) if isTCPPortOpen(addr) { fmt.Printf("CH-UI server PID file not found, but port %d is in use.\n", serverPort) fmt.Printf("Another process may be listening on %s.\n", addr) return nil } fmt.Println("CH-UI server is not running") return nil }, } var serverRestartCmd = &cobra.Command{ Use: "restart", Short: "Restart the CH-UI server", RunE: func(cmd *cobra.Command, args []string) error { _, err := stopServer(serverPIDFile, serverStopTimeout) if err != nil { return err } if restartDetach { startArgs := buildServerStartArgs(cmd) pid, logPath, err := 
startDetachedServer(startArgs) if err != nil { return fmt.Errorf("failed to restart in background: %w", err) } fmt.Printf("CH-UI server restarted in background (PID %d)\n", pid) if logPath != "" { fmt.Printf("Logs: %s\n", logPath) } return nil } serverDetach = false return runServer(cmd) }, } func init() { pf := serverCmd.PersistentFlags() pf.IntVarP(&serverPort, "port", "p", 3488, "Port to listen on") pf.BoolVar(&devMode, "dev", false, "Enable development mode (proxy to Vite)") pf.StringVar(&serverClickHouse, "clickhouse-url", "", "Local ClickHouse HTTP URL for the embedded connection") pf.StringVar(&serverConnectionName, "connection-name", "", "Display name for the embedded local connection") pf.StringVarP(&serverConfig, "config", "c", "", "Path to config file") pf.StringVar(&serverPIDFile, "pid-file", "ch-ui-server.pid", "Path to server PID file") pf.DurationVar(&serverStopTimeout, "stop-timeout", 10*time.Second, "Graceful stop timeout") serverCmd.Flags().BoolVar(&serverDetach, "detach", false, "Run server in background") serverStartCmd.Flags().BoolVar(&serverDetach, "detach", false, "Run server in background") serverRestartCmd.Flags().BoolVar(&restartDetach, "detach", true, "Run restarted server in background") serverCmd.AddCommand(serverStartCmd, serverStopCmd, serverStatusCmd, serverRestartCmd) rootCmd.AddCommand(serverCmd) } func runServer(cmd *cobra.Command) error { if serverDetach { startArgs := buildServerStartArgs(cmd) pid, logPath, err := startDetachedServer(startArgs) if err != nil { return fmt.Errorf("failed to start in background: %w", err) } fmt.Printf("CH-UI server started in background (PID %d)\n", pid) if logPath != "" { fmt.Printf("Logs: %s\n", logPath) } return nil } if err := preparePIDFileForStart(serverPIDFile); err != nil { return err } if err := writeServerPIDFile(serverPIDFile, os.Getpid()); err != nil { return fmt.Errorf("failed to write PID file %q: %w", serverPIDFile, err) } defer cleanupServerPIDFile(serverPIDFile, os.Getpid()) // 
Load configuration cfg := config.Load(serverConfig) // Override with flags if provided if cmd.Flags().Changed("port") { cfg.Port = serverPort } if cmd.Flags().Changed("clickhouse-url") { cfg.ClickHouseURL = strings.TrimSpace(serverClickHouse) } if cmd.Flags().Changed("connection-name") { cfg.ConnectionName = strings.TrimSpace(serverConnectionName) } // --dev flag is the authority for dev mode in the server command. // Without it, always serve the embedded frontend (production mode). cfg.DevMode = devMode // Setup structured logging logLevel := slog.LevelInfo if cfg.DevMode { logLevel = slog.LevelDebug } logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: logLevel})) slog.SetDefault(logger) slog.Info("Starting CH-UI server", "version", version.Version, "port", cfg.Port, "dev", cfg.DevMode, ) secretSource, err := config.EnsureAppSecretKey(cfg) if err != nil { return fmt.Errorf("failed to initialize app secret key: %w", err) } if secretSource == config.SecretKeySourceGenerated { slog.Warn("APP_SECRET_KEY was not configured; generated a persisted secret key", "path", config.AppSecretKeyPath(cfg.DatabasePath)) } else if secretSource == config.SecretKeySourceFile { slog.Info("Loaded persisted app secret key", "path", config.AppSecretKeyPath(cfg.DatabasePath)) } // Initialize database db, err := database.Open(cfg.DatabasePath) if err != nil { return fmt.Errorf("failed to open database: %w", err) } defer db.Close() slog.Info("Database initialized", "path", cfg.DatabasePath) // Load stored license from database if stored, err := db.GetSetting("license_json"); err == nil && stored != "" { cfg.LicenseJSON = stored slog.Info("License loaded from database") } // Create and start server srv := server.New(cfg, db, FrontendFS) // Start embedded agent (connects to local ClickHouse if configured) ea, err := embedded.Start(db, cfg.Port, cfg.ClickHouseURL, cfg.ConnectionName) if err != nil { slog.Warn("Failed to start embedded agent", "error", err) } // 
Graceful shutdown ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) defer stop() errCh := make(chan error, 1) go func() { errCh <- srv.Start() }() select { case err := <-errCh: if ea != nil { ea.Stop() } return err case <-ctx.Done(): slog.Info("Shutting down server...") if ea != nil { ea.Stop() } return srv.Shutdown(context.Background()) } } func buildServerStartArgs(cmd *cobra.Command) []string { args := []string{"server"} if cmd.Flags().Changed("port") { args = append(args, fmt.Sprintf("--port=%d", serverPort)) } if cmd.Flags().Changed("dev") && devMode { args = append(args, "--dev") } if cmd.Flags().Changed("config") && strings.TrimSpace(serverConfig) != "" { args = append(args, "--config", serverConfig) } if cmd.Flags().Changed("clickhouse-url") && strings.TrimSpace(serverClickHouse) != "" { args = append(args, "--clickhouse-url", serverClickHouse) } if cmd.Flags().Changed("connection-name") && strings.TrimSpace(serverConnectionName) != "" { args = append(args, "--connection-name", serverConnectionName) } // Always include absolute PID file path so the child process and // future update/restart commands can reliably locate the PID file // regardless of the caller's working directory. args = append(args, "--pid-file", serverPIDFile) if cmd.Flags().Changed("stop-timeout") { args = append(args, fmt.Sprintf("--stop-timeout=%s", serverStopTimeout.String())) } return args } func startDetachedServer(args []string) (int, string, error) { exe, err := os.Executable() if err != nil { return 0, "", err } exe, err = filepath.EvalSymlinks(exe) if err != nil { return 0, "", err } if err := preparePIDFileForStart(serverPIDFile); err != nil { return 0, "", err } logPath := filepath.Join(".", "ch-ui-server.log") logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) if err != nil { return 0, "", err } child := exec.Command(exe, args...) 
child.Env = append(os.Environ(), "CHUI_DETACHED=1") child.Stdout = logFile child.Stderr = logFile setProcessDetachedAttr(child) if err := child.Start(); err != nil { _ = logFile.Close() return 0, "", err } _ = logFile.Close() absLog, _ := filepath.Abs(logPath) return child.Process.Pid, absLog, nil } func stopServer(pidFile string, timeout time.Duration) (bool, error) { pid, running, err := getRunningServerPID(pidFile) if err != nil { return false, err } if !running { addr := fmt.Sprintf("127.0.0.1:%d", serverPort) if isTCPPortOpen(addr) { fmt.Printf("CH-UI server PID file not found, but port %d is in use.\n", serverPort) fmt.Printf("This can happen after upgrading from an older build without PID management.\n") fmt.Printf("Stop that process once manually, then start with this build.\n") fmt.Printf("Expected PID file: %s\n", pidFile) return false, nil } fmt.Println("CH-UI server is not running") return false, nil } proc, err := os.FindProcess(pid) if err != nil { return false, fmt.Errorf("failed to locate process %d: %w", pid, err) } if err := proc.Signal(syscall.SIGTERM); err != nil { if !processExists(pid) { _ = os.Remove(pidFile) return false, nil } return false, fmt.Errorf("failed to stop PID %d: %w", pid, err) } deadline := time.Now().Add(timeout) for time.Now().Before(deadline) { if !processExists(pid) { _ = os.Remove(pidFile) return true, nil } time.Sleep(200 * time.Millisecond) } return false, fmt.Errorf("timeout waiting for PID %d to stop (waited %s)", pid, timeout.String()) } func getRunningServerPID(pidFile string) (int, bool, error) { pid, err := readServerPIDFile(pidFile) if err != nil { if errors.Is(err, os.ErrNotExist) { return 0, false, nil } return 0, false, err } if processExists(pid) { return pid, true, nil } _ = os.Remove(pidFile) return 0, false, nil } func preparePIDFileForStart(pidFile string) error { pid, running, err := getRunningServerPID(pidFile) if err != nil { return err } if running { return fmt.Errorf("server already running (PID %d); 
stop it first with `ch-ui server stop`", pid) } return nil } func writeServerPIDFile(pidFile string, pid int) error { if strings.TrimSpace(pidFile) == "" { return fmt.Errorf("pid file path is empty") } dir := filepath.Dir(pidFile) if dir != "." && dir != "" { if err := os.MkdirAll(dir, 0o755); err != nil { return err } } return os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", pid)), 0o644) } func readServerPIDFile(pidFile string) (int, error) { data, err := os.ReadFile(pidFile) if err != nil { return 0, err } raw := strings.TrimSpace(string(data)) if raw == "" { return 0, fmt.Errorf("pid file %q is empty", pidFile) } pid, err := strconv.Atoi(raw) if err != nil || pid <= 0 { return 0, fmt.Errorf("invalid PID in %q", pidFile) } return pid, nil } func cleanupServerPIDFile(pidFile string, expectedPID int) { pid, err := readServerPIDFile(pidFile) if err != nil { return } if pid == expectedPID { _ = os.Remove(pidFile) } } func processExists(pid int) bool { if pid <= 0 { return false } proc, err := os.FindProcess(pid) if err != nil { return false } err = proc.Signal(syscall.Signal(0)) if err == nil { return true } if errors.Is(err, syscall.EPERM) { return true } var sysErr *os.SyscallError if errors.As(err, &sysErr) && errors.Is(sysErr.Err, syscall.EPERM) { return true } return false } func isTCPPortOpen(addr string) bool { conn, err := net.DialTimeout("tcp", addr, 400*time.Millisecond) if err != nil { return false } _ = conn.Close() return true } // resolvePIDFile converts a relative PID file path to absolute so that // server detection works regardless of the caller's working directory. 
func resolvePIDFile(pidFile string) string {
	if filepath.IsAbs(pidFile) {
		return pidFile
	}
	abs, err := filepath.Abs(pidFile)
	if err != nil {
		// Cannot resolve the cwd; fall back to the relative path as-is.
		return pidFile
	}
	return abs
}

================================================
FILE: cmd/service.go
================================================
package cmd

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/caioricciuti/ch-ui/connector/config"
	"github.com/caioricciuti/ch-ui/connector/service"
	"github.com/spf13/cobra"
)

// ── service (parent) ────────────────────────────────────────────────────────

// serviceCmd is the parent for all `ch-ui service ...` subcommands.
var serviceCmd = &cobra.Command{
	Use:   "service",
	Short: "Manage CH-UI as a system service",
}

// ── service install ─────────────────────────────────────────────────────────

// Flags for `ch-ui service install`.
var (
	svcInstallKey string // tunnel token written into the service config
	svcInstallURL string // server WebSocket URL override
	svcInstallCH  string // ClickHouse HTTP URL override
)

var serviceInstallCmd = &cobra.Command{
	Use:   "install",
	Short: "Install CH-UI connect as a system service",
	Long:  `Install CH-UI as a system service (launchd on macOS, systemd on Linux) so it automatically connects to the server on boot.`,
	RunE: func(cmd *cobra.Command, args []string) error {
		svc := service.New()
		if svc.IsInstalled() {
			fmt.Println("Service is already installed")
			fmt.Println("Use 'ch-ui service restart' to restart, or 'ch-ui service uninstall' first")
			return nil
		}
		// Resolve current binary (follow symlinks to the real file).
		currentBin, err := os.Executable()
		if err != nil {
			return fmt.Errorf("failed to determine current binary path: %w", err)
		}
		currentBin, err = filepath.EvalSymlinks(currentBin)
		if err != nil {
			return fmt.Errorf("failed to resolve binary path: %w", err)
		}
		// Copy binary to the well-known service location if needed.
		if currentBin != service.BinaryPath {
			fmt.Printf("Copying binary to %s...\n", service.BinaryPath)
			if err := copyFile(currentBin, service.BinaryPath); err != nil {
				return fmt.Errorf("failed to copy binary: %w (try: sudo cp %s %s)", err, currentBin, service.BinaryPath)
			}
			if err := os.Chmod(service.BinaryPath, 0755); err != nil {
				return fmt.Errorf("failed to set binary permissions: %w", err)
			}
			fmt.Printf("Binary installed at %s\n", service.BinaryPath)
		}
		// Create config file when a token was supplied; otherwise require an
		// existing config to already be present.
		configPath := service.GetConfigPath()
		if svcInstallKey != "" {
			configDir := service.GetConfigDir()
			if err := os.MkdirAll(configDir, 0755); err != nil {
				return fmt.Errorf("failed to create config directory: %w", err)
			}
			chURL := svcInstallCH
			if chURL == "" {
				chURL = config.Defaults.ClickHouseURL
			}
			tURL := svcInstallURL
			if tURL == "" {
				tURL = config.Defaults.TunnelURL
			}
			configContent := fmt.Sprintf(`# CH-UI Configuration
tunnel_token: "%s"
clickhouse_url: "%s"
tunnel_url: "%s"
`, svcInstallKey, chURL, tURL)
			// 0600: the config contains the tunnel token.
			if err := os.WriteFile(configPath, []byte(configContent), 0600); err != nil {
				return fmt.Errorf("failed to write config file: %w", err)
			}
			fmt.Printf("Configuration saved to %s\n", configPath)
		} else if !fileExists(configPath) {
			return fmt.Errorf("no config file found at %s and no --key provided\n\nUsage:\n ch-ui service install --key --url ", configPath)
		}
		// Install the service (platform-specific: launchd or systemd).
		fmt.Println("Installing service...")
		if err := svc.Install(configPath); err != nil {
			return fmt.Errorf("failed to install service: %w", err)
		}
		fmt.Println("Service installed and started")
		fmt.Println(" Check status: ch-ui service status")
		fmt.Println(" View logs: ch-ui service logs -f")
		return nil
	},
}

// ── service uninstall ───────────────────────────────────────────────────────

// Flags for `ch-ui service uninstall`.
var (
	svcUninstallPurge bool // also remove binary and config files
	svcUninstallForce bool // keep going even if individual steps fail
)

var serviceUninstallCmd = &cobra.Command{
	Use:   "uninstall",
	Short: "Uninstall the CH-UI service",
	Long:  "Stop and remove the system service. Use --purge to also remove the binary and config files.",
	RunE: func(cmd *cobra.Command, args []string) error {
		svc := service.New()
		if !svc.IsInstalled() && !svcUninstallForce {
			fmt.Println("Service is not installed")
			return nil
		}
		fmt.Println("Stopping service...")
		// Stop failures are non-fatal; Uninstall below is the real step.
		_ = svc.Stop()
		fmt.Println("Removing service configuration...")
		if err := svc.Uninstall(); err != nil {
			if !svcUninstallForce {
				return fmt.Errorf("failed to uninstall service: %w", err)
			}
			fmt.Printf("Warning: failed to uninstall service: %v (continuing with --force)\n", err)
		}
		fmt.Println("Service uninstalled")
		if svcUninstallPurge {
			// Purge is best-effort: report warnings, never abort.
			if fileExists(service.BinaryPath) {
				fmt.Printf("Removing binary %s...\n", service.BinaryPath)
				if err := os.Remove(service.BinaryPath); err != nil {
					fmt.Printf("Warning: failed to remove binary: %v\n", err)
				} else {
					fmt.Println("Binary removed")
				}
			}
			configDir := service.GetConfigDir()
			if fileExists(configDir) {
				fmt.Printf("Removing config directory %s...\n", configDir)
				if err := os.RemoveAll(configDir); err != nil {
					fmt.Printf("Warning: failed to remove config directory: %v\n", err)
				} else {
					fmt.Println("Configuration removed")
				}
			}
		}
		return nil
	},
}

// ── service start/stop/restart/status/logs ──────────────────────────────────

var serviceStartCmd = &cobra.Command{
	Use:   "start",
	Short: "Start the service",
	RunE: func(cmd *cobra.Command, args []string) error {
		if err := service.New().Start(); err != nil {
			return err
		}
		fmt.Println("Service started")
		return nil
	},
}

var serviceStopCmd = &cobra.Command{
	Use:   "stop",
	Short: "Stop the service",
	RunE: func(cmd *cobra.Command, args []string) error {
		if err := service.New().Stop(); err != nil {
			return err
		}
		fmt.Println("Service stopped")
		return nil
	},
}

var serviceRestartCmd = &cobra.Command{
	Use:   "restart",
	Short: "Restart the service",
	RunE: func(cmd *cobra.Command, args []string) error {
		if err := service.New().Restart(); err != nil {
			return err
		}
		fmt.Println("Service restarted")
		return nil
	},
}

var serviceStatusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show service status",
	RunE: func(cmd *cobra.Command, args []string) error {
		svc := service.New()
		if !svc.IsInstalled() {
			fmt.Println("Service is not installed")
			fmt.Println("Install with: ch-ui service install --key --url ")
			return nil
		}
		status, err := svc.Status()
		if err != nil {
			return fmt.Errorf("failed to get service status: %w", err)
		}
		// IsRunning errors are ignored: "false" is an acceptable answer.
		running, _ := svc.IsRunning()
		fmt.Println()
		fmt.Printf(" Service: %s\n", service.ServiceName)
		fmt.Printf(" Status: %s\n", status)
		fmt.Printf(" Running: %v\n", running)
		fmt.Printf(" Config: %s\n", service.GetConfigPath())
		if logPath := svc.GetLogPath(); logPath != "" {
			fmt.Printf(" Logs: %s\n", logPath)
		}
		fmt.Printf(" Platform: %s\n", svc.Platform())
		fmt.Println()
		return nil
	},
}

// Flags for `ch-ui service logs`.
var (
	svcLogsFollow bool
	svcLogsLines  int
)

var serviceLogsCmd = &cobra.Command{
	Use:   "logs",
	Short: "View service logs",
	RunE: func(cmd *cobra.Command, args []string) error {
		return service.New().Logs(svcLogsFollow, svcLogsLines)
	},
}

// ── init ────────────────────────────────────────────────────────────────────

func init() {
	// Install flags
	serviceInstallCmd.Flags().StringVar(&svcInstallKey, "key", "", "Tunnel token (cht_...)")
	serviceInstallCmd.Flags().StringVar(&svcInstallURL, "url", "", "CH-UI server WebSocket URL")
	serviceInstallCmd.Flags().StringVar(&svcInstallCH, "clickhouse-url", "", "ClickHouse HTTP URL")
	// Uninstall flags
	serviceUninstallCmd.Flags().BoolVar(&svcUninstallPurge, "purge", false, "Also remove binary and config files")
	serviceUninstallCmd.Flags().BoolVar(&svcUninstallForce, "force", false, "Force uninstall even if errors occur")
	// Logs flags
	serviceLogsCmd.Flags().BoolVarP(&svcLogsFollow, "follow", "f", false, "Follow log output")
	serviceLogsCmd.Flags().IntVarP(&svcLogsLines, "lines", "n", 50, "Number of log lines to show")
	// Wire up
	serviceCmd.AddCommand(serviceInstallCmd, serviceUninstallCmd, serviceStartCmd, serviceStopCmd, serviceRestartCmd, serviceStatusCmd, serviceLogsCmd)
	rootCmd.AddCommand(serviceCmd)
}

================================================
FILE: cmd/tunnel.go
================================================
package cmd

import (
	"errors"
	"fmt"
	"net/url"
	"os"
	"strings"

	serverconfig "github.com/caioricciuti/ch-ui/internal/config"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/license"
	"github.com/spf13/cobra"
)

// Flags shared by the `ch-ui tunnel ...` subcommands.
var (
	tunnelConfigPath  string // server config file path
	tunnelDBPath      string // SQLite database path override
	tunnelURLOverride string // public tunnel URL used in setup output
	tunnelCreateName  string // name for `tunnel create`
	tunnelShowToken   bool   // show full tokens in `tunnel list`
	tunnelDeleteForce bool   // allow deleting embedded connections
)

var tunnelCmd = &cobra.Command{
	Use:   "tunnel",
	Short: "Manage tunnel keys for remote ClickHouse agents",
	Long: `Create and manage tunnel connection keys in this CH-UI server database.
Run these commands on the server host (VM where CH-UI server stores its SQLite DB) to bootstrap remote agents from other machines (VM2, VM3, ...).`,
}

var tunnelCreateCmd = &cobra.Command{
	Use:   "create",
	Short: "Create a new tunnel connection and token",
	RunE: func(cmd *cobra.Command, args []string) error {
		name := strings.TrimSpace(tunnelCreateName)
		if name == "" {
			return errors.New("connection name is required (use --name)")
		}
		db, cfg, err := openTunnelDB()
		if err != nil {
			return err
		}
		defer db.Close()
		token := license.GenerateTunnelToken()
		id, err := db.CreateConnection(name, token, false)
		if err != nil {
			return fmt.Errorf("create connection: %w", err)
		}
		// Reload the row so the printed info reflects what was stored.
		conn, err := db.GetConnectionByID(id)
		if err != nil {
			return fmt.Errorf("connection created but failed to load: %w", err)
		}
		if conn == nil {
			return errors.New("connection created but failed to load: not found")
		}
		printTunnelConnectionInfo(cfg, *conn)
		return nil
	},
}

var tunnelListCmd = &cobra.Command{
	Use:   "list",
	Short: "List tunnel connections",
	RunE: func(cmd *cobra.Command, args []string) error {
		db, _, err := openTunnelDB()
		if err != nil {
			return err
		}
		defer db.Close()
		conns, err := db.GetConnections()
		if err != nil {
			return fmt.Errorf("list connections: %w", err)
		}
		if len(conns) == 0 {
			fmt.Println("No tunnel connections found.")
			fmt.Println("Create one with: ch-ui tunnel create --name ")
			return nil
		}
		// Fixed-width table; tokens are masked unless --show-token is set.
		fmt.Printf("%-36s %-22s %-12s %-8s %-35s\n", "ID", "NAME", "STATUS", "EMBEDDED", "TOKEN")
		for _, c := range conns {
			token := maskToken(c.TunnelToken)
			if tunnelShowToken {
				token = c.TunnelToken
			}
			embedded := "no"
			if c.IsEmbedded {
				embedded = "yes"
			}
			fmt.Printf("%-36s %-22s %-12s %-8s %-35s\n",
				c.ID,
				truncate(c.Name, 22),
				c.Status,
				embedded,
				token,
			)
		}
		return nil
	},
}

var tunnelShowCmd = &cobra.Command{
	Use:   "show ",
	Short: "Show token and setup instructions for a connection",
	Args:  cobra.ExactArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		db, cfg, err := openTunnelDB()
		if err != nil {
			return err
		}
		defer db.Close()
		connID := strings.TrimSpace(args[0])
		conn, err := db.GetConnectionByID(connID)
		if err != nil {
			return fmt.Errorf("load connection: %w", err)
		}
		if conn == nil {
			return fmt.Errorf("connection %q not found", connID)
		}
		printTunnelConnectionInfo(cfg, *conn)
		return nil
	},
}

var tunnelRotateCmd = &cobra.Command{
	Use:   "rotate ",
	Short: "Rotate (regenerate) tunnel token for a connection",
	Args:  cobra.ExactArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		db, cfg, err := openTunnelDB()
		if err != nil {
			return err
		}
		defer db.Close()
		connID := strings.TrimSpace(args[0])
		// Verify the connection exists before rotating its token.
		conn, err := db.GetConnectionByID(connID)
		if err != nil {
			return fmt.Errorf("load connection: %w", err)
		}
		if conn == nil {
			return fmt.Errorf("connection %q not found", connID)
		}
		newToken := license.GenerateTunnelToken()
		if err := db.UpdateConnectionToken(connID, newToken); err != nil {
			return fmt.Errorf("rotate token: %w", err)
		}
		// Reload so the printed info shows the stored (new) token.
		updated, err := db.GetConnectionByID(connID)
		if err != nil {
			return fmt.Errorf("token rotated but failed to reload connection: %w", err)
		}
		if updated == nil {
			return errors.New("token rotated but failed to reload connection: not found")
		}
		fmt.Println("Token rotated successfully. Previous token is now invalid.")
		printTunnelConnectionInfo(cfg, *updated)
		return nil
	},
}

var tunnelDeleteCmd = &cobra.Command{
	Use:   "delete ",
	Short: "Delete a tunnel connection",
	Args:  cobra.ExactArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		db, _, err := openTunnelDB()
		if err != nil {
			return err
		}
		defer db.Close()
		connID := strings.TrimSpace(args[0])
		conn, err := db.GetConnectionByID(connID)
		if err != nil {
			return fmt.Errorf("load connection: %w", err)
		}
		if conn == nil {
			return fmt.Errorf("connection %q not found", connID)
		}
		// The embedded connection backs the local server; require --force.
		if conn.IsEmbedded && !tunnelDeleteForce {
			return errors.New("refusing to delete embedded connection without --force")
		}
		if err := db.DeleteConnection(connID); err != nil {
			return fmt.Errorf("delete connection: %w", err)
		}
		fmt.Printf("Deleted connection %q (%s)\n", conn.Name, conn.ID)
		return nil
	},
}

func init() {
	tunnelCmd.PersistentFlags().StringVarP(&tunnelConfigPath, "config", "c", "", "Path to server config file")
	tunnelCmd.PersistentFlags().StringVar(&tunnelDBPath, "db", "", "Override SQLite database path")
	tunnelCmd.PersistentFlags().StringVar(&tunnelURLOverride, "url", "", "Public tunnel URL (ws:// or wss://) for setup output")
	tunnelCreateCmd.Flags().StringVar(&tunnelCreateName, "name", "", "Connection name (e.g. VM2 ClickHouse)")
	_ = tunnelCreateCmd.MarkFlagRequired("name")
	tunnelListCmd.Flags().BoolVar(&tunnelShowToken, "show-token", false, "Show full tunnel tokens")
	tunnelDeleteCmd.Flags().BoolVar(&tunnelDeleteForce, "force", false, "Force delete embedded connection")
	tunnelCmd.AddCommand(tunnelCreateCmd, tunnelListCmd, tunnelShowCmd, tunnelRotateCmd, tunnelDeleteCmd)
	rootCmd.AddCommand(tunnelCmd)
}

// openTunnelDB loads the server config (honoring --config / --db overrides)
// and opens the SQLite database. The caller must Close() the returned DB.
func openTunnelDB() (*database.DB, *serverconfig.Config, error) {
	cfg := serverconfig.Load(tunnelConfigPath)
	if strings.TrimSpace(tunnelDBPath) != "" {
		cfg.DatabasePath = strings.TrimSpace(tunnelDBPath)
	}
	db, err := database.Open(cfg.DatabasePath)
	if err != nil {
		return nil, nil, fmt.Errorf("open database %q: %w", cfg.DatabasePath, err)
	}
	return db, cfg, nil
}

// printTunnelConnectionInfo prints the connection's token plus copy-paste
// `ch-ui connect` / `ch-ui service install` commands for the remote host,
// warning when the inferred tunnel URL is loopback (useless remotely).
func printTunnelConnectionInfo(cfg *serverconfig.Config, conn database.Connection) {
	tunnelURL := inferPublicTunnelURL(cfg)
	token := conn.TunnelToken
	connectCmd := fmt.Sprintf("ch-ui connect --url %s --key %s --clickhouse-url http://localhost:8123", tunnelURL, token)
	serviceCmd := fmt.Sprintf("ch-ui service install --url %s --key %s --clickhouse-url http://localhost:8123", tunnelURL, token)
	fmt.Println()
	fmt.Printf("Connection: %s\n", conn.Name)
	fmt.Printf("Connection ID: %s\n", conn.ID)
	fmt.Printf("Tunnel Token: %s\n", token)
	fmt.Println()
	fmt.Println("Use on the ClickHouse host:")
	fmt.Printf(" %s\n", connectCmd)
	fmt.Println()
	fmt.Println("Run as service on the ClickHouse host:")
	fmt.Printf(" %s\n", serviceCmd)
	fmt.Println()
	if isLoopbackTunnelURL(tunnelURL) {
		fmt.Fprintf(os.Stderr, "Warning: tunnel URL %q is loopback/local. Set --url or APP_URL/TUNNEL_URL in server config for remote VM setup.\n", tunnelURL)
	}
}

// inferPublicTunnelURL picks the best public tunnel URL in priority order:
// --url flag, non-loopback config TunnelURL, AppURL, loopback TunnelURL,
// and finally a hard-coded local default.
func inferPublicTunnelURL(cfg *serverconfig.Config) string {
	if strings.TrimSpace(tunnelURLOverride) != "" {
		return websocketConnectURL(strings.TrimSpace(tunnelURLOverride))
	}
	configTunnelURL := strings.TrimSpace(cfg.TunnelURL)
	if configTunnelURL != "" && !isLoopbackTunnelURL(configTunnelURL) {
		return websocketConnectURL(configTunnelURL)
	}
	if appURL := strings.TrimSpace(cfg.AppURL); appURL != "" {
		return websocketConnectURL(appURL)
	}
	if configTunnelURL != "" {
		return websocketConnectURL(configTunnelURL)
	}
	return "ws://127.0.0.1:3488/connect"
}

// websocketConnectURL normalizes raw into a websocket URL ending in
// "/connect": http(s) schemes become ws(s), the /connect suffix is appended
// when missing, and any query string / fragment is dropped.
func websocketConnectURL(raw string) string {
	u, err := url.Parse(raw)
	if err != nil {
		return raw
	}
	switch strings.ToLower(u.Scheme) {
	case "http":
		u.Scheme = "ws"
	case "https":
		u.Scheme = "wss"
	case "ws", "wss":
		// already websocket scheme
	default:
		// keep as-is (can still be validated by caller command later)
	}
	path := strings.TrimRight(u.Path, "/")
	if path == "" {
		u.Path = "/connect"
	} else if !strings.HasSuffix(path, "/connect") {
		u.Path = path + "/connect"
	}
	u.RawQuery = ""
	u.Fragment = ""
	return u.String()
}

// isLoopbackTunnelURL reports whether raw points at localhost. Unparseable
// input falls back to a substring check.
func isLoopbackTunnelURL(raw string) bool {
	u, err := url.Parse(raw)
	if err != nil {
		s := strings.ToLower(raw)
		return strings.Contains(s, "127.0.0.1") || strings.Contains(s, "localhost")
	}
	host := strings.ToLower(u.Hostname())
	return host == "127.0.0.1" || host == "localhost" || host == "::1"
}

// maskToken hides the middle of a token, keeping the first 8 and last 4
// characters. Tokens of 12 chars or fewer are returned unchanged.
func maskToken(token string) string {
	if len(token) <= 12 {
		return token
	}
	return token[:8] + "..." + token[len(token)-4:]
}

// truncate shortens s to max bytes with a "..." suffix. NOTE(review): byte
// slicing can split a multi-byte rune — acceptable for ASCII names; confirm.
func truncate(s string, max int) string {
	if max < 4 || len(s) <= max {
		return s
	}
	return s[:max-3] + "..."
}

================================================
FILE: cmd/uninstall.go
================================================
package cmd

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/caioricciuti/ch-ui/connector/service"
	serverconfig "github.com/caioricciuti/ch-ui/internal/config"
	"github.com/spf13/cobra"
)

// Flags for `ch-ui uninstall`.
var (
	uninstallConfigPath string   // server config path (used to locate the DB)
	uninstallDBPath     string   // database path override
	uninstallForce      bool     // continue despite step failures
	uninstallPrintOnly  bool     // only print manual cleanup commands
	uninstallPIDFiles   []string // extra server PID files to stop/remove
)

// uninstallPlan gathers every path and PID file the uninstall touches, so
// the same data drives both the automated steps and the manual commands.
type uninstallPlan struct {
	serverConfigPath string
	databasePath     string
	serverPIDFiles   []string
	connectorPIDFile string
	cleanupPaths     []string
}

var uninstallCmd = &cobra.Command{
	Use:   "uninstall",
	Short: "Uninstall CH-UI from this machine",
	Long: `Best-effort local uninstall for CH-UI.
This command stops services/processes, removes local CH-UI files, and prints manual cleanup commands for anything that still requires privileged shell access.`,
	RunE: runUninstall,
}

func init() {
	uninstallCmd.Flags().StringVarP(&uninstallConfigPath, "config", "c", "", "Path to server config file (used to locate database)")
	uninstallCmd.Flags().StringVar(&uninstallDBPath, "db", "", "Override server SQLite database path")
	uninstallCmd.Flags().BoolVar(&uninstallForce, "force", false, "Continue uninstall even if some steps fail")
	uninstallCmd.Flags().BoolVar(&uninstallPrintOnly, "print-only", false, "Only print cleanup commands without executing uninstall")
	uninstallCmd.Flags().StringSliceVar(&uninstallPIDFiles, "pid-file", nil, "Additional server PID file path to stop/remove (repeatable)")
	rootCmd.AddCommand(uninstallCmd)
}

// runUninstall executes the uninstall plan: stop processes, remove services
// and files, then print manual cleanup commands. Failures are collected and
// only become a hard error when --force was not given.
func runUninstall(cmd *cobra.Command, args []string) error {
	plan := buildUninstallPlan()
	fmt.Println("CH-UI uninstall (best effort)")
	fmt.Printf("Server config: %s\n", plan.serverConfigPath)
	fmt.Printf("Database: %s\n", plan.databasePath)
	if uninstallPrintOnly {
		printManualUninstallCommands(plan)
		return nil
	}
	var failures []string
	if err := stopDetachedConnectProcess(plan.connectorPIDFile); err != nil {
		failures = append(failures, err.Error())
	}
	for _, pidFile := range plan.serverPIDFiles {
		if err := stopServerByPIDFile(pidFile); err != nil {
			failures = append(failures, err.Error())
		}
	}
	if err := uninstallConnectorService(); err != nil {
		failures = append(failures, err.Error())
	}
	if err := uninstallServerSystemService(); err != nil {
		failures = append(failures, err.Error())
	}
	for _, p := range plan.cleanupPaths {
		removed, err := removePathIfExists(p)
		if err != nil {
			failures = append(failures, fmt.Sprintf("remove %s: %v", p, err))
			continue
		}
		if removed {
			fmt.Printf("Removed %s\n", p)
		}
	}
	printManualUninstallCommands(plan)
	if len(failures) == 0 {
		fmt.Println("Uninstall completed.")
		return nil
	}
	fmt.Println("Uninstall completed with warnings:")
	for _, failure := range failures {
		fmt.Printf(" - %s\n", failure)
	}
	if uninstallForce {
		return nil
	}
	return errors.New("one or more uninstall steps failed (rerun with --force to continue)" +
		"\nUse the manual cleanup commands shown above")
}

// buildUninstallPlan derives every path to stop/remove from config, defaults,
// platform-specific service locations, and user-supplied flags.
func buildUninstallPlan() uninstallPlan {
	cfg := serverconfig.Load(uninstallConfigPath)
	if strings.TrimSpace(uninstallDBPath) != "" {
		cfg.DatabasePath = strings.TrimSpace(uninstallDBPath)
	}
	serverConfigPath := strings.TrimSpace(uninstallConfigPath)
	if serverConfigPath == "" {
		serverConfigPath = serverconfig.DefaultServerConfigPath()
	}
	pidFiles := append([]string{"ch-ui-server.pid", "/var/lib/ch-ui/run/ch-ui-server.pid"}, uninstallPIDFiles...)
	pidFiles = uniqueNonEmpty(pidFiles)
	cleanupPaths := []string{
		service.BinaryPath,
		service.GetConfigDir(),
		serverConfigPath,
		cfg.DatabasePath,
		"ch-ui-server.log",
	}
	cleanupPaths = append(cleanupPaths, pidFiles...)
	if runtime.GOOS == "darwin" {
		home, _ := os.UserHomeDir()
		cleanupPaths = append(cleanupPaths,
			filepath.Join(home, "Library", "LaunchAgents", service.ServiceLabel+".plist"),
			filepath.Join(home, "Library", "Logs", "ch-ui"),
		)
	}
	if runtime.GOOS == "linux" {
		cleanupPaths = append(cleanupPaths,
			"/etc/systemd/system/ch-ui.service",
			"/etc/systemd/system/ch-ui-server.service",
		)
	}
	cleanupPaths = uniqueNonEmpty(cleanupPaths)
	return uninstallPlan{
		serverConfigPath: serverConfigPath,
		databasePath:     cfg.DatabasePath,
		serverPIDFiles:   pidFiles,
		connectorPIDFile: filepath.Join(service.GetConfigDir(), "ch-ui.pid"),
		cleanupPaths:     cleanupPaths,
	}
}

// stopDetachedConnectProcess SIGTERMs the detached `ch-ui connect` process
// recorded in pidFile and waits up to 10s. A missing PID file is a no-op.
func stopDetachedConnectProcess(pidFile string) error {
	pid, err := readPIDFile(pidFile)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return fmt.Errorf("read connect pid file %s: %w", pidFile, err)
	}
	if !processExists(pid) {
		// Stale PID file for a dead process.
		_ = os.Remove(pidFile)
		return nil
	}
	proc, err := os.FindProcess(pid)
	if err != nil {
		return fmt.Errorf("locate connect process %d: %w", pid, err)
	}
	if err := proc.Signal(syscall.SIGTERM); err != nil {
		return fmt.Errorf("stop connect process %d: %w", pid, err)
	}
	deadline := time.Now().Add(10 * time.Second)
	for time.Now().Before(deadline) {
		if !processExists(pid) {
			_ = os.Remove(pidFile)
			fmt.Printf("Stopped connect process (PID %d)\n", pid)
			return nil
		}
		time.Sleep(200 * time.Millisecond)
	}
	return fmt.Errorf("timeout waiting for connect process %d to stop", pid)
}

// stopServerByPIDFile stops a server tracked by pidFile (no-op when not
// running), delegating the actual stop to stopServer.
func stopServerByPIDFile(pidFile string) error {
	pid, running, err := getRunningServerPID(pidFile)
	if err != nil {
		return fmt.Errorf("inspect server pid file %s: %w", pidFile, err)
	}
	if !running {
		return nil
	}
	stopped, err := stopServer(pidFile, 10*time.Second)
	if err != nil {
		return fmt.Errorf("stop server process %d from %s: %w", pid, pidFile, err)
	}
	if stopped {
		fmt.Printf("Stopped server process (PID %d) from %s\n", pid, pidFile)
	}
	return nil
}

// uninstallConnectorService stops and removes the connector system service
// when installed.
func uninstallConnectorService() error {
	svc := service.New()
	if !svc.IsInstalled() {
		fmt.Println("Connector service is not installed")
		return nil
	}
	fmt.Println("Stopping connector service...")
	_ = svc.Stop()
	fmt.Println("Uninstalling connector service...")
	if err := svc.Uninstall(); err != nil {
		return fmt.Errorf("uninstall connector service: %w", err)
	}
	fmt.Println("Connector service uninstalled")
	return nil
}

// uninstallServerSystemService disables the linux systemd unit for the
// server; warnings from individual systemctl steps are joined into one error.
func uninstallServerSystemService() error {
	if runtime.GOOS != "linux" {
		return nil
	}
	var warnings []string
	steps := [][]string{
		{"systemctl", "stop", "ch-ui-server"},
		{"systemctl", "disable", "ch-ui-server"},
		{"systemctl", "daemon-reload"},
	}
	for _, step := range steps {
		if err := runPrivileged(step[0], step[1:]...); err != nil {
			warnings = append(warnings, fmt.Sprintf("%s: %v", strings.Join(step, " "), err))
		}
	}
	if len(warnings) == 0 {
		return nil
	}
	return errors.New(strings.Join(warnings, "; "))
}

// runPrivileged runs a command, prefixing with sudo on linux when not root,
// and folds captured output into the returned error.
func runPrivileged(name string, args ...string) error {
	cmdName := name
	cmdArgs := args
	if runtime.GOOS == "linux" && os.Geteuid() != 0 {
		cmdArgs = append([]string{name}, args...)
		cmdName = "sudo"
	}
	cmd := exec.Command(cmdName, cmdArgs...)
	out, err := cmd.CombinedOutput()
	if err == nil {
		return nil
	}
	msg := strings.TrimSpace(string(out))
	if msg == "" {
		return err
	}
	return fmt.Errorf("%w: %s", err, msg)
}

// removePathIfExists deletes a file or directory, reporting whether anything
// was removed. Empty paths and "/" are refused.
func removePathIfExists(path string) (bool, error) {
	path = strings.TrimSpace(path)
	if path == "" {
		return false, nil
	}
	if path == "/" {
		return false, fmt.Errorf("refusing to remove root path")
	}
	info, err := os.Stat(path)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	if info.IsDir() {
		if err := os.RemoveAll(path); err != nil {
			return false, err
		}
		return true, nil
	}
	if err := os.Remove(path); err != nil {
		return false, err
	}
	return true, nil
}

// printManualUninstallCommands prints shell commands the operator can run to
// finish any cleanup the automated steps could not perform.
func printManualUninstallCommands(plan uninstallPlan) {
	fmt.Println()
	fmt.Println("Manual cleanup commands (run if anything remains):")
	for _, cmd := range manualUninstallCommands(plan) {
		fmt.Printf(" %s\n", cmd)
	}
	fmt.Println()
	fmt.Println("Optional verification:")
	fmt.Println(" ch-ui version")
	fmt.Println(" ch-ui service status")
	fmt.Println(" ch-ui server status")
}

// manualUninstallCommands builds the platform-specific shell command list,
// shell-quoting every path it embeds.
func manualUninstallCommands(plan uninstallPlan) []string {
	commands := []string{}
	quotedConfig := shellQuote(plan.serverConfigPath)
	quotedDB := shellQuote(plan.databasePath)
	switch runtime.GOOS {
	case "darwin":
		home, _ := os.UserHomeDir()
		launchAgent := filepath.Join(home, "Library", "LaunchAgents", service.ServiceLabel+".plist")
		logDir := filepath.Join(home, "Library", "Logs", "ch-ui")
		commands = append(commands,
			"launchctl unload "+shellQuote(launchAgent)+" 2>/dev/null || true",
			"rm -f "+shellQuote(launchAgent),
			"rm -rf "+shellQuote(service.GetConfigDir()),
			"rm -rf "+shellQuote(logDir),
			"rm -f "+shellQuote(service.BinaryPath),
			"rm -f "+quotedConfig,
			"rm -f "+quotedDB,
		)
	default:
		commands = append(commands,
			"sudo systemctl stop ch-ui 2>/dev/null || true",
			"sudo systemctl disable ch-ui 2>/dev/null || true",
			"sudo rm -f /etc/systemd/system/ch-ui.service",
			"sudo systemctl stop ch-ui-server 2>/dev/null || true",
			"sudo systemctl disable ch-ui-server 2>/dev/null || true",
			"sudo rm -f /etc/systemd/system/ch-ui-server.service",
			"sudo systemctl daemon-reload",
			"sudo rm -rf "+shellQuote(service.GetConfigDir()),
			"sudo rm -f "+shellQuote(service.BinaryPath),
			"sudo rm -f "+quotedConfig,
			"sudo rm -f "+quotedDB,
		)
	}
	if len(plan.serverPIDFiles) > 0 {
		var quoted []string
		for _, p := range plan.serverPIDFiles {
			quoted = append(quoted, shellQuote(p))
		}
		commands = append(commands, "rm -f "+strings.Join(quoted, " "))
	}
	commands = append(commands,
		"rm -f "+shellQuote("ch-ui-server.log"),
	)
	return commands
}

// shellQuote single-quotes s for POSIX shells, escaping embedded quotes.
func shellQuote(s string) string {
	return "'" + strings.ReplaceAll(s, "'", "'\\''") + "'"
}

// uniqueNonEmpty trims, drops empty entries, and dedupes while preserving
// first-seen order.
func uniqueNonEmpty(in []string) []string {
	seen := make(map[string]struct{}, len(in))
	out := make([]string, 0, len(in))
	for _, raw := range in {
		p := strings.TrimSpace(raw)
		if p == "" {
			continue
		}
		if _, ok := seen[p]; ok {
			continue
		}
		seen[p] = struct{}{}
		out = append(out, p)
	}
	return out
}

================================================
FILE: cmd/update.go
================================================
package cmd

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"github.com/caioricciuti/ch-ui/internal/version"
	"github.com/spf13/cobra"
)

const (
	releasesURL = "https://api.github.com/repos/caioricciuti/ch-ui/releases/latest"
)

// ghRelease / ghAsset mirror the fields we need from the GitHub releases API.
type ghRelease struct {
	TagName string    `json:"tag_name"`
	Assets  []ghAsset `json:"assets"`
}

type ghAsset struct {
	Name               string `json:"name"`
	BrowserDownloadURL string `json:"browser_download_url"`
}

var updateCmd = &cobra.Command{
	Use:   "update",
	Short: "Update CH-UI to the latest version",
	Long:  "Download the latest CH-UI release from GitHub and replace the current binary.",
	RunE:  runUpdate,
}

// Flags for `ch-ui update`.
var (
	updateRestartServer bool          // restart a detected running server
	updatePIDFile       string        // PID file used for detection/restart
	updateStopTimeout   time.Duration // graceful stop window during restart
)

func init() {
	updateCmd.Flags().BoolVar(&updateRestartServer, "restart-server", true, "Automatically restart a running CH-UI server after update")
updateCmd.Flags().StringVar(&updatePIDFile, "pid-file", "ch-ui-server.pid", "Server PID file path used to detect/restart a running server") updateCmd.Flags().DurationVar(&updateStopTimeout, "stop-timeout", 10*time.Second, "Graceful stop timeout used when restarting after update") rootCmd.AddCommand(updateCmd) } func runUpdate(cmd *cobra.Command, args []string) error { // Resolve PID file to absolute path so we can detect the running // server regardless of the caller's working directory. updatePIDFile = resolvePIDFile(updatePIDFile) // Resolve current binary path currentBin, err := os.Executable() if err != nil { return fmt.Errorf("failed to determine current binary path: %w", err) } currentBin, err = filepath.EvalSymlinks(currentBin) if err != nil { return fmt.Errorf("failed to resolve binary path: %w", err) } var runningPID int var running bool restartArgs := []string{"server", "--pid-file", updatePIDFile} if updateRestartServer { runningPID, running, err = getRunningServerPID(updatePIDFile) if err != nil { return fmt.Errorf("failed to inspect server status via PID file %q: %w", updatePIDFile, err) } if running { restartArgs = detectServerRestartArgs(runningPID, updatePIDFile) fmt.Printf("Detected running CH-UI server (PID %d); it will be restarted after update.\n", runningPID) } } // Check write permissions dir := filepath.Dir(currentBin) if err := checkWritable(dir); err != nil { return fmt.Errorf("cannot write to %s: %w (try running with sudo)", dir, err) } fmt.Printf("Current version: %s\n", version.Version) fmt.Println("Checking for updates...") // Fetch latest release info release, err := fetchLatestRelease() if err != nil { return fmt.Errorf("failed to check for updates: %w", err) } latestTag := release.TagName if latestTag == version.Version { fmt.Printf("Already up to date (%s)\n", version.Version) return nil } fmt.Printf("New version available: %s → %s\n", version.Version, latestTag) // Find the right asset for this platform assetName := 
fmt.Sprintf("ch-ui-%s-%s", runtime.GOOS, runtime.GOARCH) var assetURL string var checksumsURL string for _, a := range release.Assets { if a.Name == assetName { assetURL = a.BrowserDownloadURL } if a.Name == "checksums.txt" { checksumsURL = a.BrowserDownloadURL } } if assetURL == "" { return fmt.Errorf("no release asset found for %s/%s (expected %s)", runtime.GOOS, runtime.GOARCH, assetName) } // Download checksums var expectedHash string if checksumsURL != "" { expectedHash, err = fetchExpectedChecksum(checksumsURL, assetName) if err != nil { fmt.Printf("Warning: could not verify checksum: %v\n", err) } } // Download binary to temp file in the same directory (for atomic rename) tmpPath := currentBin + ".update-tmp" fmt.Printf("Downloading %s...\n", assetName) if err := downloadFile(assetURL, tmpPath); err != nil { os.Remove(tmpPath) return fmt.Errorf("failed to download update: %w", err) } // Verify checksum if expectedHash != "" { actualHash, err := fileSHA256(tmpPath) if err != nil { os.Remove(tmpPath) return fmt.Errorf("failed to compute checksum: %w", err) } if actualHash != expectedHash { os.Remove(tmpPath) return fmt.Errorf("checksum mismatch: expected %s, got %s", expectedHash, actualHash) } fmt.Println("Checksum verified ✓") } // Make executable if err := os.Chmod(tmpPath, 0755); err != nil { os.Remove(tmpPath) return fmt.Errorf("failed to set permissions: %w", err) } // Atomic replace if err := os.Rename(tmpPath, currentBin); err != nil { os.Remove(tmpPath) return fmt.Errorf("failed to replace binary: %w", err) } fmt.Printf("Updated successfully: %s → %s\n", version.Version, latestTag) if !updateRestartServer || !running { fmt.Println("Restart CH-UI to use the new version.") return nil } fmt.Printf("Restarting CH-UI server (PID %d)...\n", runningPID) stopped, err := stopServer(updatePIDFile, updateStopTimeout) if err != nil { return fmt.Errorf("binary updated to %s but failed to stop running server: %w", latestTag, err) } if !stopped { return 
fmt.Errorf("binary updated to %s but could not confirm server stop; run `ch-ui server restart --detach --pid-file %s`", latestTag, updatePIDFile) } prevPIDFile := serverPIDFile serverPIDFile = updatePIDFile pid, logPath, err := startDetachedServer(restartArgs) serverPIDFile = prevPIDFile if err != nil { return fmt.Errorf("binary updated to %s and server stopped, but failed to start it again: %w", latestTag, err) } fmt.Printf("CH-UI server restarted in background (PID %d)\n", pid) if logPath != "" { fmt.Printf("Logs: %s\n", logPath) } fmt.Println("Update complete and running the new version.") return nil } func detectServerRestartArgs(pid int, pidFile string) []string { args, err := readProcessArgs(pid) if err != nil { return []string{"server", "--pid-file", pidFile} } sanitized := sanitizeServerStartArgs(args, pidFile) if len(sanitized) == 0 { return []string{"server", "--pid-file", pidFile} } return sanitized } func readProcessArgs(pid int) ([]string, error) { if runtime.GOOS != "linux" { return nil, fmt.Errorf("unsupported OS for process args inspection: %s", runtime.GOOS) } data, err := os.ReadFile(fmt.Sprintf("/proc/%d/cmdline", pid)) if err != nil { return nil, err } parts := strings.Split(string(data), "\x00") if len(parts) == 0 { return nil, fmt.Errorf("empty cmdline for PID %d", pid) } out := make([]string, 0, len(parts)) for i, part := range parts { if i == 0 { continue // executable path } if strings.TrimSpace(part) == "" { continue } out = append(out, part) } return out, nil } func sanitizeServerStartArgs(args []string, pidFile string) []string { // Safe fallback that keeps behavior predictable. out := []string{"server"} // Expect args from the running server process to start with "server" // (or "server start" in older/manual invocations). 
i := 0 if len(args) > 0 && args[0] == "server" { i = 1 if i < len(args) && args[i] == "start" { i++ } } for i < len(args) { a := args[i] switch { case a == "server" || a == "start" || a == "stop" || a == "status" || a == "restart": i++ case a == "--detach" || a == "-h" || a == "--help": i++ case a == "--dev": out = append(out, a) i++ case a == "--port" || a == "-p" || a == "--config" || a == "-c" || a == "--clickhouse-url" || a == "--connection-name" || a == "--stop-timeout": if i+1 < len(args) { out = append(out, a, args[i+1]) i += 2 continue } i++ case a == "--pid-file": if i+1 < len(args) { out = append(out, a, resolvePIDFile(args[i+1])) i += 2 continue } i++ case strings.HasPrefix(a, "--port=") || strings.HasPrefix(a, "--config=") || strings.HasPrefix(a, "--clickhouse-url=") || strings.HasPrefix(a, "--connection-name=") || strings.HasPrefix(a, "--stop-timeout="): out = append(out, a) i++ case strings.HasPrefix(a, "--pid-file="): val := strings.TrimPrefix(a, "--pid-file=") out = append(out, "--pid-file="+resolvePIDFile(val)) i++ default: i++ } } if !hasFlag(out, "--pid-file") { out = append(out, "--pid-file", pidFile) } return out } func hasFlag(args []string, longName string) bool { for _, a := range args { if a == longName || strings.HasPrefix(a, longName+"=") { return true } } return false } func fetchLatestRelease() (*ghRelease, error) { resp, err := http.Get(releasesURL) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("GitHub API returned %s", resp.Status) } var release ghRelease if err := json.NewDecoder(resp.Body).Decode(&release); err != nil { return nil, fmt.Errorf("failed to parse release info: %w", err) } return &release, nil } func fetchExpectedChecksum(url, assetName string) (string, error) { resp, err := http.Get(url) if err != nil { return "", err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", err } for _, line := range 
strings.Split(string(body), "\n") { parts := strings.Fields(line) if len(parts) == 2 && parts[1] == assetName { return parts[0], nil } } return "", fmt.Errorf("checksum not found for %s", assetName) } func downloadFile(url, dest string) error { resp, err := http.Get(url) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("download returned %s", resp.Status) } f, err := os.Create(dest) if err != nil { return err } defer f.Close() _, err = io.Copy(f, resp.Body) return err } func fileSHA256(path string) (string, error) { f, err := os.Open(path) if err != nil { return "", err } defer f.Close() h := sha256.New() if _, err := io.Copy(h, f); err != nil { return "", err } return hex.EncodeToString(h.Sum(nil)), nil } func checkWritable(dir string) error { tmp := filepath.Join(dir, ".ch-ui-update-check") f, err := os.Create(tmp) if err != nil { return err } f.Close() return os.Remove(tmp) } ================================================ FILE: cmd/version.go ================================================ package cmd import ( "fmt" "github.com/caioricciuti/ch-ui/internal/version" "github.com/spf13/cobra" ) var versionCmd = &cobra.Command{ Use: "version", Short: "Print version information", Run: func(cmd *cobra.Command, args []string) { fmt.Printf("ch-ui %s (commit: %s, built: %s)\n", version.Version, version.Commit, version.BuildDate) }, } func init() { rootCmd.AddCommand(versionCmd) } ================================================ FILE: connector/clickhouse.go ================================================ package connector import ( "bufio" "context" "crypto/tls" "encoding/json" "errors" "fmt" "io" "net" "net/http" "net/url" "regexp" "strconv" "strings" "time" ) // CHClient handles ClickHouse query execution type CHClient struct { baseURL string transport *http.Transport httpClient *http.Client } // NewCHClient creates a new ClickHouse HTTP client func NewCHClient(baseURL string, insecureSkipVerify bool) 
*CHClient { transport := &http.Transport{ DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext, TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify}, MaxIdleConns: 100, MaxIdleConnsPerHost: 10, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, ResponseHeaderTimeout: 5 * time.Minute, DisableKeepAlives: false, ForceAttemptHTTP2: true, } return &CHClient{ baseURL: strings.TrimSuffix(baseURL, "/"), transport: transport, httpClient: &http.Client{ Transport: transport, Timeout: 5 * time.Minute, }, } } // QueryResult holds the result of a query execution type QueryResult struct { Data []map[string]interface{} `json:"data"` Meta []ColumnMeta `json:"meta"` Rows int `json:"rows"` Statistics struct { Elapsed float64 `json:"elapsed"` RowsRead uint64 `json:"rows_read"` BytesRead uint64 `json:"bytes_read"` } `json:"statistics"` } // ColumnMeta describes a column in the result type ColumnMeta struct { Name string `json:"name"` Type string `json:"type"` } // isTransientError checks if an error is a transient connection error that // should be retried (e.g. server closed an idle keep-alive connection). func isTransientError(err error) bool { if err == nil { return false } s := err.Error() if strings.Contains(s, "unexpected EOF") || strings.Contains(s, "connection reset by peer") || strings.Contains(s, "transport connection broken") || strings.Contains(s, "use of closed network connection") { return true } var netErr net.Error if errors.As(err, &netErr) && netErr.Timeout() { return false // real timeouts should not be retried } return false } // doWithRetry executes an HTTP request, retrying once on transient connection errors. func (c *CHClient) doWithRetry(req *http.Request, client *http.Client) (*http.Response, error) { resp, err := client.Do(req) if err != nil && isTransientError(err) { // Close any idle connections that may be stale, then retry once. 
c.transport.CloseIdleConnections() // Clone the request for retry (the body must be re-readable). retryReq := req.Clone(req.Context()) if req.GetBody != nil { body, bodyErr := req.GetBody() if bodyErr != nil { return nil, err // return original error } retryReq.Body = body } return client.Do(retryReq) } return resp, err } // Execute runs a query against ClickHouse func (c *CHClient) Execute(ctx context.Context, query, user, password string) (*QueryResult, error) { // Determine if this is a read or write query isWrite := isWriteQuery(query) hasFormat := hasFormatClause(query) // Build URL with parameters params := url.Values{} params.Set("default_format", "JSON") // For read queries without explicit FORMAT, add FORMAT JSON finalQuery := query if !isWrite && !hasFormat { finalQuery = strings.TrimRight(query, "; \n\t") + " FORMAT JSON" } fullURL := c.baseURL + "/?" + params.Encode() // Create request req, err := http.NewRequestWithContext(ctx, "POST", fullURL, strings.NewReader(finalQuery)) if err != nil { return nil, fmt.Errorf("failed to create request: %w", err) } // Set auth if provided if user != "" { req.SetBasicAuth(user, password) } req.Header.Set("Content-Type", "text/plain") // GetBody allows doWithRetry to re-create the body on retry bodyStr := finalQuery req.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(strings.NewReader(bodyStr)), nil } // Execute with retry on transient connection errors resp, err := c.doWithRetry(req, c.httpClient) if err != nil { return nil, fmt.Errorf("request failed: %w", err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("failed to read response: %w", err) } // Check for errors if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("ClickHouse error: %s", string(body)) } // For write queries or queries with explicit format, we may get empty body if len(body) == 0 || (isWrite && !hasFormat) { return &QueryResult{ Data: []map[string]interface{}{}, Meta: 
[]ColumnMeta{}, Rows: 0, }, nil } // Parse JSON response var result QueryResult if err := json.Unmarshal(body, &result); err != nil { // If JSON parse fails but status was OK, treat as DDL success if isWrite { return &QueryResult{ Data: []map[string]interface{}{}, Meta: []ColumnMeta{}, Rows: 0, }, nil } return nil, fmt.Errorf("failed to parse response: %w (body: %s)", err, truncate(string(body), 200)) } return &result, nil } // ExecuteRaw runs a query and returns the raw ClickHouse response bytes without intermediate parsing. // The format parameter controls the FORMAT clause appended to read queries (e.g. "JSONCompact"). func (c *CHClient) ExecuteRaw(ctx context.Context, query, user, password, format string) (json.RawMessage, error) { isWrite := isWriteQuery(query) hasFormat := hasFormatClause(query) finalQuery := query if !isWrite && !hasFormat { if format == "" { format = "JSON" } finalQuery = strings.TrimRight(query, "; \n\t") + " FORMAT " + format } params := url.Values{} params.Set("default_format", "JSON") fullURL := c.baseURL + "/?" 
+ params.Encode() req, err := http.NewRequestWithContext(ctx, "POST", fullURL, strings.NewReader(finalQuery)) if err != nil { return nil, fmt.Errorf("failed to create request: %w", err) } if user != "" { req.SetBasicAuth(user, password) } req.Header.Set("Content-Type", "text/plain") // GetBody allows doWithRetry to re-create the body on retry bodyStr := finalQuery req.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(strings.NewReader(bodyStr)), nil } resp, err := c.doWithRetry(req, c.httpClient) if err != nil { return nil, fmt.Errorf("request failed: %w", err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("failed to read response: %w", err) } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("ClickHouse error: %s", string(body)) } if len(body) == 0 || (isWrite && !hasFormat) { return json.RawMessage(`{"data":[],"meta":[],"rows":0}`), nil } return json.RawMessage(body), nil } // StreamChunk holds a batch of rows for streaming execution. type StreamChunk struct { Seq int `json:"seq"` Data json.RawMessage `json:"data"` // JSON array of arrays: [[v1,v2],[v3,v4],...] } // ExecuteStreaming runs a query using JSONCompactEachRow format, reading the response // line-by-line without buffering the entire result. It calls onMeta with column metadata, // then onChunk for each batch of chunkSize rows, and returns final statistics. 
func (c *CHClient) ExecuteStreaming( ctx context.Context, query, user, password string, chunkSize int, settings map[string]string, onMeta func(meta json.RawMessage) error, onChunk func(seq int, data json.RawMessage) error, ) (*json.RawMessage, int64, error) { isWrite := isWriteQuery(query) hasFormat := hasFormatClause(query) if chunkSize <= 0 { chunkSize = 5000 } // Get column metadata via a LIMIT 0 query with JSONCompact, or send empty meta for writes if !isWrite && !hasFormat { trimmed := strings.TrimRight(query, "; \n\t") var metaQuery string if limitRe := regexp.MustCompile(`(?i)\bLIMIT\s+\d+(\s*,\s*\d+)?(\s+OFFSET\s+\d+)?`); limitRe.MatchString(trimmed) { metaQuery = limitRe.ReplaceAllString(trimmed, "LIMIT 0") } else { // Use a newline so that trailing -- line comments don't swallow the injected clause metaQuery = trimmed + "\nLIMIT 0" } metaResult, err := c.ExecuteRaw(ctx, metaQuery, user, password, "JSONCompact") if err != nil { return nil, 0, fmt.Errorf("metadata query failed: %w", err) } var compact struct { Meta json.RawMessage `json:"meta"` } if err := json.Unmarshal(metaResult, &compact); err == nil && len(compact.Meta) > 0 { if err := onMeta(compact.Meta); err != nil { return nil, 0, err } } } else { // Write queries: send empty meta so consumers always get exactly one meta message if err := onMeta(json.RawMessage("[]")); err != nil { return nil, 0, err } } // Now execute the actual query with JSONCompactEachRow for streaming finalQuery := query if !isWrite && !hasFormat { // Use a newline so trailing -- line comments don't swallow the FORMAT clause finalQuery = strings.TrimRight(query, "; \n\t") + "\nFORMAT JSONCompactEachRow" } // Extract max_result_rows for precise client-side enforcement in the scanner loop. 
var maxRows int64 if v, ok := settings["max_result_rows"]; ok { if n, err := strconv.ParseInt(v, 10, 64); err == nil && n > 0 { maxRows = n } } params := url.Values{} params.Set("default_format", "JSON") params.Set("send_progress_in_http_headers", "0") // Pass settings as ClickHouse HTTP URL params for coarse server-side abort. // max_result_rows + result_overflow_mode=break causes ClickHouse to stop at block // granularity (~65k rows), preventing the server from doing unbounded work. // The scanner loop below enforces the exact row count on top of this. for k, v := range settings { params.Set(k, v) } fullURL := c.baseURL + "/?" + params.Encode() req, err := http.NewRequestWithContext(ctx, "POST", fullURL, strings.NewReader(finalQuery)) if err != nil { return nil, 0, fmt.Errorf("failed to create request: %w", err) } if user != "" { req.SetBasicAuth(user, password) } req.Header.Set("Content-Type", "text/plain") // Use a client without timeout for streaming (context controls cancellation) // but share the configured transport for proper TLS and connection management. 
streamClient := &http.Client{Transport: c.transport} resp, err := streamClient.Do(req) if err != nil { return nil, 0, fmt.Errorf("request failed: %w", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { body, _ := io.ReadAll(io.LimitReader(resp.Body, 4096)) return nil, 0, fmt.Errorf("ClickHouse error: %s", string(body)) } // Read line by line, accumulate chunks scanner := bufio.NewScanner(resp.Body) scanner.Buffer(make([]byte, 0, 1024*1024), 10*1024*1024) // 10MB max line var batch []json.RawMessage seq := 0 var totalRows int64 for scanner.Scan() { select { case <-ctx.Done(): return nil, totalRows, ctx.Err() default: } line := scanner.Bytes() if len(line) == 0 { continue } // Each line is a JSON array: [v1, v2, v3] row := make(json.RawMessage, len(line)) copy(row, line) batch = append(batch, row) totalRows++ // Enforce max_result_rows limit: break early (closes body, aborts ClickHouse query) if maxRows > 0 && totalRows >= maxRows { break } if len(batch) >= chunkSize { chunkData, _ := json.Marshal(batch) if err := onChunk(seq, chunkData); err != nil { return nil, totalRows, err } batch = batch[:0] seq++ } } if err := scanner.Err(); err != nil { return nil, totalRows, fmt.Errorf("stream read error: %w", err) } // Flush remaining rows if len(batch) > 0 { chunkData, _ := json.Marshal(batch) if err := onChunk(seq, chunkData); err != nil { return nil, totalRows, err } } // We don't get statistics from JSONCompactEachRow format directly. // Return nil stats — the server can compute elapsed time itself. 
return nil, totalRows, nil } // TestConnection verifies connectivity and returns the ClickHouse version func (c *CHClient) TestConnection(ctx context.Context, user, password string) (string, error) { query := "SELECT version() as version FORMAT JSON" result, err := c.Execute(ctx, query, user, password) if err != nil { return "", err } if len(result.Data) > 0 { if v, ok := result.Data[0]["version"]; ok { return fmt.Sprintf("%v", v), nil } } return "unknown", nil } // Query patterns var ( writeQueryPattern = regexp.MustCompile(`(?i)^\s*(INSERT|CREATE|DROP|ALTER|TRUNCATE|RENAME|ATTACH|DETACH|OPTIMIZE|GRANT|REVOKE|KILL|SYSTEM|SET|USE)`) formatPattern = regexp.MustCompile(`(?i)\bFORMAT\s+\w+\s*$`) commentPattern = regexp.MustCompile(`(?m)^\s*--.*$`) ) func isWriteQuery(query string) bool { // Strip leading comments stripped := commentPattern.ReplaceAllString(query, "") stripped = strings.TrimSpace(stripped) return writeQueryPattern.MatchString(stripped) } func hasFormatClause(query string) bool { return formatPattern.MatchString(strings.TrimSpace(query)) } func truncate(s string, maxLen int) string { if len(s) <= maxLen { return s } return s[:maxLen] + "..." 
} ================================================ FILE: connector/config/config.go ================================================ package config import ( "fmt" "os" "path/filepath" "runtime" "strings" "time" "gopkg.in/yaml.v3" ) // Config holds all agent configuration type Config struct { // Required Token string `yaml:"tunnel_token"` // URLs ClickHouseURL string `yaml:"clickhouse_url"` TunnelURL string `yaml:"tunnel_url"` // Timing ReconnectDelay time.Duration `yaml:"reconnect_delay"` MaxReconnectDelay time.Duration `yaml:"max_reconnect_delay"` HeartbeatInterval time.Duration `yaml:"heartbeat_interval"` InsecureSkipVerify bool `yaml:"insecure_skip_verify"` // Output control Verbose bool `yaml:"-"` Quiet bool `yaml:"-"` NoColor bool `yaml:"-"` JSON bool `yaml:"-"` // Connect behavior Takeover bool `yaml:"-"` } // Default configuration values var Defaults = Config{ ClickHouseURL: "http://localhost:8123", TunnelURL: "ws://127.0.0.1:3488/connect", ReconnectDelay: 1 * time.Second, MaxReconnectDelay: 30 * time.Second, HeartbeatInterval: 30 * time.Second, InsecureSkipVerify: false, } // configFile is the YAML structure for config file type configFile struct { TunnelToken string `yaml:"tunnel_token"` ClickHouseURL string `yaml:"clickhouse_url"` TunnelURL string `yaml:"tunnel_url"` InsecureSkipVerify bool `yaml:"insecure_skip_verify"` } // DefaultConfigPath returns the platform-specific default config path func DefaultConfigPath() string { switch runtime.GOOS { case "darwin": home, _ := os.UserHomeDir() return filepath.Join(home, ".config", "ch-ui", "config.yaml") default: // linux and others return "/etc/ch-ui/config.yaml" } } // Load creates a Config by merging: CLI flags -> config file -> environment variables // Priority: CLI flags override config file, config file overrides env vars func Load(configPath string, cliConfig *Config) (*Config, error) { cfg := Defaults // 1. 
Load from config file (lowest priority after defaults) if configPath != "" { if err := loadFromFile(configPath, &cfg); err != nil { // Only error if file was explicitly specified and doesn't exist if !os.IsNotExist(err) { return nil, fmt.Errorf("failed to load config file: %w", err) } } } else { // Try default path, ignore if not exists _ = loadFromFile(DefaultConfigPath(), &cfg) } // 2. Override with environment variables loadFromEnv(&cfg) // 3. Override with CLI flags (highest priority) if cliConfig != nil { mergeConfig(&cfg, cliConfig) } // Validate if err := cfg.Validate(); err != nil { return nil, err } return &cfg, nil } func loadFromFile(path string, cfg *Config) error { data, err := os.ReadFile(path) if err != nil { return err } var fc configFile if err := yaml.Unmarshal(data, &fc); err != nil { return fmt.Errorf("invalid YAML: %w", err) } if fc.TunnelToken != "" { cfg.Token = fc.TunnelToken } if fc.ClickHouseURL != "" { cfg.ClickHouseURL = fc.ClickHouseURL } if fc.TunnelURL != "" { cfg.TunnelURL = fc.TunnelURL } cfg.InsecureSkipVerify = fc.InsecureSkipVerify return nil } func loadFromEnv(cfg *Config) { if v := os.Getenv("TUNNEL_TOKEN"); v != "" { cfg.Token = v } if v := os.Getenv("CLICKHOUSE_URL"); v != "" { cfg.ClickHouseURL = v } if v := os.Getenv("TUNNEL_URL"); v != "" { cfg.TunnelURL = v } if v := os.Getenv("TUNNEL_INSECURE_SKIP_VERIFY"); v == "1" || strings.EqualFold(v, "true") || strings.EqualFold(v, "yes") { cfg.InsecureSkipVerify = true } } func mergeConfig(dst, src *Config) { if src.Token != "" { dst.Token = src.Token } if src.ClickHouseURL != "" && src.ClickHouseURL != Defaults.ClickHouseURL { dst.ClickHouseURL = src.ClickHouseURL } if src.TunnelURL != "" && src.TunnelURL != Defaults.TunnelURL { dst.TunnelURL = src.TunnelURL } if src.ReconnectDelay != 0 && src.ReconnectDelay != Defaults.ReconnectDelay { dst.ReconnectDelay = src.ReconnectDelay } if src.MaxReconnectDelay != 0 && src.MaxReconnectDelay != Defaults.MaxReconnectDelay { 
dst.MaxReconnectDelay = src.MaxReconnectDelay } if src.HeartbeatInterval != 0 && src.HeartbeatInterval != Defaults.HeartbeatInterval { dst.HeartbeatInterval = src.HeartbeatInterval } dst.Verbose = src.Verbose dst.Quiet = src.Quiet dst.NoColor = src.NoColor dst.JSON = src.JSON dst.Takeover = src.Takeover if src.InsecureSkipVerify { dst.InsecureSkipVerify = true } } // Validate checks if the configuration is valid func (c *Config) Validate() error { if c.Token == "" { return fmt.Errorf("tunnel token is required (use --key, TUNNEL_TOKEN env, or config file)") } if !strings.HasPrefix(c.Token, "cht_") { return fmt.Errorf("invalid tunnel token format (should start with 'cht_')") } if !strings.HasPrefix(c.TunnelURL, "ws://") && !strings.HasPrefix(c.TunnelURL, "wss://") { return fmt.Errorf("tunnel URL must start with ws:// or wss://") } if !strings.HasPrefix(c.ClickHouseURL, "http://") && !strings.HasPrefix(c.ClickHouseURL, "https://") { return fmt.Errorf("ClickHouse URL must start with http:// or https://") } return nil } // GenerateTemplate returns a YAML config template func GenerateTemplate() string { return `# CH-UI Agent Configuration # # This file can be placed at: # - Linux: /etc/ch-ui/config.yaml # - macOS: ~/.config/ch-ui/config.yaml # # All settings can also be specified via environment variables or CLI flags. 
# Priority: CLI flags > Environment variables > Config file # Required: Your tunnel token from CH-UI server (ch-ui tunnel create --name ) tunnel_token: "cht_your_token_here" # ClickHouse HTTP API URL (default: http://localhost:8123) clickhouse_url: "http://localhost:8123" # CH-UI tunnel URL (default: ws://127.0.0.1:3488/connect) tunnel_url: "ws://127.0.0.1:3488/connect" # Skip TLS certificate validation for tunnel connection (unsafe, dev only) # insecure_skip_verify: false ` } // Redacted returns a copy of the config with sensitive fields redacted func (c *Config) Redacted() Config { redacted := *c if redacted.Token != "" { if len(redacted.Token) > 8 { redacted.Token = redacted.Token[:8] + "..." } else { redacted.Token = "***" } } return redacted } ================================================ FILE: connector/connector.go ================================================ package connector import ( "context" "crypto/tls" "encoding/json" "fmt" "io" "net/http" "net/url" "strings" "sync" "sync/atomic" "time" "github.com/caioricciuti/ch-ui/connector/config" "github.com/caioricciuti/ch-ui/connector/ui" "github.com/gorilla/websocket" ) // Connector manages the tunnel connection to a CH-UI server type Connector struct { cfg *config.Config ui *ui.UI chClient *CHClient conn *websocket.Conn connMu sync.Mutex authenticated bool startTime time.Time // Stats queriesExecuted atomic.Int64 lastQueryTime atomic.Int64 // Control ctx context.Context cancel context.CancelFunc done chan struct{} // Reconnection reconnectDelay time.Duration } // New creates a new Connector instance func New(cfg *config.Config, u *ui.UI) *Connector { ctx, cancel := context.WithCancel(context.Background()) return &Connector{ cfg: cfg, ui: u, chClient: NewCHClient(cfg.ClickHouseURL, cfg.InsecureSkipVerify), reconnectDelay: cfg.ReconnectDelay, ctx: ctx, cancel: cancel, done: make(chan struct{}), } } // Run starts the connector and blocks until shutdown func (c *Connector) Run() error { c.startTime = 
time.Now() // Initial connection if err := c.connect(); err != nil { if ce, ok := err.(*ConnectError); ok && ce.Type == "auth" { c.ui.Error("Authentication failed — not retrying (token may be invalid or revoked)") return err } return err } // Start message handler go c.messageLoop() // Start heartbeat go c.heartbeatLoop() // Start host info reporting go c.hostInfoLoop() // Wait for shutdown <-c.done return nil } // Shutdown gracefully stops the connector func (c *Connector) Shutdown() { c.cancel() c.connMu.Lock() if c.conn != nil { c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "shutdown")) c.conn.Close() } c.connMu.Unlock() close(c.done) } // Stats returns current connector statistics func (c *Connector) Stats() (queriesExecuted int64, uptime time.Duration, lastQuery time.Time) { queriesExecuted = c.queriesExecuted.Load() uptime = time.Since(c.startTime) if ts := c.lastQueryTime.Load(); ts > 0 { lastQuery = time.Unix(0, ts) } return } // ConnectError represents a classified connection error type ConnectError struct { Type string // "network", "auth", "server", "protocol" Message string Err error } func (e *ConnectError) Error() string { return e.Message } func (c *Connector) connect() error { c.ui.Info("Connecting to %s...", extractHost(c.cfg.TunnelURL)) dialer := websocket.Dialer{ HandshakeTimeout: 10 * time.Second, TLSClientConfig: &tls.Config{InsecureSkipVerify: c.cfg.InsecureSkipVerify}, } if c.cfg.InsecureSkipVerify { c.ui.Warn("TLS certificate verification is disabled (insecure_skip_verify=true)") } headers := http.Header{} headers.Set("User-Agent", "ch-ui-agent/1.0") conn, dialResp, err := dialer.DialContext(c.ctx, c.cfg.TunnelURL, headers) if err != nil { dialErr := err if dialResp != nil { body, _ := io.ReadAll(io.LimitReader(dialResp.Body, 2048)) dialResp.Body.Close() if len(body) > 0 { dialErr = fmt.Errorf("%w (status=%d body=%q)", err, dialResp.StatusCode, strings.TrimSpace(string(body))) } else { 
dialErr = fmt.Errorf("%w (status=%d)", err, dialResp.StatusCode) } } c.ui.ConnectionError(dialErr, c.cfg.TunnelURL) return &ConnectError{Type: "network", Message: "Failed to connect to CH-UI server", Err: dialErr} } c.connMu.Lock() c.conn = conn c.connMu.Unlock() // Send auth message authMsg := AgentMessage{ Type: MsgTypeAuth, Token: c.cfg.Token, Takeover: c.cfg.Takeover, } if err := c.send(authMsg); err != nil { conn.Close() c.ui.ConnectionError(err, c.cfg.TunnelURL) return &ConnectError{Type: "network", Message: "Failed to send authentication", Err: err} } c.ui.Debug("Auth message sent, waiting for response...") // Wait for auth response conn.SetReadDeadline(time.Now().Add(10 * time.Second)) _, message, err := conn.ReadMessage() if err != nil { conn.Close() c.ui.ConnectionError(err, c.cfg.TunnelURL) return &ConnectError{Type: "network", Message: "Failed to receive auth response", Err: err} } conn.SetReadDeadline(time.Time{}) // Clear deadline var authResp GatewayMessage if err := json.Unmarshal(message, &authResp); err != nil { conn.Close() c.ui.DiagnosticError(ui.ErrorTypeServer, "CH-UI Server", "Received invalid response from server", []string{ "The server may be running an incompatible version", "Try updating the agent to the latest version", "Contact support if the issue persists", }) return &ConnectError{Type: "protocol", Message: "Invalid server response", Err: err} } switch authResp.Type { case MsgTypeAuthOK: c.authenticated = true c.reconnectDelay = c.cfg.ReconnectDelay // Reset on successful connection c.ui.Success("Authenticated successfully") c.ui.Success("Tunnel established") c.ui.Status(c.cfg.TunnelURL, c.cfg.ClickHouseURL, time.Since(c.startTime)) return nil case MsgTypeAuthError: conn.Close() // Server may send error in either "error" or "message" field errMsg := authResp.Error if errMsg == "" { errMsg = authResp.Message } if errMsg == "" { errMsg = "Authentication failed (no details provided)" } if isPermanentAuthError(errMsg) { 
c.ui.AuthError(errMsg) return &ConnectError{Type: "auth", Message: errMsg} } c.ui.Warn("Server temporarily rejected authentication: %s", errMsg) return &ConnectError{Type: "server", Message: errMsg} default: conn.Close() c.ui.DiagnosticError(ui.ErrorTypeServer, "CH-UI Server", fmt.Sprintf("Unexpected response type: %s", authResp.Type), []string{ "The server may be running an incompatible version", "Try updating the agent to the latest version", }) return &ConnectError{Type: "protocol", Message: fmt.Sprintf("Unexpected response: %s", authResp.Type)} } } func isPermanentAuthError(msg string) bool { lower := strings.ToLower(strings.TrimSpace(msg)) if lower == "" { return false } return strings.Contains(lower, "invalid tunnel token") || strings.Contains(lower, "invalid token") || strings.Contains(lower, "revoked") } func (c *Connector) messageLoop() { for { select { case <-c.ctx.Done(): return default: } c.connMu.Lock() conn := c.conn c.connMu.Unlock() if conn == nil { time.Sleep(100 * time.Millisecond) continue } _, message, err := conn.ReadMessage() if err != nil { if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) { c.ui.Disconnected("server closed connection") } else { c.ui.Disconnected(err.Error()) } c.connMu.Lock() c.conn = nil c.authenticated = false c.connMu.Unlock() // Attempt reconnection c.reconnect() continue } var msg GatewayMessage if err := json.Unmarshal(message, &msg); err != nil { c.ui.Debug("Invalid message: %v", err) continue } c.handleMessage(msg) } } func (c *Connector) handleMessage(msg GatewayMessage) { switch msg.Type { case MsgTypePing: c.send(AgentMessage{Type: MsgTypePong}) case MsgTypeQuery: go c.executeQuery(msg) case MsgTypeQueryStream: go c.executeStreamQuery(msg) case MsgTypeTestConnection: go c.testConnection(msg) case MsgTypeCancelQuery: c.ui.Debug("Cancel query requested for %s (not implemented)", msg.QueryID) default: c.ui.Debug("Unknown message type: %s", msg.Type) } } func (c *Connector) 
executeQuery(msg GatewayMessage) { start := time.Now() queryID := msg.QueryID sql := msg.Query format := msg.Format // "" or "JSON" = legacy, "JSONCompact" = tier 1 // If a compact format is requested, use ExecuteRaw to avoid intermediate parsing if format != "" && format != "JSON" { raw, err := c.chClient.ExecuteRaw(c.ctx, sql, msg.User, msg.Password, format) elapsed := time.Since(start) if err != nil { c.ui.QueryError(queryID, err) c.send(AgentMessage{ Type: MsgTypeQueryError, QueryID: queryID, Error: err.Error(), }) return } c.queriesExecuted.Add(1) c.lastQueryTime.Store(time.Now().UnixNano()) c.ui.QueryLog(queryID, elapsed, 0) // Send raw bytes directly — no intermediate parse/reserialize c.send(AgentMessage{ Type: MsgTypeQueryResult, QueryID: queryID, Data: raw, }) return } // Legacy JSON path — parse into structured result result, err := c.chClient.Execute(c.ctx, sql, msg.User, msg.Password) elapsed := time.Since(start) if err != nil { c.ui.QueryError(queryID, err) c.send(AgentMessage{ Type: MsgTypeQueryError, QueryID: queryID, Error: err.Error(), }) return } c.queriesExecuted.Add(1) c.lastQueryTime.Store(time.Now().UnixNano()) rows := len(result.Data) c.ui.QueryLog(queryID, elapsed, rows) c.send(AgentMessage{ Type: MsgTypeQueryResult, QueryID: queryID, Data: result.Data, Meta: result.Meta, Stats: &QueryStats{ Elapsed: result.Statistics.Elapsed, RowsRead: result.Statistics.RowsRead, BytesRead: result.Statistics.BytesRead, }, }) } func (c *Connector) executeStreamQuery(msg GatewayMessage) { start := time.Now() queryID := msg.QueryID sql := msg.Query c.ui.Debug("Stream query %s: %s", queryID, truncateStr(sql, 80)) // Send chunks as they arrive onMeta := func(meta json.RawMessage) error { return c.send(AgentMessage{ Type: MsgTypeQueryStreamStart, QueryID: queryID, Meta: meta, }) } onChunk := func(seq int, data json.RawMessage) error { return c.send(AgentMessage{ Type: MsgTypeQueryStreamChunk, QueryID: queryID, Data: data, Seq: seq, }) } _, totalRows, err := 
c.chClient.ExecuteStreaming(c.ctx, sql, msg.User, msg.Password, 5000, msg.Settings, onMeta, onChunk) elapsed := time.Since(start) if err != nil { c.ui.QueryError(queryID, err) c.send(AgentMessage{ Type: MsgTypeQueryStreamError, QueryID: queryID, Error: err.Error(), }) return } c.queriesExecuted.Add(1) c.lastQueryTime.Store(time.Now().UnixNano()) c.ui.QueryLog(queryID, elapsed, int(totalRows)) c.send(AgentMessage{ Type: MsgTypeQueryStreamEnd, QueryID: queryID, TotalRows: totalRows, Stats: &QueryStats{ Elapsed: elapsed.Seconds(), }, }) } func truncateStr(s string, maxLen int) string { if len(s) <= maxLen { return s } return s[:maxLen] + "..." } func (c *Connector) testConnection(msg GatewayMessage) { version, err := c.chClient.TestConnection(c.ctx, msg.User, msg.Password) if err != nil { c.ui.Debug("Connection test failed: %v", err) c.send(AgentMessage{ Type: MsgTypeTestResult, QueryID: msg.QueryID, Online: false, Error: err.Error(), }) return } c.ui.Debug("Connection test successful, version: %s", version) c.send(AgentMessage{ Type: MsgTypeTestResult, QueryID: msg.QueryID, Online: true, Version: version, }) } func (c *Connector) heartbeatLoop() { ticker := time.NewTicker(c.cfg.HeartbeatInterval) defer ticker.Stop() for { select { case <-c.ctx.Done(): return case <-ticker.C: c.connMu.Lock() conn := c.conn authenticated := c.authenticated if conn != nil && authenticated { if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(5*time.Second)); err != nil { c.ui.Debug("Heartbeat failed: %v", err) } } c.connMu.Unlock() } } } // sendHostInfo collects and sends host machine metrics to the server func (c *Connector) sendHostInfo() { hostInfo := CollectHostInfo(c.startTime) if err := c.send(AgentMessage{ Type: MsgTypeHostInfo, HostInfo: hostInfo, }); err != nil { c.ui.Debug("Failed to send host info: %v", err) } else { c.ui.Debug("Host info sent (CPU: %d cores, Mem: %d MB, Disk: %d GB)", hostInfo.CPUCores, hostInfo.MemoryTotal/(1024*1024), 
hostInfo.DiskTotal/(1024*1024*1024)) } } // hostInfoLoop sends host info periodically (every 60 seconds) func (c *Connector) hostInfoLoop() { // Send initial host info after a short delay to allow auth to complete time.Sleep(2 * time.Second) c.sendHostInfo() ticker := time.NewTicker(60 * time.Second) defer ticker.Stop() for { select { case <-c.ctx.Done(): return case <-ticker.C: c.connMu.Lock() authenticated := c.authenticated c.connMu.Unlock() if authenticated { c.sendHostInfo() } } } } func (c *Connector) reconnect() { for { select { case <-c.ctx.Done(): return default: } c.ui.Reconnecting(c.reconnectDelay) time.Sleep(c.reconnectDelay) // Exponential backoff c.reconnectDelay *= 2 if c.reconnectDelay > c.cfg.MaxReconnectDelay { c.reconnectDelay = c.cfg.MaxReconnectDelay } if err := c.connect(); err != nil { if ce, ok := err.(*ConnectError); ok && ce.Type == "auth" { c.ui.Error("Authentication failed — stopping reconnection (token is invalid or revoked)") close(c.done) return } c.ui.Error("Reconnection failed: %v", err) continue } return } } func (c *Connector) send(msg AgentMessage) error { c.connMu.Lock() defer c.connMu.Unlock() if c.conn == nil { return fmt.Errorf("not connected") } data, err := json.Marshal(msg) if err != nil { return err } return c.conn.WriteMessage(websocket.TextMessage, data) } func extractHost(urlStr string) string { parsed, err := url.Parse(urlStr) if err == nil && parsed.Host != "" { if hostname := parsed.Hostname(); hostname != "" { return hostname } return parsed.Host } trimmed := strings.TrimPrefix(strings.TrimPrefix(urlStr, "wss://"), "ws://") for i, c := range trimmed { if c == '/' || c == ':' { return trimmed[:i] } } return trimmed } ================================================ FILE: connector/hostinfo.go ================================================ package connector import ( "os" "runtime" "time" ) // HostInfo contains system metrics from the host machine type HostInfo struct { Hostname string `json:"hostname"` OS string 
`json:"os"` Arch string `json:"arch"` CPUCores int `json:"cpu_cores"` MemoryTotal uint64 `json:"memory_total"` // bytes MemoryFree uint64 `json:"memory_free"` // bytes DiskTotal uint64 `json:"disk_total"` // bytes DiskFree uint64 `json:"disk_free"` // bytes GoVersion string `json:"go_version"` AgentUptime int64 `json:"agent_uptime"` // seconds CollectedAt string `json:"collected_at"` // ISO 8601 } // CollectHostInfo gathers system metrics from the host machine func CollectHostInfo(agentStartTime time.Time) *HostInfo { info := &HostInfo{ OS: runtime.GOOS, Arch: runtime.GOARCH, CPUCores: runtime.NumCPU(), GoVersion: runtime.Version(), AgentUptime: int64(time.Since(agentStartTime).Seconds()), CollectedAt: time.Now().UTC().Format(time.RFC3339), } // Hostname if hostname, err := os.Hostname(); err == nil { info.Hostname = hostname } else { info.Hostname = "unknown" } // Memory stats (use Go runtime as cross-platform source) info.MemoryTotal, info.MemoryFree = getMemoryInfo() // Disk stats for root filesystem (platform-specific) info.DiskTotal, info.DiskFree = getDiskInfo() return info } // getMemoryInfo returns total and free memory in bytes // Uses runtime.MemStats as a cross-platform approach func getMemoryInfo() (total, free uint64) { var m runtime.MemStats runtime.ReadMemStats(&m) // Use runtime stats as cross-platform source // Sys is total memory obtained from OS // Alloc is memory currently in use total = m.Sys free = m.Sys - m.Alloc return } ================================================ FILE: connector/hostinfo_unix.go ================================================ //go:build !windows package connector import "syscall" // getDiskInfo returns total and free disk space for the root filesystem func getDiskInfo() (total, free uint64) { var stat syscall.Statfs_t if err := syscall.Statfs("/", &stat); err != nil { return 0, 0 } total = stat.Blocks * uint64(stat.Bsize) free = stat.Bfree * uint64(stat.Bsize) return } ================================================ 
FILE: connector/hostinfo_windows.go ================================================ //go:build windows package connector import ( "syscall" "unsafe" ) // getDiskInfo returns total and free disk space for the C: drive func getDiskInfo() (total, free uint64) { kernel32 := syscall.MustLoadDLL("kernel32.dll") getDiskFreeSpaceEx := kernel32.MustFindProc("GetDiskFreeSpaceExW") var freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes uint64 path, _ := syscall.UTF16PtrFromString("C:\\") r, _, _ := getDiskFreeSpaceEx.Call( uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(&freeBytesAvailable)), uintptr(unsafe.Pointer(&totalNumberOfBytes)), uintptr(unsafe.Pointer(&totalNumberOfFreeBytes)), ) if r == 0 { return 0, 0 } return totalNumberOfBytes, totalNumberOfFreeBytes } ================================================ FILE: connector/protocol.go ================================================ package connector // GatewayMessage represents messages received from the CH-UI tunnel server. type GatewayMessage struct { Type string `json:"type"` // Message type: auth_ok, auth_error, query, query_stream, ping, cancel_query, test_connection QueryID string `json:"query_id,omitempty"` // Query identifier Query string `json:"query,omitempty"` // SQL query to execute User string `json:"user,omitempty"` // ClickHouse username for this query Password string `json:"password,omitempty"` // ClickHouse password for this query Format string `json:"format,omitempty"` // ClickHouse output format (JSONCompact, stream, etc.) Error string `json:"error,omitempty"` // Error message (for auth_error) Message string `json:"message,omitempty"` // Additional message info Settings map[string]string `json:"settings,omitempty"` // ClickHouse query settings (URL params) } // AgentMessage represents messages sent to the CH-UI tunnel server. 
type AgentMessage struct { Type string `json:"type"` // Message type: auth, pong, query_result, query_error, test_result, host_info, query_stream_* QueryID string `json:"query_id,omitempty"` // Query identifier (for query responses) Token string `json:"token,omitempty"` // Tunnel token (for auth message) Takeover bool `json:"takeover,omitempty"` // Request takeover of an existing session for this token Data interface{} `json:"data,omitempty"` // Query result data Meta interface{} `json:"meta,omitempty"` // Query result metadata Stats *QueryStats `json:"statistics,omitempty"` Error string `json:"error,omitempty"` // Error message Version string `json:"version,omitempty"` // ClickHouse version (for test_result) Online bool `json:"online,omitempty"` // Connection status (for test_result) HostInfo *HostInfo `json:"host_info,omitempty"` // Host machine metrics Seq int `json:"seq,omitempty"` // Chunk sequence number (for streaming) TotalRows int64 `json:"total_rows,omitempty"` // Total row count (for streaming) } // QueryStats contains query execution statistics type QueryStats struct { Elapsed float64 `json:"elapsed"` RowsRead uint64 `json:"rows_read"` BytesRead uint64 `json:"bytes_read"` } // Message types from gateway const ( MsgTypeAuthOK = "auth_ok" MsgTypeAuthError = "auth_error" MsgTypeQuery = "query" MsgTypeQueryStream = "query_stream" MsgTypePing = "ping" MsgTypeCancelQuery = "cancel_query" MsgTypeTestConnection = "test_connection" ) // Message types to gateway const ( MsgTypeAuth = "auth" MsgTypePong = "pong" MsgTypeQueryResult = "query_result" MsgTypeQueryError = "query_error" MsgTypeTestResult = "test_result" MsgTypeHostInfo = "host_info" MsgTypeQueryStreamStart = "query_stream_start" MsgTypeQueryStreamChunk = "query_stream_chunk" MsgTypeQueryStreamEnd = "query_stream_end" MsgTypeQueryStreamError = "query_stream_error" ) ================================================ FILE: connector/service/launchd.go ================================================ package 
service import ( "fmt" "os" "os/exec" "path/filepath" "strconv" "strings" ) const launchdPlistTemplate = ` Label %s ProgramArguments %s connect --config %s RunAtLoad KeepAlive SuccessfulExit ThrottleInterval 5 StandardOutPath %s StandardErrorPath %s WorkingDirectory /tmp ` func (m *Manager) launchdPlistPath() string { home, _ := os.UserHomeDir() return filepath.Join(home, "Library", "LaunchAgents", ServiceLabel+".plist") } func (m *Manager) launchdLogDir() string { home, _ := os.UserHomeDir() return filepath.Join(home, "Library", "Logs", "ch-ui") } func (m *Manager) launchdLogPath() string { return filepath.Join(m.launchdLogDir(), "agent.log") } func (m *Manager) launchdIsInstalled() bool { return fileExists(m.launchdPlistPath()) } func (m *Manager) launchdIsRunning() (bool, error) { output, err := runCommand("launchctl", "list") if err != nil { return false, err } return strings.Contains(output, ServiceLabel), nil } func (m *Manager) launchdInstall(configPath string) error { // Create log directory logDir := m.launchdLogDir() if err := os.MkdirAll(logDir, 0755); err != nil { return fmt.Errorf("failed to create log directory: %w", err) } // Create LaunchAgents directory if it doesn't exist agentsDir := filepath.Dir(m.launchdPlistPath()) if err := os.MkdirAll(agentsDir, 0755); err != nil { return fmt.Errorf("failed to create LaunchAgents directory: %w", err) } // Generate plist content logPath := m.launchdLogPath() plistContent := fmt.Sprintf(launchdPlistTemplate, ServiceLabel, BinaryPath, configPath, logPath, logPath, ) // Write plist file if err := os.WriteFile(m.launchdPlistPath(), []byte(plistContent), 0644); err != nil { return fmt.Errorf("failed to write plist file: %w", err) } // Load the service _, err := runCommand("launchctl", "load", m.launchdPlistPath()) if err != nil { return fmt.Errorf("failed to load service: %w", err) } return nil } func (m *Manager) launchdUninstall() error { // Stop the service first (ignore errors if not running) _ = 
m.launchdStop() // Unload the service if m.launchdIsInstalled() { runCommand("launchctl", "unload", m.launchdPlistPath()) } // Remove plist file plistPath := m.launchdPlistPath() if fileExists(plistPath) { if err := os.Remove(plistPath); err != nil { return fmt.Errorf("failed to remove plist file: %w", err) } } return nil } func (m *Manager) launchdStart() error { if !m.launchdIsInstalled() { return fmt.Errorf("service not installed. Run 'ch-ui service install' first") } // Check if already running running, _ := m.launchdIsRunning() if running { return fmt.Errorf("service is already running") } // Start the service _, err := runCommand("launchctl", "start", ServiceLabel) if err != nil { return fmt.Errorf("failed to start service: %w", err) } return nil } func (m *Manager) launchdStop() error { running, _ := m.launchdIsRunning() if !running { return fmt.Errorf("service is not running") } _, err := runCommand("launchctl", "stop", ServiceLabel) if err != nil { return fmt.Errorf("failed to stop service: %w", err) } return nil } func (m *Manager) launchdRestart() error { if !m.launchdIsInstalled() { return fmt.Errorf("service not installed. 
Run 'ch-ui service install' first") } // Stop if running running, _ := m.launchdIsRunning() if running { runCommand("launchctl", "stop", ServiceLabel) } // Start the service _, err := runCommand("launchctl", "start", ServiceLabel) if err != nil { return fmt.Errorf("failed to restart service: %w", err) } return nil } func (m *Manager) launchdStatus() (string, error) { if !m.launchdIsInstalled() { return "not installed", nil } output, _ := runCommand("launchctl", "list") lines := strings.Split(output, "\n") for _, line := range lines { if strings.Contains(line, ServiceLabel) { parts := strings.Fields(line) if len(parts) >= 2 { pid := parts[0] status := parts[1] if pid != "-" { return fmt.Sprintf("running (PID: %s)", pid), nil } if status != "0" { return fmt.Sprintf("stopped (last exit: %s)", status), nil } } return "stopped", nil } } return "not running", nil } func (m *Manager) launchdLogs(follow bool, lines int) error { logPath := m.launchdLogPath() if !fileExists(logPath) { fmt.Println("No logs found yet. 
Service may not have started.") return nil } if follow { cmd := exec.Command("tail", "-f", "-n", strconv.Itoa(lines), logPath) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() } cmd := exec.Command("tail", "-n", strconv.Itoa(lines), logPath) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() } ================================================ FILE: connector/service/service.go ================================================ package service import ( "fmt" "os" "os/exec" "path/filepath" "runtime" "strings" ) const ( ServiceName = "ch-ui" ServiceLabel = "com.ch-ui" BinaryPath = "/usr/local/bin/ch-ui" SystemConfigDir = "/etc/ch-ui" SystemConfigPath = "/etc/ch-ui/config.yaml" ) // Manager provides cross-platform service management type Manager struct { platform string } // New creates a new service manager for the current platform func New() *Manager { return &Manager{platform: runtime.GOOS} } // IsInstalled checks if the service is installed func (m *Manager) IsInstalled() bool { switch m.platform { case "darwin": return m.launchdIsInstalled() case "linux": return m.systemdIsInstalled() default: return false } } // IsRunning checks if the service is currently running func (m *Manager) IsRunning() (bool, error) { switch m.platform { case "darwin": return m.launchdIsRunning() case "linux": return m.systemdIsRunning() default: return false, fmt.Errorf("unsupported platform: %s", m.platform) } } // Install installs the service func (m *Manager) Install(configPath string) error { switch m.platform { case "darwin": return m.launchdInstall(configPath) case "linux": return m.systemdInstall(configPath) default: return fmt.Errorf("unsupported platform: %s", m.platform) } } // Uninstall removes the service func (m *Manager) Uninstall() error { switch m.platform { case "darwin": return m.launchdUninstall() case "linux": return m.systemdUninstall() default: return fmt.Errorf("unsupported platform: %s", m.platform) } } // Start starts the service func (m 
*Manager) Start() error { switch m.platform { case "darwin": return m.launchdStart() case "linux": return m.systemdStart() default: return fmt.Errorf("unsupported platform: %s", m.platform) } } // Stop stops the service func (m *Manager) Stop() error { switch m.platform { case "darwin": return m.launchdStop() case "linux": return m.systemdStop() default: return fmt.Errorf("unsupported platform: %s", m.platform) } } // Restart restarts the service func (m *Manager) Restart() error { switch m.platform { case "darwin": return m.launchdRestart() case "linux": return m.systemdRestart() default: return fmt.Errorf("unsupported platform: %s", m.platform) } } // Status returns the service status as a string func (m *Manager) Status() (string, error) { switch m.platform { case "darwin": return m.launchdStatus() case "linux": return m.systemdStatus() default: return "", fmt.Errorf("unsupported platform: %s", m.platform) } } // Logs returns recent service logs func (m *Manager) Logs(follow bool, lines int) error { switch m.platform { case "darwin": return m.launchdLogs(follow, lines) case "linux": return m.systemdLogs(follow, lines) default: return fmt.Errorf("unsupported platform: %s", m.platform) } } // GetLogPath returns the path to the log file (for macOS) func (m *Manager) GetLogPath() string { switch m.platform { case "darwin": home, _ := os.UserHomeDir() return filepath.Join(home, "Library", "Logs", "ch-ui", "agent.log") case "linux": return "" // Uses journald default: return "" } } // Platform returns the current platform func (m *Manager) Platform() string { return m.platform } // NeedsSudo returns true if sudo is needed for service operations func (m *Manager) NeedsSudo() bool { // macOS launchd user agents don't need sudo // Linux systemd system services need sudo return m.platform == "linux" } // runCommand runs a command and returns combined output func runCommand(name string, args ...string) (string, error) { cmd := exec.Command(name, args...) 
output, err := cmd.CombinedOutput() return strings.TrimSpace(string(output)), err } // runCommandWithSudo runs a command with sudo if needed func runCommandWithSudo(needsSudo bool, name string, args ...string) (string, error) { if needsSudo && os.Geteuid() != 0 { args = append([]string{name}, args...) name = "sudo" } return runCommand(name, args...) } // fileExists checks if a file exists func fileExists(path string) bool { _, err := os.Stat(path) return err == nil } // GetConfigPath returns the appropriate config path based on platform func GetConfigPath() string { switch runtime.GOOS { case "darwin": home, _ := os.UserHomeDir() return filepath.Join(home, ".config", "ch-ui", "config.yaml") default: return SystemConfigPath } } // GetConfigDir returns the appropriate config directory based on platform func GetConfigDir() string { switch runtime.GOOS { case "darwin": home, _ := os.UserHomeDir() return filepath.Join(home, ".config", "ch-ui") default: return SystemConfigDir } } ================================================ FILE: connector/service/systemd.go ================================================ package service import ( "fmt" "os" "os/exec" "strconv" "strings" ) const systemdServiceTemplate = `[Unit] Description=CH-UI Tunnel Documentation=https://ch-ui.com/docs After=network-online.target Wants=network-online.target [Service] Type=simple ExecStart=%s connect --config %s Restart=always RestartSec=5 StandardOutput=journal StandardError=journal SyslogIdentifier=%s # Security hardening NoNewPrivileges=true ProtectSystem=strict ProtectHome=read-only PrivateTmp=true ReadWritePaths=%s [Install] WantedBy=multi-user.target ` const systemdServicePath = "/etc/systemd/system/ch-ui.service" func (m *Manager) systemdIsInstalled() bool { return fileExists(systemdServicePath) } func (m *Manager) systemdIsRunning() (bool, error) { output, err := runCommand("systemctl", "is-active", ServiceName) if err != nil { return false, nil // Not running or not installed } return 
strings.TrimSpace(output) == "active", nil } func (m *Manager) systemdInstall(configPath string) error { // Create config directory with proper permissions configDir := SystemConfigDir if err := os.MkdirAll(configDir, 0755); err != nil { // Try with sudo _, err = runCommandWithSudo(true, "mkdir", "-p", configDir) if err != nil { return fmt.Errorf("failed to create config directory: %w", err) } } // Generate service content serviceContent := fmt.Sprintf(systemdServiceTemplate, BinaryPath, configPath, ServiceName, configDir, ) // Write service file (needs sudo) tmpFile := "/tmp/ch-ui-agent.service" if err := os.WriteFile(tmpFile, []byte(serviceContent), 0644); err != nil { return fmt.Errorf("failed to write service file: %w", err) } defer os.Remove(tmpFile) // Move to systemd directory _, err := runCommandWithSudo(true, "mv", tmpFile, systemdServicePath) if err != nil { return fmt.Errorf("failed to install service file: %w", err) } // Set permissions runCommandWithSudo(true, "chmod", "644", systemdServicePath) // Reload systemd _, err = runCommandWithSudo(true, "systemctl", "daemon-reload") if err != nil { return fmt.Errorf("failed to reload systemd: %w", err) } // Enable the service _, err = runCommandWithSudo(true, "systemctl", "enable", ServiceName) if err != nil { return fmt.Errorf("failed to enable service: %w", err) } // Start the service _, err = runCommandWithSudo(true, "systemctl", "start", ServiceName) if err != nil { return fmt.Errorf("failed to start service: %w", err) } return nil } func (m *Manager) systemdUninstall() error { // Stop the service runCommandWithSudo(true, "systemctl", "stop", ServiceName) // Disable the service runCommandWithSudo(true, "systemctl", "disable", ServiceName) // Remove service file if fileExists(systemdServicePath) { _, err := runCommandWithSudo(true, "rm", systemdServicePath) if err != nil { return fmt.Errorf("failed to remove service file: %w", err) } } // Reload systemd runCommandWithSudo(true, "systemctl", 
"daemon-reload") return nil } func (m *Manager) systemdStart() error { if !m.systemdIsInstalled() { return fmt.Errorf("service not installed. Run 'ch-ui service install' first") } running, _ := m.systemdIsRunning() if running { return fmt.Errorf("service is already running") } _, err := runCommandWithSudo(true, "systemctl", "start", ServiceName) if err != nil { return fmt.Errorf("failed to start service: %w", err) } return nil } func (m *Manager) systemdStop() error { running, _ := m.systemdIsRunning() if !running { return fmt.Errorf("service is not running") } _, err := runCommandWithSudo(true, "systemctl", "stop", ServiceName) if err != nil { return fmt.Errorf("failed to stop service: %w", err) } return nil } func (m *Manager) systemdRestart() error { if !m.systemdIsInstalled() { return fmt.Errorf("service not installed. Run 'ch-ui service install' first") } _, err := runCommandWithSudo(true, "systemctl", "restart", ServiceName) if err != nil { return fmt.Errorf("failed to restart service: %w", err) } return nil } func (m *Manager) systemdStatus() (string, error) { if !m.systemdIsInstalled() { return "not installed", nil } output, _ := runCommand("systemctl", "is-active", ServiceName) status := strings.TrimSpace(output) switch status { case "active": // Get more details detailOutput, _ := runCommand("systemctl", "show", ServiceName, "--property=MainPID,ActiveEnterTimestamp") var pid, since string for _, line := range strings.Split(detailOutput, "\n") { if strings.HasPrefix(line, "MainPID=") { pid = strings.TrimPrefix(line, "MainPID=") } if strings.HasPrefix(line, "ActiveEnterTimestamp=") { since = strings.TrimPrefix(line, "ActiveEnterTimestamp=") } } if pid != "" && pid != "0" { if since != "" { return fmt.Sprintf("running (PID: %s, since: %s)", pid, since), nil } return fmt.Sprintf("running (PID: %s)", pid), nil } return "running", nil case "inactive": return "stopped", nil case "failed": return "failed (check logs with: ch-ui service logs)", nil default: return 
status, nil } } func (m *Manager) systemdLogs(follow bool, lines int) error { args := []string{"-u", ServiceName, "-n", strconv.Itoa(lines)} if follow { args = append(args, "-f") } cmd := exec.Command("journalctl", args...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() } ================================================ FILE: connector/ui/ui.go ================================================ package ui import ( "fmt" "io" "os" "strings" "time" "github.com/fatih/color" ) // UI handles formatted terminal output type UI struct { out io.Writer noColor bool quiet bool verbose bool jsonMode bool green *color.Color red *color.Color yellow *color.Color cyan *color.Color blue *color.Color magenta *color.Color bold *color.Color dim *color.Color } // New creates a new UI instance func New(noColor, quiet, verbose, jsonMode bool) *UI { if noColor { color.NoColor = true } return &UI{ out: os.Stdout, noColor: noColor, quiet: quiet, verbose: verbose, jsonMode: jsonMode, green: color.New(color.FgGreen), red: color.New(color.FgRed), yellow: color.New(color.FgYellow), cyan: color.New(color.FgCyan), blue: color.New(color.FgBlue), magenta: color.New(color.FgMagenta), bold: color.New(color.Bold), dim: color.New(color.Faint), } } // Logo prints the CH-UI ASCII art logo func (u *UI) Logo(version string) { if u.quiet || u.jsonMode { return } logo := ` ██████╗██╗ ██╗ ██╗ ██╗██╗ ██╔════╝██║ ██║ ██║ ██║██║ ██║ ███████║█████╗██║ ██║██║ ██║ ██╔══██║╚════╝██║ ██║██║ ╚██████╗██║ ██║ ╚██████╔╝██║ ╚═════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝` u.cyan.Println(logo) u.dim.Printf(" Tunnel %s\n\n", version) } // Info prints an info message func (u *UI) Info(format string, args ...interface{}) { if u.quiet || u.jsonMode { return } u.cyan.Print("→ ") fmt.Fprintf(u.out, format+"\n", args...) } // Success prints a success message func (u *UI) Success(format string, args ...interface{}) { if u.quiet || u.jsonMode { return } u.green.Print("✓ ") fmt.Fprintf(u.out, format+"\n", args...) 
}

// Error prints an error message to stderr (suppressed in JSON mode)
func (u *UI) Error(format string, args ...interface{}) {
	if u.jsonMode {
		return
	}
	u.red.Print("✗ ")
	fmt.Fprintf(os.Stderr, format+"\n", args...)
}

// ErrorType represents the category of error
type ErrorType string

const (
	ErrorTypeNetwork ErrorType = "NETWORK"
	ErrorTypeAuth    ErrorType = "AUTH"
	ErrorTypeServer  ErrorType = "SERVER"
	ErrorTypeConfig  ErrorType = "CONFIG"
	ErrorTypeUnknown ErrorType = "UNKNOWN"
)

// DiagnosticError prints a detailed error with source, type, and suggestions
func (u *UI) DiagnosticError(errType ErrorType, source, message string, suggestions []string) {
	if u.jsonMode {
		return
	}
	fmt.Fprintln(os.Stderr)
	// Error header with type badge
	u.red.Fprint(os.Stderr, "┌─ ERROR ")
	u.dim.Fprintf(os.Stderr, "[%s]\n", errType)
	// Source
	u.red.Fprint(os.Stderr, "│\n")
	u.red.Fprint(os.Stderr, "│ ")
	u.bold.Fprint(os.Stderr, "Source: ")
	fmt.Fprintln(os.Stderr, source)
	// Message
	u.red.Fprint(os.Stderr, "│ ")
	u.bold.Fprint(os.Stderr, "Error: ")
	fmt.Fprintln(os.Stderr, message)
	// Suggestions
	if len(suggestions) > 0 {
		u.red.Fprint(os.Stderr, "│\n")
		u.red.Fprint(os.Stderr, "│ ")
		u.yellow.Fprintln(os.Stderr, "Possible causes:")
		for _, s := range suggestions {
			u.red.Fprint(os.Stderr, "│ ")
			u.dim.Fprint(os.Stderr, "• ")
			fmt.Fprintln(os.Stderr, s)
		}
	}
	u.red.Fprint(os.Stderr, "│\n")
	u.red.Fprintln(os.Stderr, "└─")
	fmt.Fprintln(os.Stderr)
}

// AuthError prints an authentication-specific error with helpful context
func (u *UI) AuthError(serverMessage string) {
	source := "CH-UI Server"
	var suggestions []string
	// Lower-cased once; every classification below matches against this.
	lower := strings.ToLower(serverMessage)
	// Classify the error and provide specific suggestions
	switch {
	case strings.Contains(lower, "invalid") && strings.Contains(lower, "token"):
		suggestions = []string{
			"The tunnel token is invalid or has been revoked",
			"Check that you copied the complete token (starts with 'cht_')",
			// NOTE(review): a placeholder such as <name> appears to have been
			// lost from the end of the next message in this copy — verify
			// against the upstream original.
			"Generate a new token on the server with: ch-ui tunnel create --name ",
			"Verify the token belongs to the target server instance",
		}
	case strings.Contains(lower, "license"):
		suggestions = []string{
			"The server license may have expired",
			"Contact your administrator to renew the license",
			"Check server logs for license validation details",
		}
	case strings.Contains(lower, "already connected"):
		suggestions = []string{
			"Another agent process is already connected with this token",
			"Stop the existing process or service before starting a new one",
			"Use 'ch-ui service status' to check service mode",
			"Reconnect with '--takeover' to replace the active session",
		}
	case strings.Contains(lower, "not found"):
		suggestions = []string{
			"The organization associated with this token may have been deleted",
			"The tunnel connection may have been removed",
			"Contact your administrator",
		}
	default:
		suggestions = []string{
			"Check that your token is valid and not expired",
			"Verify the tunnel URL is correct",
			"Check CH-UI server logs for tunnel auth errors",
		}
	}
	u.DiagnosticError(ErrorTypeAuth, source, serverMessage, suggestions)
}

// ConnectionError prints a connection-specific error
func (u *UI) ConnectionError(err error, tunnelURL string) {
	source := fmt.Sprintf("Connection to %s", tunnelURL)
	message := err.Error()
	var suggestions []string
	switch {
	case strings.Contains(message, "connection refused"):
		suggestions = []string{
			"The CH-UI server may be down or unreachable",
			"Check if the tunnel URL is correct: " + tunnelURL,
			"Verify your network/firewall allows outbound WebSocket connections",
			"If using a custom server, ensure it's running",
		}
	case strings.Contains(message, "no such host") || strings.Contains(message, "lookup"):
		suggestions = []string{
			"Cannot resolve the tunnel server hostname",
			"Check your DNS settings",
			"Verify the tunnel URL is correct: " + tunnelURL,
		}
	case strings.Contains(message, "timeout") || strings.Contains(message, "deadline"):
		suggestions = []string{
			"Connection timed out - server may be overloaded or unreachable",
			"Check your network connection",
			"Try again in a few moments",
		}
	case strings.Contains(message, "certificate") || strings.Contains(message, "tls"):
		suggestions = []string{
			"SSL/TLS certificate error",
			"If using a self-signed certificate, this is expected in dev mode",
			"Verify the tunnel URL protocol (ws:// vs wss://)",
		}
	default:
		suggestions = []string{
			"Check your network connection",
			"Verify the tunnel URL: " + tunnelURL,
			"Try running with --verbose for more details",
		}
	}
	u.DiagnosticError(ErrorTypeNetwork, source, message, suggestions)
}

// Warn prints a warning message
func (u *UI) Warn(format string, args ...interface{}) {
	if u.quiet || u.jsonMode {
		return
	}
	u.yellow.Print("! ")
	fmt.Fprintf(u.out, format+"\n", args...)
}

// Debug prints a debug message (only in verbose mode)
func (u *UI) Debug(format string, args ...interface{}) {
	if !u.verbose || u.jsonMode {
		return
	}
	u.dim.Printf("[debug] "+format+"\n", args...)
}

// Status prints the connection status block
func (u *UI) Status(tunnelURL, clickhouseURL string, uptime time.Duration) {
	if u.quiet || u.jsonMode {
		return
	}
	fmt.Println()
	u.bold.Println(" Status: ", u.green.Sprint("Connected"))
	fmt.Printf(" Tunnel: %s\n", tunnelURL)
	fmt.Printf(" ClickHouse: %s\n", clickhouseURL)
	fmt.Printf(" Uptime: %s\n", formatDuration(uptime))
	fmt.Println()
	u.dim.Println("Press Ctrl+C to disconnect")
	fmt.Println()
}

// shortID returns at most the first 8 bytes of a query ID for compact log
// output. The previous direct queryID[:8] slice in QueryLog/QueryError
// panicked ("slice bounds out of range") on IDs shorter than 8 bytes,
// including the empty string.
func shortID(id string) string {
	if len(id) > 8 {
		return id[:8]
	}
	return id
}

// QueryLog prints a query execution log line
func (u *UI) QueryLog(queryID string, elapsed time.Duration, rows int) {
	if u.quiet || u.jsonMode {
		return
	}
	timestamp := time.Now().Format("2006-01-02 15:04:05")
	u.dim.Printf("[%s] ", timestamp)
	fmt.Printf("Query %s executed ", u.cyan.Sprint(shortID(queryID)))
	u.dim.Printf("(%s, %s rows)\n", elapsed.Round(time.Millisecond), formatNumber(rows))
}

// QueryError prints a query error log line
func (u *UI) QueryError(queryID string, err error) {
	if u.jsonMode {
		return
	}
	timestamp := time.Now().Format("2006-01-02 15:04:05")
	u.dim.Printf("[%s] ", timestamp)
	u.red.Printf("Query %s failed: %v\n", shortID(queryID), err)
}

// Disconnected prints a disconnection message
func (u *UI) Disconnected(reason string) {
	if u.jsonMode {
		return
	}
	u.yellow.Print("! ")
	fmt.Printf("Disconnected: %s\n", reason)
}

// Reconnecting prints a reconnection message
func (u *UI) Reconnecting(delay time.Duration) {
	if u.quiet || u.jsonMode {
		return
	}
	u.cyan.Print("→ ")
	fmt.Printf("Reconnecting in %s...\n", delay.Round(time.Millisecond))
}

// Box prints a boxed message: a title, an underline, then the key/value
// pairs from lines in the order given by order (missing keys are skipped).
func (u *UI) Box(title string, lines map[string]string, order []string) {
	if u.quiet || u.jsonMode {
		return
	}
	fmt.Println()
	u.bold.Println(title)
	// NOTE(review): len(title) counts bytes, so a non-ASCII title draws an
	// over-long underline; fine for the current ASCII titles.
	fmt.Println(strings.Repeat("─", len(title)+2))
	for _, key := range order {
		if val, ok := lines[key]; ok {
			fmt.Printf(" %-12s %s\n", key+":", val)
		}
	}
	fmt.Println()
}

// Helpers

// formatDuration renders an uptime as "Ns", "Nm Ns" or "Nh Nm".
func formatDuration(d time.Duration) string {
	if d < time.Minute {
		return fmt.Sprintf("%ds", int(d.Seconds()))
	}
	if d < time.Hour {
		return fmt.Sprintf("%dm %ds", int(d.Minutes()), int(d.Seconds())%60)
	}
	return fmt.Sprintf("%dh %dm", int(d.Hours()), int(d.Minutes())%60)
}

// formatNumber renders a row count compactly (1234 -> "1.2K").
func formatNumber(n int) string {
	if n < 1000 {
		return fmt.Sprintf("%d", n)
	}
	if n < 1000000 {
		return fmt.Sprintf("%.1fK", float64(n)/1000)
	}
	return fmt.Sprintf("%.1fM", float64(n)/1000000)
}

// FormatBytes formats bytes to human readable format
func FormatBytes(b uint64) string {
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := uint64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "KMGTPE"[exp])
}

================================================
FILE: docs/brain/SKILLS.md
================================================
# Brain Skills

This file defines the default instruction set used by Brain across all chats. Admins can copy this content into the Brain Skills editor and create variants.

## Role

You are **Brain**, a senior ClickHouse analytics copilot.
## Main goals - Produce correct, executable ClickHouse SQL. - Help users move from question -> query -> insight quickly. - Stay concise and explicit about assumptions. ## SQL rules - Prefer read-only exploration first. - Use `LIMIT 100` by default for exploratory selects. - Avoid `SELECT *` on large tables unless explicitly requested. - Always qualify tables with backticks when needed (for example: `` `db.table` ``). - If the request is ambiguous, ask one targeted clarification question. ## Safety rules - Do not suggest destructive SQL (DROP/TRUNCATE/DELETE/ALTER) unless the user asks directly. - If the user asks for destructive SQL, include a short warning and confirmation step. - For expensive queries, provide a preview query first (sample, top-N, or date-bounded window). ## Artifact contract When query output or derived assets exist, create or reference artifacts with stable titles: - `SQL Draft: ` - `Query Result: ` - `Insight Summary: ` - `Chart Spec: ` Each artifact should include: - Purpose (1 line) - Inputs used (query/message references) - Output payload (JSON/text/SQL) ## Query tool contract When running SQL tools: 1. Start with read-only SQL. 2. Keep runtime bounded (small scans first). 3. Persist output as an artifact. 4. Summarize findings in 3-5 bullets. ## Response format Default assistant response structure: 1. One-line intent confirmation. 2. SQL block when applicable. 3. Short explanation. 4. Optional next-step variants. ## Example pattern ````text Got it. You want daily active users by region for the last 30 days. ```sql SELECT toDate(event_time) AS day, region, uniq(user_id) AS dau FROM `analytics.events` WHERE event_time >= now() - INTERVAL 30 DAY GROUP BY day, region ORDER BY day DESC, dau DESC LIMIT 100 ``` This computes DAU by region and keeps the result bounded for quick validation. If you want, I can also return a stacked timeseries version. 
````
================================================
FILE: docs/cant-login.md
================================================
# Can't Login?

Use this guide when CH-UI loads but sign-in fails, local connection is wrong, or you are blocked by retry windows.

## Quick Diagnosis

| What you see | Most likely cause | What to do |
|---|---|---|
| `Authentication failed` | Wrong ClickHouse username/password | Retry with correct credentials for the selected connection |
| `Connection unavailable` / `Unreachable` | Local ClickHouse URL is wrong or connector is offline | Update local URL/name, restart CH-UI, then retry |
| `Too many login attempts` | Repeated failed attempts triggered temporary lock | Wait retry window; if URL/connection was wrong, fix setup and restart before retrying |
| No connections configured | Embedded local connection was not created/updated correctly | Run setup command below and restart CH-UI |

## Local Recovery (Recommended)

1. Open **Can't login?** in CH-UI login.
2. Set:
   - `ClickHouse URL`
   - `Connection Name`
3. Restart CH-UI with one of these commands.

Global install:

```bash
ch-ui server --clickhouse-url 'http://127.0.0.1:8123' --connection-name 'My Connection 1'
```

Local binary:

```bash
./ch-ui server --clickhouse-url 'http://127.0.0.1:8123' --connection-name 'My Connection 1'
```

Then open `http://localhost:3488` and sign in again.

## Docker Recovery

Inside a container, `127.0.0.1` refers to the container itself, not the Docker host. Point CH-UI at `host.docker.internal` (mapped to the host gateway) so it can reach a ClickHouse server running on the host:

```bash
docker run --rm \
  -p 3488:3488 \
  -v ch-ui-data:/app/data \
  --add-host=host.docker.internal:host-gateway \
  -e CLICKHOUSE_URL='http://host.docker.internal:8123' \
  -e CONNECTION_NAME='My Connection 1' \
  ghcr.io/caioricciuti/ch-ui:latest
```

## Env And Config Alternatives

Environment variables:

```bash
CLICKHOUSE_URL='http://127.0.0.1:8123' CONNECTION_NAME='My Connection 1' ch-ui server
```

Config file (`server.yaml`):

```yaml
clickhouse_url: http://127.0.0.1:8123
connection_name: My Connection 1
```

## Notes

- Local URL setup does **not** require Admin access.
- Admin and multi-connection management are Pro-only features.
- Setup commands intentionally exclude passwords; credentials stay in the Sign in form. - Connection name precedence: `--connection-name` > `CONNECTION_NAME` > `server.yaml` > `Local ClickHouse`. ================================================ FILE: docs/legal/privacy-policy.md ================================================ # Privacy Policy **Effective date:** February 12, 2026 **Last updated:** February 12, 2026 CH-UI ("we", "our", "us") is developed by Caio Ricciuti. This privacy policy explains how we handle data when you use CH-UI software. --- ## What CH-UI does NOT collect CH-UI is a self-hosted application. When you run CH-UI on your own infrastructure: - **No telemetry** is sent to us or any third party - **No usage data** leaves your server - **No analytics** are collected - **No cookies** are set by us (only session cookies for your own login) - **Your queries, data, and database contents never leave your infrastructure** ## Data stored locally CH-UI stores the following data in a local SQLite database on your server: - **User sessions** — login tokens for authenticated access - **Saved queries** — queries you choose to save - **Dashboard configurations** — layout and panel settings (Pro) - **Scheduled jobs** — query schedules you create (Pro) - **Connection settings** — ClickHouse connection details (encrypted) - **License information** — your license key if you activate Pro - **Application settings** — preferences and configuration All data is stored in the SQLite file specified by `database_path` in your config (default: `./data/ch-ui.db`). You have full control over this data. ## Pro license activation When you activate a Pro license, the license file is stored locally in your database. No information is sent to external servers during activation — the license is validated offline using cryptographic signatures. 
## Managed hosting If you use a managed CH-UI hosting offering: - We may store your account information (email, name) for authentication - We may store your ClickHouse connection metadata (not your database contents) - We do not access, read, or store your ClickHouse data - Tunnel connections are end-to-end between your agent and your browser session ## Third-party services The self-hosted CH-UI binary does not communicate with any third-party services except: - **Your ClickHouse server** — as configured by you - **OpenAI API** — only if you configure the Brain AI feature (Pro) with your own API key ## Data deletion Since all data is stored locally: - Delete the SQLite database file to remove all application data - Uninstall the binary to fully remove CH-UI ## Contact For privacy questions: **c.ricciuti@ch-ui.com** ## Changes We may update this policy. Changes will be posted in this file and noted in release changelogs. ================================================ FILE: docs/legal/terms-of-service.md ================================================ # Terms of Service **Effective date:** February 12, 2026 **Last updated:** February 12, 2026 These terms govern your use of CH-UI software developed by Caio Ricciuti. --- ## 1. Software license CH-UI is distributed under a dual-license model: - **CH-UI Core** (Community Edition) is licensed under the [Apache License 2.0](../../LICENSE). You may use, modify, and distribute it freely under those terms. - **CH-UI Pro** modules require a separate commercial license. Pro features are clearly marked in the application and documentation. ## 2. Self-hosted usage When you run CH-UI on your own infrastructure: - You are responsible for your own data, backups, and security - You are responsible for compliance with applicable laws in your jurisdiction - We provide the software "as is" without warranty (see Section 6) ## 3. 
Pro license If you purchase a CH-UI Pro license: - The license grants you access to Pro features for the duration specified - Licenses are non-transferable unless agreed in writing - License terms are specified in the license file provided to you - Tampering with or circumventing license validation is prohibited ## 4. Acceptable use You agree not to: - Reverse-engineer the license validation mechanism - Redistribute Pro modules without authorization - Use CH-UI to violate applicable laws or regulations - Misrepresent CH-UI as your own product ## 5. Intellectual property - CH-UI, the CH-UI logo, and related marks are the property of Caio Ricciuti - Open source components are governed by their respective licenses - Your data remains yours — we claim no ownership over data processed by CH-UI ## 6. Disclaimer of warranty THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ## 7. Limitation of liability To the maximum extent permitted by law, Caio Ricciuti shall not be liable for any indirect, incidental, special, consequential, or punitive damages, or any loss of profits or revenues, whether incurred directly or indirectly. ## 8. Changes We may update these terms. Continued use of the software after changes constitutes acceptance. ## 9. Contact For questions about these terms: **c.ricciuti@ch-ui.com** ================================================ FILE: docs/license.md ================================================ # CH-UI Licensing CH-UI uses a dual-license model: open source core + commercial Pro modules. 
---

## CH-UI Core (Community Edition)

**License:** [Apache License 2.0](../LICENSE.md)

The core of CH-UI is free and open source. This includes:

- SQL Editor (multi-tab, formatting, profiling, streaming results, query plan analysis)
- Schema Explorer (database/table/column browser, data preview)
- Saved Queries
- Dashboards (panel builder, multiple chart types, time ranges)
- Brain AI Assistant (OpenAI, OpenAI-compatible, Ollama — multi-chat, artifacts, skills)
- Data Pipelines (Webhook, S3, Kafka, Database sources into ClickHouse)
- Models (dbt-style SQL transformations with DAG and materialization)
- Admin Panel (user management, connection management, provider configuration)
- Multi-connection management
- Tunnel connector (`ch-ui connect`) for remote ClickHouse access
- Embedded web frontend
- All CLI commands

You can use, modify, and distribute CH-UI Core freely under the Apache 2.0 license.

## CH-UI Pro

**License:** Commercial (proprietary)

Pro modules extend CH-UI with enterprise features:

- Scheduled query jobs (cron-based scheduling, execution history, timezone support)
- Governance (metadata sync, query log analytics, data lineage, access matrix, tagging)
- Policies and incident management (violation detection, incident workflow, severity tracking)
- Alerting (SMTP, Resend, Brevo — rules by event type/severity, escalation)

Pro features require a valid license file. Licenses are per-deployment and include a customer name, expiration date, and feature set.

### How to activate

1. Open CH-UI in your browser
2. Go to **Settings > License**
3. Paste or upload your license file
4. Pro features unlock immediately

### How to get a license

Visit [ch-ui.com/pricing](https://ch-ui.com/pricing) or contact **c.ricciuti@ch-ui.com**.
## License boundary The licensing boundary is enforced server-side via HTTP 402 middleware on Pro-only routes: - **Free routes:** queries, saved queries, dashboards, pipelines, models, brain, admin, connections - **Pro routes:** `/api/schedules/*`, `/api/governance/*` (including alerts) The Pro license check is enforced both server-side (HTTP 402 middleware) and client-side (UI gate). ## FAQ **Can I use CH-UI Core in production?** Yes, freely. Apache 2.0 allows commercial use. **Can I modify CH-UI Core?** Yes. You must retain the copyright notice and license. **Do I need Pro for dashboards, Brain, or pipelines?** No. Dashboards, Brain AI, data pipelines, models, and admin are all free. **What features require Pro?** Only scheduled query jobs, governance (lineage, policies, incidents, access matrix), and alerting. **What happens when a Pro license expires?** Pro features become locked. Core features continue working. Your data is never lost. ================================================ FILE: docs/production-runbook.md ================================================ # CH-UI Production Runbook (VM2 Server + VM1 Connector) This runbook covers a production topology where: - **VM2** runs `ch-ui server` (UI, API, tunnel gateway) - **VM1** runs `ch-ui connect` (agent next to ClickHouse) ## 1. VM2 Server Hardening 1. Create server config at `/etc/ch-ui/server.yaml`: ```yaml port: 3488 app_url: https://ch-ui.example.com app_secret_key: "replace-with-long-random-secret" allowed_origins: - https://ch-ui.example.com database_path: /var/lib/ch-ui/ch-ui.db ``` 2. Keep runtime state in writable directories: ```bash sudo mkdir -p /var/lib/ch-ui/run sudo mkdir -p /var/lib/ch-ui sudo chown -R chui:chui /var/lib/ch-ui ``` 3. 
Use lifecycle commands with explicit PID file: ```bash ch-ui server start -c /etc/ch-ui/server.yaml --detach --pid-file /var/lib/ch-ui/run/ch-ui-server.pid ch-ui server status -c /etc/ch-ui/server.yaml --pid-file /var/lib/ch-ui/run/ch-ui-server.pid ch-ui server stop -c /etc/ch-ui/server.yaml --pid-file /var/lib/ch-ui/run/ch-ui-server.pid ``` ## 2. VM2 systemd Service (recommended) Create `/etc/systemd/system/ch-ui-server.service`: ```ini [Unit] Description=CH-UI Server After=network.target [Service] Type=simple User=chui Group=chui WorkingDirectory=/var/lib/ch-ui ExecStart=/usr/local/bin/ch-ui server start -c /etc/ch-ui/server.yaml --pid-file /var/lib/ch-ui/run/ch-ui-server.pid ExecStop=/usr/local/bin/ch-ui server stop -c /etc/ch-ui/server.yaml --pid-file /var/lib/ch-ui/run/ch-ui-server.pid Restart=always RestartSec=5 LimitNOFILE=65535 [Install] WantedBy=multi-user.target ``` Then: ```bash sudo systemctl daemon-reload sudo systemctl enable ch-ui-server sudo systemctl start ch-ui-server sudo systemctl status ch-ui-server ``` ## 3. VM2 Reverse Proxy (TLS + WebSocket) Your proxy must: - route app traffic to `127.0.0.1:3488` - support WebSocket upgrades on `/connect` - keep long-enough timeouts for tunnel traffic Use the repo example: `ch-ui.conf`. ## 4. VM1 Connector Setup 1. On VM2, create a tunnel key for VM1: ```bash ch-ui tunnel create --name "vm1-clickhouse" -c /etc/ch-ui/server.yaml --url wss://ch-ui.example.com/connect ``` Copy the generated `cht_...` token. 2. Install connector service on VM1: ```bash sudo /usr/local/bin/ch-ui service install \ --url wss://ch-ui.example.com/connect \ --key cht_your_tunnel_token \ --clickhouse-url http://127.0.0.1:8123 ``` 3. Verify: ```bash ch-ui service status ch-ui service logs -f ``` 4. (Optional) Rotate compromised/old token from VM2: ```bash ch-ui tunnel list -c /etc/ch-ui/server.yaml ch-ui tunnel rotate -c /etc/ch-ui/server.yaml --url wss://ch-ui.example.com/connect ``` ## 5. 
Network Policy - VM2 inbound: `443` (or your TLS port) - VM2 inbound: `3488` only from localhost/reverse-proxy path - VM1 outbound: allow to `wss://ch-ui.example.com/connect` - VM1 ClickHouse can stay local-only (`127.0.0.1:8123`) ## 6. Monitoring and Backups 1. Health endpoint: ```bash curl -fsS http://127.0.0.1:3488/health ``` 2. Back up SQLite: - file: `/var/lib/ch-ui/ch-ui.db` - schedule daily snapshot + retention policy - verify restore procedure quarterly 3. Log collection: - VM2: `journalctl -u ch-ui-server` - VM1: `ch-ui service logs` or platform service logs ## 7. Upgrade Procedure 1. Replace binaries on VM2 and VM1. 2. Restart services: ```bash sudo systemctl restart ch-ui-server ch-ui service restart ``` 3. Validate: ```bash ch-ui version ch-ui server status -c /etc/ch-ui/server.yaml --pid-file /var/lib/ch-ui/run/ch-ui-server.pid ch-ui service status ``` ## 8. Notes on Older Binaries Older builds did not support server lifecycle subcommands (`status/stop/restart`). If `ch-ui server status` starts the server, replace the binary with a newer build and retry. 
================================================
FILE: frontend.go
================================================
package main

import (
	"embed"
	"io/fs"
	"log/slog"
)

// uiDistFS holds the built web frontend compiled into the binary.
// The `all:` prefix includes files/dirs whose names start with "." or "_".
//
//go:embed all:ui/dist
var uiDistFS embed.FS

// frontendFS returns the embedded frontend assets as an fs.FS rooted at
// ui/dist, so callers address files without the "ui/dist" prefix.
// On failure it logs a warning and returns nil; callers should treat a
// nil filesystem as "no frontend available".
func frontendFS() fs.FS {
	// Re-root the embedded tree at ui/dist.
	sub, err := fs.Sub(uiDistFS, "ui/dist")
	if err != nil {
		slog.Warn("Failed to open embedded frontend directory", "error", err)
		return nil
	}
	return sub
}

================================================
FILE: go.mod
================================================
module github.com/caioricciuti/ch-ui

go 1.25.0

require (
	github.com/IBM/sarama v1.47.0
	github.com/fatih/color v1.18.0
	github.com/go-chi/chi/v5 v5.2.5
	github.com/go-sql-driver/mysql v1.9.3
	github.com/google/uuid v1.6.0
	github.com/gorilla/websocket v1.5.3
	github.com/lib/pq v1.11.2
	github.com/minio/minio-go/v7 v7.0.98
	github.com/spf13/cobra v1.10.2
	github.com/xdg-go/scram v1.2.0
	github.com/xitongsys/parquet-go v1.6.2
	github.com/xitongsys/parquet-go-source v0.0.0-20241021075129-b732d2ac9c9b
	golang.org/x/crypto v0.48.0
	gopkg.in/yaml.v3 v3.0.1
	modernc.org/sqlite v1.44.3
)

require (
	filippo.io/edwards25519 v1.1.0 // indirect
	github.com/apache/arrow/go/arrow v0.0.0-20200730104253-651201b0f516 // indirect
	github.com/apache/thrift v0.14.2 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/eapache/go-resiliency v1.7.0 // indirect
	github.com/eapache/queue v1.1.0 // indirect
	github.com/go-ini/ini v1.67.0 // indirect
	github.com/golang/snappy v0.0.3 // indirect
	github.com/hashicorp/go-uuid v1.0.3 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jcmturner/aescts/v2 v2.0.0 // indirect
	github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
	github.com/jcmturner/gofork v1.7.6 // indirect
	github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
	github.com/jcmturner/rpc/v2 v2.0.3 // indirect
	github.com/klauspost/compress v1.18.4 // indirect
	github.com/klauspost/cpuid/v2 v2.2.11 // indirect
github.com/klauspost/crc32 v1.3.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/minio/crc64nvme v1.1.1 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/ncruces/go-strftime v1.0.0 // indirect github.com/philhofer/fwd v1.2.0 // indirect github.com/pierrec/lz4/v4 v4.1.25 // indirect github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/rs/xid v1.6.0 // indirect github.com/spf13/pflag v1.0.9 // indirect github.com/tinylib/msgp v1.6.1 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect golang.org/x/net v0.51.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/text v0.34.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect modernc.org/libc v1.67.6 // indirect modernc.org/mathutil v1.7.1 // indirect modernc.org/memory v1.11.0 // indirect ) ================================================ FILE: go.sum ================================================ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go 
v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA= cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= 
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/kms v1.1.0/go.mod h1:WdbppnCDMDpOvoYBMn1+gNmOeEoZYqAv+HeuKARGCXI= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= cloud.google.com/go/monitoring v1.4.0/go.mod h1:y6xnxfwI3hTFWOdkOaD7nfJVlwuC3/mS/5kvtT131p4= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= 
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/pubsub v1.19.0/go.mod h1:/O9kmSe9bb9KRnIAWkzmqhPjHo6LtzGOBYd/kr06XSs= cloud.google.com/go/secretmanager v1.3.0/go.mod h1:+oLTkouyiYiabAQNugCeTS3PAArGiMJuBqvJnJsyH+U= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.12.0/go.mod h1:fFLk2dp2oAhDz8QFKwqrjdJvxSp/W2g7nillojlL5Ho= cloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= cloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM= contrib.go.opencensus.io/exporter/aws v0.0.0-20200617204711-c478e41e60e9/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= contrib.go.opencensus.io/exporter/stackdriver v0.13.10/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8= contrib.go.opencensus.io/integrations/ocsql v0.1.7/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/Azure/azure-amqp-common-go/v3 v3.2.1/go.mod h1:O6X1iYHP7s2x7NjUKsXVhkwWrQhxrd+d8/3rRadj4CI= github.com/Azure/azure-amqp-common-go/v3 v3.2.2/go.mod 
h1:O6X1iYHP7s2x7NjUKsXVhkwWrQhxrd+d8/3rRadj4CI= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v59.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0/go.mod h1:+6sju8gk8FRmSajX3Oz4G5Gm7P+mbqE9FVaXXFYTkCM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.0.0/go.mod h1:ceIuwmxDWptoW3eCqSXlnPsZFKh4X+R38dWPv7GS9Vs= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0/go.mod h1:s1tW/At+xHqjNFvWU4G0c0Qv33KOhvbGNj0RCTQDV8s= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0/go.mod h1:7QJP7dr2wznCMeqIrhMgWGf7XpAQnVrJqDm9nvV3Cu4= github.com/Azure/azure-service-bus-go v0.11.5/go.mod h1:MI6ge2CuQWBVq+ly456MY7XqNLJip5LO1iSFodbNLbU= 
github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/go-amqp v0.16.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg= github.com/Azure/go-amqp v0.16.4/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/azure/auth v0.5.9/go.mod h1:hg3/1yw0Bq87O3KvvnJoAh34/0zbP7SFizX/qN5JvjU= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= 
github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GoogleCloudPlatform/cloudsql-proxy v1.29.0/go.mod h1:spvB9eLJH9dutlbPSRmHvSXXHOwGRyeXh1jVdquA2G8= github.com/IBM/sarama v1.47.0 h1:GcQFEd12+KzfPYeLgN69Fh7vLCtYRhVIx0rO4TZO318= github.com/IBM/sarama v1.47.0/go.mod h1:7gLLIU97nznOmA6TX++Qds+DRxH89P2XICY2KAQUzAY= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20200730104253-651201b0f516 h1:byKBBF2CKWBjjA4J1ZL2JXttJULvWSl50LegTyRZ728= github.com/apache/arrow/go/arrow v0.0.0-20200730104253-651201b0f516/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.0.0-20181112125854-24918abba929/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.14.2 h1:hY4rAyg7Eqbb27GB6gkhUKrRAuc8xRjlNtJq+LseKeY= github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.30.19/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= github.com/aws/aws-sdk-go-v2 
v1.23.0/go.mod h1:i1XDttT4rnf6vxc9AuskLc6s7XBee8rlLilKlc03uAA= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1/go.mod h1:t8PYl/6LzdAqsU4/9tz28V/kU+asFePvpOMkdul0gEQ= github.com/aws/aws-sdk-go-v2/config v1.15.3/go.mod h1:9YL3v07Xc/ohTsxFXzan9ZpFpdTOFl4X65BAKYaz8jg= github.com/aws/aws-sdk-go-v2/config v1.25.3/go.mod h1:tAByZy03nH5jcq0vZmkcVoo6tRzRHEwSFx3QW4NmDw8= github.com/aws/aws-sdk-go-v2/credentials v1.11.2/go.mod h1:j8YsY9TXTm31k4eFhspiQicfXPLZ0gYXA50i4gxPE8g= github.com/aws/aws-sdk-go-v2/credentials v1.16.2/go.mod h1:sDdvGhXrSVT5yzBDR7qXz+rhbpiMpUYfF3vJ01QSdrc= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3/go.mod h1:uk1vhHHERfSVCUnqSqz8O48LBYDSC+k6brng09jcMOk= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4/go.mod h1:t4i+yGHMCcUNIX1x7YVYa6bH/Do7civ5I6cG/6PMfyA= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.3/go.mod h1:0dHuD2HZZSiwfJSy1FO5bX1hQ1TxVV1QXXjpn3XUE44= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.14.0/go.mod h1:UcgIwJ9KHquYxs6Q5skC9qXjhYMK+JASDYcXQ4X7JZE= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3/go.mod h1:7sGSz1JCKHWWBHq98m6sMtWQikmYPpxjqOydDemiVoM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3/go.mod h1:ify42Rb7nKeDDPkFjKn7q1bPscVPu/+gmHH8d2c+anU= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10/go.mod h1:8DcYQcz0+ZJaSxANlHIsbbi6S+zMwjwdDqwW3r9AzaE= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.3/go.mod h1:5yzAuE9i2RkVAttBl8yxZgQr5OCq4D5yDnG7j9x2L0U= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1/go.mod 
h1:GeUru+8VzrTXV/83XyMJ80KpH8xO89VPoUileyNQ+tc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1/go.mod h1:l9ymW25HOqymeU2m1gbUQ3rUIsTwKs8gYHXkqDQUhiI= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.3/go.mod h1:Seb8KNmD6kVTjwRjVEgOT5hPin6sq+v4C2ycJQDwuH8= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.3/go.mod h1:R+/S1O4TYpcktbVwddeOYg+uwUfLhADP2S/x4QwsCTM= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3/go.mod h1:wlY6SVjuwvh3TVRpTqdy4I1JpBFLX4UGeKZdWntaocw= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3/go.mod h1:Owv1I59vaghv1Ax8zz8ELY8DN7/Y0rGS+WWAmjgi950= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.3/go.mod h1:Bm/v2IaN6rZ+Op7zX+bOUMdL4fsrYZiD0dsjLhNKwZc= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.3/go.mod h1:KZgs2ny8HsxRIRbDwgvJcHHBZPOzQr/+NtGwnP+w2ec= github.com/aws/aws-sdk-go-v2/service/kms v1.16.3/go.mod h1:QuiHPBqlOFCi4LqdSskYYAWpQlx3PKmohy+rE2F+o5g= github.com/aws/aws-sdk-go-v2/service/s3 v1.26.3/go.mod h1:g1qvDuRsJY+XghsV6zg00Z4KJ7DtFFCx8fJD2a491Ak= github.com/aws/aws-sdk-go-v2/service/s3 v1.43.0/go.mod h1:NXRKkiRF+erX2hnybnVU660cYT5/KChRD4iUgJ97cI8= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.15.4/go.mod h1:PJc8s+lxyU8rrre0/4a0pn2wgwiDvOEzoOjcJUBr67o= github.com/aws/aws-sdk-go-v2/service/sns v1.17.4/go.mod h1:kElt+uCcXxcqFyc+bQqZPFD9DME/eC6oHBXvFzQ9Bcw= github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3/go.mod h1:skmQo0UPvsjsuYYSYMVmrPc1HWCbHUJyrCEp+ZaLzqM= github.com/aws/aws-sdk-go-v2/service/ssm v1.24.1/go.mod h1:NR/xoKjdbRJ+qx0pMR4mI+N/H1I1ynHwXnO6FowXJc0= github.com/aws/aws-sdk-go-v2/service/sso v1.11.3/go.mod h1:7UQ/e69kU7LDPtY40OyoHYgRmgfGM4mgsLYtcObdveU= github.com/aws/aws-sdk-go-v2/service/sso v1.17.2/go.mod h1:/pE21vno3q1h4bbhUOEi+6Zu/aT26UK2WKkDXd+TssQ= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.0/go.mod h1:dWqm5G767qwKPuayKfzm4rjzFmVjiBFbOJrpSPnAMDs= github.com/aws/aws-sdk-go-v2/service/sts v1.16.3/go.mod 
h1:bfBj0iVmsUyUg4weDB4NxktD9rDGeKSVWnjTnwbx9b8= github.com/aws/aws-sdk-go-v2/service/sts v1.25.3/go.mod h1:4EqRHDCKP78hq3zOnmFXu5k0j4bXbRFfCh/zQ6KnEfQ= github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/aws/smithy-go v1.17.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/bobg/gcsobj v0.1.2/go.mod h1:vS49EQ1A1Ib8FgrL58C8xXYZyOCR2TgzAdopy6/ipa8= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/colinmarc/hdfs/v2 v2.1.1/go.mod h1:M3x+k8UKKmxtFu++uAZ0OtDU8jR3jnaZIAc6yK4Ue0c= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 
github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.7.3/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug= github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.9.3 
h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk= github.com/google/go-replayers/httpreplay v1.1.1/go.mod h1:gN9GeLIs7l6NUoVaSSnv2RiqK1NiwAmD0MrKeC9IIks= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof 
v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid 
v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= github.com/hanwen/go-fuse/v2 v2.1.0/go.mod h1:oRyA5eK+pvJyv5otpO/DgccS8y/RvYMaO00GgRLGryc= github.com/hashicorp/go-uuid v0.0.0-20180228145832-27454136f036/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= 
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod 
h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= 
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod 
h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c= github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.11 h1:0OwqZRYI2rFrjS4kvkDnqJkKHdHaRnCm68/DY4OxRzU= github.com/klauspost/cpuid/v2 v2.2.11/go.mod 
h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM= github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.11.2 h1:x6gxUeu39V0BHZiugWe8LXZYZ+Utk7hSJGThs8sdzfs= github.com/lib/pq v1.11.2/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA= github.com/mattn/go-colorable v0.1.1/go.mod 
h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI= github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.34/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= github.com/minio/minio-go/v7 v7.0.98 h1:MeAVKjLVz+XJ28zFcuYyImNSAh8Mq725uNW4beRisi0= github.com/minio/minio-go/v7 v7.0.98/go.mod h1:cY0Y+W7yozf0mdIclrttzo1Iiu7mEf9y7nk2uXqMOvM= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM= github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0= github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0/go.mod 
h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY= github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.2.0 h1:bYKF2AEwG5rqd1BumT4gAnvwU/M9nBp2pTSxeZw7Wvs= github.com/xdg-go/scram v1.2.0/go.mod h1:3dlrS0iBaWKYVt2ZfA4cj48umJZ+cAEbR6/SjLA88I8= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xitongsys/parquet-go v1.5.1/go.mod h1:xUxwM8ELydxh4edHGegYq1pA8NnMKDx0K/GyB0o2bww= github.com/xitongsys/parquet-go v1.6.2 h1:MhCaXii4eqceKPu9BwrjLqyK10oX9WF+xGhwvwbw7xM= github.com/xitongsys/parquet-go v1.6.2/go.mod h1:IulAQyalCm0rPiZVNnCgm/PCL64X2tdSVGMQ/UeKqWA= github.com/xitongsys/parquet-go-source v0.0.0-20190524061010-2b72cbee77d5/go.mod h1:xxCx7Wpym/3QCo6JhujJX51dzSXrwmb0oH6FQb39SEA= github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0/go.mod h1:HYhIKsdns7xz80OgkbgJYrtQY7FjHWHKH6cvN7+czGE= github.com/xitongsys/parquet-go-source v0.0.0-20241021075129-b732d2ac9c9b h1:zbb5qM/t3N+O33Vp5sFyG6yIcWZV1q7rfEjJM8UsRBQ= github.com/xitongsys/parquet-go-source v0.0.0-20241021075129-b732d2ac9c9b/go.mod 
h1:2ActxmJ4q17Cdruar9nKEkzKSOL1Ol03737Bkz10rTY= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= gocloud.dev v0.26.0/go.mod h1:mkUgejbnbLotorqDyvedJO20XcZNTynmSeVSQS9btVg= golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211115234514-b4de73f9ece8/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp 
v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net 
v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= 
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo= google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod 
h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= google.golang.org/api v0.68.0/go.mod h1:sOM8pTpwgflXRhz+oC8H2Dr+UcbMqkPPWNJo88Q7TH8= google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80= google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod 
h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod 
h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= google.golang.org/genproto v0.0.0-20220401170504-314d38edb7de/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc 
v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc= modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM= modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA= modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE= modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY= modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI= modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE= modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= 
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= modernc.org/sqlite v1.44.3 h1:+39JvV/HWMcYslAwRxHb8067w+2zowvFOUrOWIy9PjY= modernc.org/sqlite v1.44.3/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA= modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= ================================================ FILE: internal/alerts/dispatcher.go ================================================ package alerts import ( "bytes" "context" "crypto/tls" "encoding/json" "fmt" "io" "log/slog" "math" "net/http" "net/smtp" "strconv" "strings" "time" "github.com/caioricciuti/ch-ui/internal/config" "github.com/caioricciuti/ch-ui/internal/crypto" "github.com/caioricciuti/ch-ui/internal/database" ) const ( ChannelTypeSMTP = "smtp" ChannelTypeResend = "resend" ChannelTypeBrevo = "brevo" ) const ( EventTypePolicyViolation = "policy.violation" EventTypeScheduleFailed = "schedule.failed" EventTypeScheduleSlow = "schedule.slow" ) 
// Severity levels attached to alert events and rule thresholds, ordered
// from least to most urgent (see severityRank for the numeric ordering).
const (
	SeverityInfo     = "info"
	SeverityWarn     = "warn"
	SeverityError    = "error"
	SeverityCritical = "critical"
)

// Tuning knobs for the background loop: wake-up interval and the maximum
// amount of work drained per wake-up.
const (
	dispatchTickInterval = 8 * time.Second
	maxNewEventsPerTick  = 100
	maxJobsPerTick       = 30
)

// Dispatcher is the background worker that matches new alert events against
// enabled rules, materializes per-route dispatch jobs (or digest batches),
// and delivers them through the configured channels.
type Dispatcher struct {
	db     *database.DB   // persistence for events, rules, routes, jobs and digests
	cfg    *config.Config // supplies AppSecretKey for channel-config decryption and the IsPro gate
	stopCh chan struct{}  // closed by Stop to terminate the run loop
	http   *http.Client   // shared client for the HTTP-based senders (Resend/Brevo)
}

// NewDispatcher builds a Dispatcher with a 15-second HTTP timeout for
// outbound provider calls. Start must be called to begin processing.
func NewDispatcher(db *database.DB, cfg *config.Config) *Dispatcher {
	return &Dispatcher{
		db:     db,
		cfg:    cfg,
		stopCh: make(chan struct{}),
		http: &http.Client{
			Timeout: 15 * time.Second,
		},
	}
}

// Start launches the dispatch loop in its own goroutine. The loop runs one
// tick every dispatchTickInterval until Stop is called.
func (d *Dispatcher) Start() {
	go func() {
		slog.Info("Alert dispatcher started", "interval", dispatchTickInterval)
		ticker := time.NewTicker(dispatchTickInterval)
		defer ticker.Stop()
		for {
			select {
			case <-d.stopCh:
				slog.Info("Alert dispatcher stopped")
				return
			case <-ticker.C:
				d.tick()
			}
		}
	}()
}

// Stop signals the run loop to exit by closing stopCh.
// NOTE(review): closing an already-closed channel panics, so Stop must be
// called at most once — confirm callers guarantee this.
func (d *Dispatcher) Stop() {
	close(d.stopCh)
}

// tick performs one unit of work: materialize jobs for new events, then
// deliver due jobs and due digests. All alerting is gated on the Pro flag.
func (d *Dispatcher) tick() {
	if !d.cfg.IsPro() {
		return
	}
	d.materializeEventJobs()
	d.processDueJobs()
	d.processDueDigests()
}

// materializeEventJobs fans new events out across enabled rules and their
// active routes. Digest-mode routes accumulate into a digest batch; direct
// routes get an immediate dispatch job, subject to fingerprint-based
// cooldown dedupe. Each event is marked processed exactly once at the end,
// regardless of per-route errors (failed routes are logged, not retried).
func (d *Dispatcher) materializeEventJobs() {
	events, err := d.db.ListNewAlertEvents(maxNewEventsPerTick)
	if err != nil {
		slog.Error("Alert dispatcher failed to list new events", "error", err)
		return
	}
	if len(events) == 0 {
		return
	}
	rules, err := d.db.ListEnabledAlertRules()
	if err != nil {
		slog.Error("Alert dispatcher failed to list enabled rules", "error", err)
		return
	}
	// Cache route lookups per rule so N events against the same rule cost
	// one DB query.
	routesByRule := make(map[string][]database.AlertRuleRouteView)
	now := time.Now().UTC()
	for _, event := range events {
		for _, rule := range rules {
			if !ruleMatchesEvent(rule, event) {
				continue
			}
			routes, ok := routesByRule[rule.ID]
			if !ok {
				routes, err = d.db.ListActiveAlertRuleRoutes(rule.ID)
				if err != nil {
					slog.Error("Alert dispatcher failed to list active routes", "rule", rule.ID, "error", err)
					continue
				}
				routesByRule[rule.ID] = routes
			}
			for _, route := range routes {
				if len(route.Recipients) == 0 {
					continue
				}
				deliveryMode := strings.ToLower(strings.TrimSpace(route.DeliveryMode))
				if deliveryMode == "digest" {
					// Digest routes batch events; no immediate job is created.
					if err := d.db.UpsertAlertRouteDigest(rule, route, event, now); err != nil {
						slog.Error("Alert dispatcher failed to upsert digest batch", "event", event.ID, "rule", rule.ID, "route", route.ID, "error", err)
					}
					continue
				}
				// Cooldown dedupe: skip if this fingerprint was already
				// dispatched on this route within the cooldown window. A
				// failed dedupe check fails open (the job is still created).
				if event.Fingerprint != nil && strings.TrimSpace(*event.Fingerprint) != "" && rule.CooldownSeconds > 0 {
					since := now.Add(-time.Duration(rule.CooldownSeconds) * time.Second)
					exists, err := d.db.HasRecentAlertDispatch(route.ID, *event.Fingerprint, since)
					if err != nil {
						slog.Warn("Alert dispatcher dedupe check failed", "route", route.ID, "error", err)
					} else if exists {
						continue
					}
				}
				if _, err := d.db.CreateAlertDispatchJob(event.ID, rule.ID, route.ID, route.ChannelID, rule.MaxAttempts, now); err != nil {
					slog.Error("Alert dispatcher failed to create dispatch job", "event", event.ID, "rule", rule.ID, "route", route.ID, "error", err)
				}
			}
		}
		if err := d.db.MarkAlertEventProcessed(event.ID); err != nil {
			slog.Warn("Alert dispatcher failed to mark event processed", "event", event.ID, "error", err)
		}
	}
}

// processDueJobs claims and delivers due dispatch jobs. Per job: mark
// sending, decrypt the channel config with the app secret, render subject
// and body from rule templates (with defaults), and send. On send failure
// the job is either retried with exponential backoff or, once MaxAttempts
// is reached, finally failed — attempting escalation first and folding the
// escalation outcome into the stored failure message.
func (d *Dispatcher) processDueJobs() {
	jobs, err := d.db.ListDueAlertDispatchJobs(maxJobsPerTick)
	if err != nil {
		slog.Error("Alert dispatcher failed to list due jobs", "error", err)
		return
	}
	if len(jobs) == 0 {
		return
	}
	for _, job := range jobs {
		// Claim the job first so a crash mid-send cannot double-deliver
		// silently; failure to claim skips the job this tick.
		if err := d.db.MarkAlertDispatchJobSending(job.ID); err != nil {
			slog.Warn("Alert dispatcher failed to mark job sending", "job", job.ID, "error", err)
			continue
		}
		recipients := parseRecipients(job.RouteRecipientsJSON)
		if len(recipients) == 0 {
			_ = d.db.MarkAlertDispatchJobFailed(job.ID, "route has no recipients")
			continue
		}
		decrypted, err := crypto.Decrypt(job.ChannelConfigEncrypted, d.cfg.AppSecretKey)
		if err != nil {
			_ = d.db.MarkAlertDispatchJobFailed(job.ID, "decrypt channel config: "+err.Error())
			continue
		}
		var channelConfig map[string]interface{}
		if err := json.Unmarshal([]byte(decrypted), &channelConfig); err != nil {
			_ = d.db.MarkAlertDispatchJobFailed(job.ID, "parse channel config: "+err.Error())
			continue
		}
		// Rule templates win; otherwise fall back to a standard subject/body.
		subject := renderTemplate(coalesce(job.RuleSubjectTemplate,
			fmt.Sprintf("[CH-UI][%s][%s] %s", strings.ToUpper(job.EventSeverity), job.EventType, job.EventTitle)),
			job,
		)
		body := renderTemplate(coalesce(job.RuleBodyTemplate, defaultBody(job)), job)
		providerMessageID, err := d.sendByChannelType(context.Background(), job.ChannelType, channelConfig, recipients, subject, body)
		if err != nil {
			nextAttempt := job.AttemptCount + 1
			if nextAttempt >= job.MaxAttempts {
				// Terminal failure: try the escalation channel and record
				// its outcome alongside the root error.
				failureMessage := err.Error()
				if escalationNote := d.tryEscalationForDispatchJob(job, subject, body, failureMessage, nextAttempt); escalationNote != "" {
					failureMessage = failureMessage + " | " + escalationNote
				}
				_ = d.db.MarkAlertDispatchJobFailed(job.ID, failureMessage)
				continue
			}
			backoff := retryBackoff(nextAttempt)
			_ = d.db.MarkAlertDispatchJobRetry(job.ID, time.Now().UTC().Add(backoff), err.Error())
			continue
		}
		if err := d.db.MarkAlertDispatchJobSent(job.ID, providerMessageID); err != nil {
			slog.Warn("Alert dispatcher failed to mark job sent", "job", job.ID, "error", err)
		}
	}
}

// processDueDigests delivers due digest batches. The flow mirrors
// processDueJobs (claim, decrypt, render, send, retry/escalate/fail) but
// renders a multi-event digest subject/body instead of per-event templates.
func (d *Dispatcher) processDueDigests() {
	digests, err := d.db.ListDueAlertRouteDigests(maxJobsPerTick)
	if err != nil {
		slog.Error("Alert dispatcher failed to list due digests", "error", err)
		return
	}
	if len(digests) == 0 {
		return
	}
	for _, digest := range digests {
		if err := d.db.MarkAlertRouteDigestSending(digest.ID); err != nil {
			slog.Warn("Alert dispatcher failed to mark digest sending", "digest", digest.ID, "error", err)
			continue
		}
		recipients := parseRecipients(digest.RouteRecipientsJSON)
		if len(recipients) == 0 {
			_ = d.db.MarkAlertRouteDigestFailed(digest.ID, "digest route has no recipients")
			continue
		}
		decrypted, err := crypto.Decrypt(digest.ChannelConfigEncrypted, d.cfg.AppSecretKey)
		if err != nil {
			_ = d.db.MarkAlertRouteDigestFailed(digest.ID, "decrypt channel config: "+err.Error())
			continue
		}
		var channelConfig map[string]interface{}
		if err := json.Unmarshal([]byte(decrypted), &channelConfig); err != nil {
			_ = d.db.MarkAlertRouteDigestFailed(digest.ID, "parse channel config: "+err.Error())
			continue
		}
		subject := fmt.Sprintf("[CH-UI Digest][%s][%s] %d events", strings.ToUpper(digest.Severity), digest.EventType, digest.EventCount)
		body := renderDigestBody(digest)
		_, err = d.sendByChannelType(context.Background(), digest.ChannelType, channelConfig, recipients, subject, body)
		if err != nil {
			nextAttempt := digest.AttemptCount + 1
			if nextAttempt >= digest.MaxAttempts {
				failureMessage := err.Error()
				if escalationNote := d.tryEscalationForDigest(digest, subject, body, failureMessage, nextAttempt); escalationNote != "" {
					failureMessage = failureMessage + " | " + escalationNote
				}
				_ = d.db.MarkAlertRouteDigestFailed(digest.ID, failureMessage)
				continue
			}
			backoff := retryBackoff(nextAttempt)
			_ = d.db.MarkAlertRouteDigestRetry(digest.ID, time.Now().UTC().Add(backoff), err.Error())
			continue
		}
		if err := d.db.MarkAlertRouteDigestSent(digest.ID); err != nil {
			slog.Warn("Alert dispatcher failed to mark digest sent", "digest", digest.ID, "error", err)
		}
	}
}

// SendDirect sends a one-off notification without queueing.
// SendDirect performs an immediate, unqueued send (e.g. channel tests)
// using a throwaway Dispatcher that only carries an HTTP client. The
// channel config must already be decrypted by the caller — the throwaway
// Dispatcher has no cfg, so only the HTTP-based and SMTP senders (which do
// not read d.cfg or d.db) are safe here.
func SendDirect(ctx context.Context, channelType string, channelConfig map[string]interface{}, recipients []string, subject, body string) (string, error) {
	d := &Dispatcher{
		http: &http.Client{Timeout: 15 * time.Second},
	}
	return d.sendByChannelType(ctx, channelType, channelConfig, recipients, subject, body)
}

// sendByChannelType routes a message to the concrete sender for the given
// channel type. Returns the provider message ID when the provider supplies one.
func (d *Dispatcher) sendByChannelType(ctx context.Context, channelType string, channelConfig map[string]interface{}, recipients []string, subject, body string) (string, error) {
	switch strings.ToLower(channelType) {
	case ChannelTypeSMTP:
		return d.sendSMTP(ctx, channelConfig, recipients, subject, body)
	case ChannelTypeResend:
		return d.sendResend(ctx, channelConfig, recipients, subject, body)
	case ChannelTypeBrevo:
		return d.sendBrevo(ctx, channelConfig, recipients, subject, body)
	default:
		return "", fmt.Errorf("unsupported channel type: %s", channelType)
	}
}

// ruleMatchesEvent reports whether a rule applies to an event: the rule's
// event type must match (with "*" and "any" as wildcards) and the event's
// severity must be at or above the rule's minimum.
func ruleMatchesEvent(rule database.AlertRule, event database.AlertEvent) bool {
	eventType := strings.ToLower(strings.TrimSpace(event.EventType))
	ruleType := strings.ToLower(strings.TrimSpace(rule.EventType))
	if ruleType != "*" && ruleType != "any" && ruleType != eventType {
		return false
	}
	return severityRank(event.Severity) >= severityRank(rule.SeverityMin)
}

// severityRank maps a severity string to an ordinal for comparison;
// unknown severities rank lowest (0).
func severityRank(s string) int {
	switch strings.ToLower(strings.TrimSpace(s)) {
	case SeverityInfo:
		return 1
	case SeverityWarn:
		return 2
	case SeverityError:
		return 3
	case SeverityCritical:
		return 4
	default:
		return 0
	}
}

// parseRecipients decodes a JSON array of recipient strings, dropping
// blanks. Malformed or empty input yields an empty (non-nil) slice.
func parseRecipients(raw string) []string {
	if strings.TrimSpace(raw) == "" {
		return []string{}
	}
	var vals []string
	if err := json.Unmarshal([]byte(raw), &vals); err != nil {
		return []string{}
	}
	out := make([]string, 0, len(vals))
	for _, v := range vals {
		v = strings.TrimSpace(v)
		if v != "" {
			out = append(out, v)
		}
	}
	return out
}

// parseStringList decodes a JSON array of strings, dropping blanks.
// NOTE(review): identical to parseRecipients — candidates for merging.
func parseStringList(raw string) []string {
	if strings.TrimSpace(raw) == "" {
		return []string{}
	}
	var vals []string
	if err := json.Unmarshal([]byte(raw), &vals); err != nil {
		return []string{}
	}
	out := make([]string, 0, len(vals))
	for _, v := range vals {
		v = strings.TrimSpace(v)
		if v != "" {
			out = append(out, v)
		}
	}
	return out
}

// defaultBody builds the plain-text fallback body used when a rule has no
// body template, including the raw event payload JSON when present.
func defaultBody(job database.AlertDispatchJobWithDetails) string {
	var b strings.Builder
	b.WriteString("CH-UI Alert\n\n")
	b.WriteString("Type: " + job.EventType + "\n")
	b.WriteString("Severity: " + strings.ToUpper(job.EventSeverity) + "\n")
	b.WriteString("Title: " + job.EventTitle + "\n")
	b.WriteString("Message: " + job.EventMessage + "\n")
	b.WriteString("Channel: " + job.ChannelName + " (" + job.ChannelType + ")\n")
	if job.EventPayloadJSON != nil && strings.TrimSpace(*job.EventPayloadJSON) != "" {
		b.WriteString("\nPayload:\n")
		b.WriteString(*job.EventPayloadJSON)
	}
	return b.String()
}

// renderTemplate substitutes {{placeholder}} tokens in a template with job
// fields. Placeholders are distinct and non-overlapping, so map iteration
// order does not affect the result.
func renderTemplate(tpl string, job database.AlertDispatchJobWithDetails) string {
	out := tpl
	repl := map[string]string{
		"{{event_type}}":   job.EventType,
		"{{severity}}":     job.EventSeverity,
		"{{title}}":        job.EventTitle,
		"{{message}}":      job.EventMessage,
		"{{channel_name}}": job.ChannelName,
		"{{channel_type}}": job.ChannelType,
		"{{payload_json}}": coalesce(job.EventPayloadJSON, ""),
		"{{created_at}}":   job.CreatedAt,
		"{{event_id}}":     job.EventID,
		"{{rule_name}}":    job.RuleName,
	}
	for key, val := range repl {
		out = strings.ReplaceAll(out, key, val)
	}
	return out
}

// retryBackoff computes the exponential retry delay for the given attempt:
// 10s * 2^(attempt-1), capped at 30 minutes.
func retryBackoff(attempt int) time.Duration {
	if attempt <= 0 {
		return 10 * time.Second
	}
	base := 10 * time.Second
	multiplier := math.Pow(2, float64(attempt-1))
	d := time.Duration(multiplier * float64(base))
	if d > 30*time.Minute {
		return 30 * time.Minute
	}
	return d
}

// tryEscalationForDispatchJob attempts a send on the route's escalation
// channel after a terminal delivery failure. It returns a human-readable
// note (appended to the stored failure message) describing what happened;
// an empty return means escalation was not configured or not yet due.
func (d *Dispatcher) tryEscalationForDispatchJob(job database.AlertDispatchJobWithDetails, subject, body, rootErr string, failedAttempt int) string {
	if job.RouteEscalationChannelID == nil || strings.TrimSpace(*job.RouteEscalationChannelID) == "" {
		return ""
	}
	if job.RouteEscalationAfterFailures > 0 && failedAttempt < job.RouteEscalationAfterFailures {
		return ""
	}
	if job.EscalationChannelType == nil || job.EscalationChannelConfigEncrypted == nil {
		return "escalation skipped: channel metadata unavailable"
	}
	// Prefer dedicated escalation recipients, falling back to the route's own.
	recipients := parseRecipients(coalesce(job.RouteEscalationRecipientsJSON, ""))
	if len(recipients) == 0 {
		recipients = parseRecipients(job.RouteRecipientsJSON)
	}
	if len(recipients) == 0 {
		return "escalation skipped: no escalation recipients"
	}
	decrypted, err := crypto.Decrypt(*job.EscalationChannelConfigEncrypted, d.cfg.AppSecretKey)
	if err != nil {
		return "escalation decrypt failed: " + err.Error()
	}
	cfg := map[string]interface{}{}
	if err := json.Unmarshal([]byte(decrypted), &cfg); err != nil {
		return "escalation config parse failed: " + err.Error()
	}
	escalationSubject := "[ESCALATED] " + subject
	escalationBody := body + "\n\nEscalation reason:\n" + rootErr
	if _, err := d.sendByChannelType(context.Background(), *job.EscalationChannelType, cfg, recipients, escalationSubject, escalationBody); err != nil {
		return "escalation send failed: " + err.Error()
	}
	return "escalated via " + coalesce(job.EscalationChannelName, "channel")
}

// tryEscalationForDigest mirrors tryEscalationForDispatchJob for failed
// digest deliveries; same semantics, digest-prefixed notes.
func (d *Dispatcher) tryEscalationForDigest(digest database.AlertRouteDigestWithDetails, subject, body, rootErr string, failedAttempt int) string {
	if digest.EscalationChannelID == nil || strings.TrimSpace(*digest.EscalationChannelID) == "" {
		return ""
	}
	if digest.EscalationAfterFailures > 0 && failedAttempt < digest.EscalationAfterFailures {
		return ""
	}
	if digest.EscalationChannelType == nil || digest.EscalationChannelConfigEncrypted == nil {
		return "digest escalation skipped: channel metadata unavailable"
	}
	recipients := parseRecipients(coalesce(digest.EscalationRecipientsJSON, ""))
	if len(recipients) == 0 {
		recipients = parseRecipients(digest.RouteRecipientsJSON)
	}
	if len(recipients) == 0 {
		return "digest escalation skipped: no recipients"
	}
	decrypted, err := crypto.Decrypt(*digest.EscalationChannelConfigEncrypted, d.cfg.AppSecretKey)
	if err != nil {
		return "digest escalation decrypt failed: " + err.Error()
	}
	cfg := map[string]interface{}{}
	if err := json.Unmarshal([]byte(decrypted), &cfg); err != nil {
		return "digest escalation config parse failed: " + err.Error()
	}
	escalationSubject := "[ESCALATED] " + subject
	escalationBody := body + "\n\nEscalation reason:\n" + rootErr
	if _, err := d.sendByChannelType(context.Background(), *digest.EscalationChannelType, cfg, recipients, escalationSubject, escalationBody); err != nil {
		return "digest escalation send failed: " + err.Error()
	}
	return "digest escalated via " + coalesce(digest.EscalationChannelName, "channel")
}

// renderDigestBody builds the plain-text digest body: summary header plus
// up to 15 event titles, with a "... and N more" marker for any remainder.
func renderDigestBody(digest database.AlertRouteDigestWithDetails) string {
	titles := parseStringList(digest.TitlesJSON)
	var b strings.Builder
	b.WriteString("CH-UI Alert Digest\n\n")
	b.WriteString("Event type: " + digest.EventType + "\n")
	b.WriteString("Severity: " + strings.ToUpper(digest.Severity) + "\n")
	b.WriteString("Events in window: " + strconv.Itoa(digest.EventCount) + "\n")
	b.WriteString("Window: " + digest.BucketStart + " -> " + digest.BucketEnd + "\n")
	b.WriteString("Channel: " + digest.ChannelName + " (" + digest.ChannelType + ")\n")
	if len(titles) > 0 {
		b.WriteString("\nTitles:\n")
		for i, title := range titles {
			b.WriteString(fmt.Sprintf("%d. %s\n", i+1, title))
			if i >= 14 { // cap the list at 15 titles
				remaining := len(titles) - (i + 1)
				if remaining > 0 {
					b.WriteString(fmt.Sprintf("... and %d more\n", remaining))
				}
				break
			}
		}
	}
	return b.String()
}

// coalesce returns *v unless v is nil or blank, in which case it returns
// the fallback.
func coalesce(v *string, fallback string) string {
	if v == nil || strings.TrimSpace(*v) == "" {
		return fallback
	}
	return *v
}

// stringCfg reads a config value as a trimmed string via %v formatting.
// NOTE(review): a missing key formats as "<nil>" (not ""), so absent
// required fields like "host" slip past the empty-string validation in the
// senders — likely should special-case a nil value; confirm and fix.
func stringCfg(cfg map[string]interface{}, key string) string {
	v := strings.TrimSpace(fmt.Sprintf("%v", cfg[key]))
	if v == "" {
		return ""
	}
	return v
}

// boolCfg reads a config value as a bool, accepting JSON bools, nonzero
// numbers, and the strings "1"/"true"/"yes" (case-insensitive).
func boolCfg(cfg map[string]interface{}, key string, defaultVal bool) bool {
	raw, ok := cfg[key]
	if !ok {
		return defaultVal
	}
	switch v := raw.(type) {
	case bool:
		return v
	case float64:
		return v != 0
	case string:
		val := strings.ToLower(strings.TrimSpace(v))
		return val == "1" || val == "true" || val == "yes"
	default:
		return defaultVal
	}
}

// intCfg reads a config value as an int, accepting JSON numbers (float64),
// native ints, and numeric strings; anything else yields the default.
func intCfg(cfg map[string]interface{}, key string, defaultVal int) int {
	raw, ok := cfg[key]
	if !ok {
		return defaultVal
	}
	switch v := raw.(type) {
	case float64:
		return int(v)
	case int:
		return v
	case string:
		if n, err := strconv.Atoi(strings.TrimSpace(v)); err == nil {
			return n
		}
	}
	return defaultVal
}

// sendSMTP delivers the message over SMTP. Three paths, in priority order:
// implicit TLS (use_tls), opportunistic STARTTLS (starttls, defaulting to
// the opposite of use_tls), and plain smtp.SendMail. Returns the literal
// "smtp" as the provider message ID since SMTP gives none back.
// ctx is accepted for signature symmetry but not threaded into the
// net/smtp calls, which predate context support.
func (d *Dispatcher) sendSMTP(ctx context.Context, cfg map[string]interface{}, recipients []string, subject, body string) (string, error) {
	host := stringCfg(cfg, "host")
	fromEmail := stringCfg(cfg, "from_email")
	username := stringCfg(cfg, "username")
	password := stringCfg(cfg, "password")
	fromName := stringCfg(cfg, "from_name")
	if host == "" || fromEmail == "" {
		return "", fmt.Errorf("smtp config requires host and from_email")
	}
	port := intCfg(cfg, "port", 587)
	addr := fmt.Sprintf("%s:%d", host, port)
	useTLS := boolCfg(cfg, "use_tls", false)
	startTLS := boolCfg(cfg, "starttls", !useTLS)
	insecureSkipVerify := boolCfg(cfg, "insecure_skip_verify", false)
	fromHeader := fromEmail
	if fromName != "" {
		fromHeader = fmt.Sprintf("%s <%s>", fromName, fromEmail)
	}
	// Minimal RFC 5322 message: headers, blank line, plain-text body.
	msg := []byte("From: " + fromHeader + "\r\n" +
		"To: " + strings.Join(recipients, ",") + "\r\n" +
		"Subject: " + subject + "\r\n" +
		"MIME-Version: 1.0\r\n" +
		"Content-Type: text/plain; charset=UTF-8\r\n\r\n" +
		body)
	var auth smtp.Auth
	if username != "" {
		auth = smtp.PlainAuth("", username, password, host)
	}
	if useTLS {
		// Implicit TLS (typically port 465): TLS handshake before SMTP.
		conn, err := tls.Dial("tcp", addr, &tls.Config{
			ServerName:         host,
			InsecureSkipVerify: insecureSkipVerify,
		})
		if err != nil {
			return "", fmt.Errorf("smtp tls dial: %w", err)
		}
		defer conn.Close()
		client, err := smtp.NewClient(conn, host)
		if err != nil {
			return "", fmt.Errorf("smtp new client: %w", err)
		}
		defer client.Close()
		if auth != nil {
			if err := client.Auth(auth); err != nil {
				return "", fmt.Errorf("smtp auth: %w", err)
			}
		}
		if err := client.Mail(fromEmail); err != nil {
			return "", fmt.Errorf("smtp mail: %w", err)
		}
		for _, rcpt := range recipients {
			if err := client.Rcpt(rcpt); err != nil {
				return "", fmt.Errorf("smtp rcpt %s: %w", rcpt, err)
			}
		}
		w, err := client.Data()
		if err != nil {
			return "", fmt.Errorf("smtp data: %w", err)
		}
		if _, err := w.Write(msg); err != nil {
			_ = w.Close()
			return "", fmt.Errorf("smtp write: %w", err)
		}
		if err := w.Close(); err != nil {
			return "", fmt.Errorf("smtp close data: %w", err)
		}
		if err := client.Quit(); err != nil {
			return "", fmt.Errorf("smtp quit: %w", err)
		}
		return "smtp", nil
	}
	if startTLS {
		// Plaintext connect, then upgrade via STARTTLS when the server
		// advertises it; otherwise the session continues unencrypted.
		client, err := smtp.Dial(addr)
		if err != nil {
			return "", fmt.Errorf("smtp dial: %w", err)
		}
		defer client.Close()
		if ok, _ := client.Extension("STARTTLS"); ok {
			if err := client.StartTLS(&tls.Config{
				ServerName:         host,
				InsecureSkipVerify: insecureSkipVerify,
			}); err != nil {
				return "", fmt.Errorf("smtp starttls: %w", err)
			}
		}
		if auth != nil {
			if err := client.Auth(auth); err != nil {
				return "", fmt.Errorf("smtp auth: %w", err)
			}
		}
		if err := client.Mail(fromEmail); err != nil {
			return "", fmt.Errorf("smtp mail: %w", err)
		}
		for _, rcpt := range recipients {
			if err := client.Rcpt(rcpt); err != nil {
				return "", fmt.Errorf("smtp rcpt %s: %w", rcpt, err)
			}
		}
		w, err := client.Data()
		if err != nil {
			return "", fmt.Errorf("smtp data: %w", err)
		}
		if _, err := w.Write(msg); err != nil {
			_ = w.Close()
			return "", fmt.Errorf("smtp write: %w", err)
		}
		if err := w.Close(); err != nil {
			return "", fmt.Errorf("smtp close data: %w", err)
		}
		if err := client.Quit(); err != nil {
			return "", fmt.Errorf("smtp quit: %w", err)
		}
		return "smtp", nil
	}
	if err := smtp.SendMail(addr, auth, fromEmail, recipients, msg); err != nil {
		return "", fmt.Errorf("smtp sendmail: %w", err)
	}
	return "smtp", nil
}

// sendResend delivers via the Resend HTTP API (POST {base}/emails with a
// bearer token) and returns the provider's message "id" when present.
func (d *Dispatcher) sendResend(ctx context.Context, cfg map[string]interface{}, recipients []string, subject, body string) (string, error) {
	apiKey := stringCfg(cfg, "api_key")
	fromEmail := stringCfg(cfg, "from_email")
	fromName := stringCfg(cfg, "from_name")
	baseURL := stringCfg(cfg, "base_url")
	if baseURL == "" {
		baseURL = "https://api.resend.com"
	}
	if apiKey == "" || fromEmail == "" {
		return "", fmt.Errorf("resend config requires api_key and from_email")
	}
	from := fromEmail
	if fromName != "" {
		from = fmt.Sprintf("%s <%s>", fromName, fromEmail)
	}
	payload := map[string]interface{}{
		"from":    from,
		"to":      recipients,
		"subject": subject,
		"text":    body,
	}
	raw, _ := json.Marshal(payload)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, strings.TrimRight(baseURL, "/")+"/emails", bytes.NewReader(raw))
	if err != nil {
		return "", fmt.Errorf("resend request: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	resp, err := d.http.Do(req)
	if err != nil {
		return "", fmt.Errorf("resend send: %w", err)
	}
	defer resp.Body.Close()
	data, _ := io.ReadAll(resp.Body)
	if resp.StatusCode >= 300 {
		return "", fmt.Errorf("resend error (%d): %s", resp.StatusCode, strings.TrimSpace(string(data)))
	}
	var out struct {
		ID string `json:"id"`
	}
	_ = json.Unmarshal(data, &out)
	return out.ID, nil
}

// sendBrevo delivers via the Brevo HTTP API (POST {base}/v3/smtp/email
// with an api-key header) and returns the provider's "messageId".
func (d *Dispatcher) sendBrevo(ctx context.Context, cfg map[string]interface{}, recipients []string, subject, body string) (string, error) {
	apiKey := stringCfg(cfg, "api_key")
	fromEmail := stringCfg(cfg, "from_email")
	fromName := stringCfg(cfg, "from_name")
	baseURL := stringCfg(cfg, "base_url")
	if baseURL == "" {
		baseURL = "https://api.brevo.com"
	}
	if apiKey == "" || fromEmail == "" {
		return "", fmt.Errorf("brevo config requires api_key and from_email")
	}
	to := make([]map[string]string, 0, len(recipients))
	for _, r := range recipients {
		to = append(to, map[string]string{"email": r})
	}
	payload := map[string]interface{}{
		"sender": map[string]string{
			"name":  fromName,
			"email": fromEmail,
		},
		"to":          to,
		"subject":     subject,
		"textContent": body,
	}
	raw, _ := json.Marshal(payload)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, strings.TrimRight(baseURL, "/")+"/v3/smtp/email", bytes.NewReader(raw))
	if err != nil {
		return "", fmt.Errorf("brevo request: %w", err)
	}
	req.Header.Set("api-key", apiKey)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	resp, err := d.http.Do(req)
	if err != nil {
		return "", fmt.Errorf("brevo send: %w", err)
	}
	defer resp.Body.Close()
	data, _ := io.ReadAll(resp.Body)
	if resp.StatusCode >= 300 {
		return "", fmt.Errorf("brevo error (%d): %s", resp.StatusCode, strings.TrimSpace(string(data)))
	}
	var out struct {
		MessageID string `json:"messageId"`
	}
	_ = json.Unmarshal(data, &out)
	return out.MessageID, nil
}

================================================
FILE: internal/brain/provider.go
================================================
package brain

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
)

// Message represents one chat message for provider calls.
type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// ProviderConfig defines provider runtime configuration.
type ProviderConfig struct {
	Kind    string
	BaseURL string
	APIKey  string
}

// ChatResult holds optional metadata returned after streaming completes.
type ChatResult struct {
	InputTokens     int
	OutputTokens    int
	ModelParameters map[string]interface{}
}

// Provider handles streaming chat and model discovery.
// Provider is implemented once per backend (OpenAI-compatible, Ollama).
// StreamChat pushes incremental text to onDelta and returns usage metadata;
// ListModels enumerates model identifiers available at the endpoint.
type Provider interface {
	StreamChat(ctx context.Context, cfg ProviderConfig, model string, messages []Message, onDelta func(string) error) (*ChatResult, error)
	ListModels(ctx context.Context, cfg ProviderConfig) ([]string, error)
}

// NewProvider returns the Provider implementation for the given kind
// ("openai"/"openai_compatible" or "ollama"); unknown kinds are an error.
func NewProvider(kind string) (Provider, error) {
	switch strings.ToLower(strings.TrimSpace(kind)) {
	case "openai", "openai_compatible":
		return &openAIProvider{client: &http.Client{}}, nil
	case "ollama":
		return &ollamaProvider{client: &http.Client{}}, nil
	default:
		return nil, fmt.Errorf("unsupported provider kind: %s", kind)
	}
}

// -------- OpenAI provider --------

// openAIProvider speaks the OpenAI chat-completions API (and compatible
// servers) using SSE streaming.
type openAIProvider struct {
	client *http.Client
}

// openAIRequest is the JSON body for POST /chat/completions. Temperature
// is a pointer so it can be omitted entirely for models that reject
// non-default values.
type openAIRequest struct {
	Model         string               `json:"model"`
	Messages      []Message            `json:"messages"`
	Stream        bool                 `json:"stream"`
	Temperature   *float64             `json:"temperature,omitempty"`
	StreamOptions *openAIStreamOptions `json:"stream_options,omitempty"`
}

// openAIStreamOptions requests a final usage chunk in the SSE stream.
type openAIStreamOptions struct {
	IncludeUsage bool `json:"include_usage"`
}

// openAIChunk is one decoded SSE data frame: delta content plus an
// optional trailing usage block.
type openAIChunk struct {
	Choices []struct {
		Delta struct {
			Content string `json:"content"`
		} `json:"delta"`
	} `json:"choices"`
	Usage *struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
	} `json:"usage"`
}

// Default sampling temperature sent for models that accept one.
const openAIDefaultTemperature = 0.1

// ensureOpenAIV1Base normalizes a base URL so its path ends in /v1,
// leaving it alone when it already does. Used to build a fallback base
// for servers that mount the API under /v1 when the configured URL lacks it.
func ensureOpenAIV1Base(rawBase string) string {
	trimmed := strings.TrimRight(strings.TrimSpace(rawBase), "/")
	if trimmed == "" {
		return "https://api.openai.com/v1"
	}
	parsed, err := url.Parse(trimmed)
	if err != nil {
		// Unparseable input: fall back to plain string suffix handling.
		if strings.HasSuffix(trimmed, "/v1") {
			return trimmed
		}
		return trimmed + "/v1"
	}
	path := strings.TrimRight(strings.TrimSpace(parsed.Path), "/")
	if path == "/v1" || strings.HasSuffix(path, "/v1") {
		return strings.TrimRight(parsed.String(), "/")
	}
	if path == "" || path == "/" {
		parsed.Path = "/v1"
	} else {
		parsed.Path = path + "/v1"
	}
	return strings.TrimRight(parsed.String(), "/")
}

// shouldRetryOpenAIV1 reports whether a 404 looks like a wrong-base-path
// error (rather than a genuine missing resource), in which case the caller
// retries against the /v1-normalized base.
func shouldRetryOpenAIV1(status int, body []byte) bool {
	if status != http.StatusNotFound {
		return false
	}
	msg := strings.ToLower(string(body))
	return strings.Contains(msg, "invalid url") || strings.Contains(msg, "/models") || strings.Contains(msg, "/chat/completions")
}

// baseURL resolves the effective API base: the OpenAI default when empty,
// and a /v1 suffix appended when the configured URL has no path at all.
func (p *openAIProvider) baseURL(cfg ProviderConfig) string {
	raw := strings.TrimSpace(cfg.BaseURL)
	if raw == "" {
		return "https://api.openai.com/v1"
	}
	parsed, err := url.Parse(raw)
	if err != nil {
		return strings.TrimRight(raw, "/")
	}
	base := strings.TrimRight(parsed.String(), "/")
	path := strings.TrimSpace(parsed.Path)
	// For OpenAI-style APIs, a root URL should target /v1 endpoints.
	if path == "" || path == "/" {
		return strings.TrimRight(base, "/") + "/v1"
	}
	return base
}

// DefaultModelParameters returns the default provider request parameters for one model.
func DefaultModelParameters(kind, model string) map[string]interface{} {
	switch strings.ToLower(strings.TrimSpace(kind)) {
	case "openai", "openai_compatible":
		return openAIModelParameters(openAIRequestTemperature(model))
	default:
		return nil
	}
}

// openAIRequestTemperature returns the temperature to request, or nil for
// model families that only accept their built-in default.
func openAIRequestTemperature(model string) *float64 {
	if openAIModelRequiresDefaultTemperature(model) {
		return nil
	}
	temperature := openAIDefaultTemperature
	return &temperature
}

// openAIModelRequiresDefaultTemperature flags the o1/o3/o4 reasoning model
// families (matched on the name after any provider/ prefix), which reject
// explicit temperature values.
func openAIModelRequiresDefaultTemperature(model string) bool {
	name := strings.ToLower(strings.TrimSpace(model))
	if slash := strings.LastIndex(name, "/"); slash >= 0 {
		name = name[slash+1:]
	}
	return strings.HasPrefix(name, "o1") || strings.HasPrefix(name, "o3") || strings.HasPrefix(name, "o4")
}

// openAIModelParameters packages the (possibly absent) temperature into
// the parameter map recorded on ChatResult.
func openAIModelParameters(temperature *float64) map[string]interface{} {
	params := map[string]interface{}{}
	if temperature != nil {
		params["temperature"] = *temperature
	}
	return params
}

// isUnsupportedOpenAITemperature detects a 400 response complaining about
// the temperature parameter, which triggers a retry without one.
func isUnsupportedOpenAITemperature(status int, body []byte) bool {
	if status != http.StatusBadRequest {
		return false
	}
	msg := strings.ToLower(string(body))
	return strings.Contains(msg, "temperature") && (strings.Contains(msg, "unsupported value") || strings.Contains(msg, "does not support") || strings.Contains(msg, "only the default"))
}

// StreamChat streams a chat completion. It first tries with the model's
// default temperature; if the server rejects that temperature it retries
// once with the temperature omitted. Transport errors abort immediately;
// HTTP-level errors surface as "provider error (status): body".
func (p *openAIProvider) StreamChat(ctx context.Context, cfg ProviderConfig, model string, messages []Message, onDelta func(string) error) (*ChatResult, error) {
	if strings.TrimSpace(cfg.APIKey) == "" {
		return nil, errors.New("provider API key is not configured")
	}
	// Attempt list: [explicit temperature, nil] — or just [nil] when the
	// model family only accepts its default.
	temperatures := []*float64{openAIRequestTemperature(model)}
	if temperatures[0] != nil {
		temperatures = append(temperatures, nil)
	}
	var lastStatus int
	var lastErrBody []byte
	for attemptIdx, temperature := range temperatures {
		result, status, errBody, err := p.streamChatAttempt(ctx, cfg, model, messages, temperature, onDelta)
		if err != nil {
			return nil, err
		}
		if status == 0 { // status 0 signals success from the attempt helper
			return result, nil
		}
		lastStatus = status
		lastErrBody = errBody
		if attemptIdx < len(temperatures)-1 && isUnsupportedOpenAITemperature(status, errBody) {
			continue
		}
		return nil, fmt.Errorf("provider error (%d): %s", status, string(errBody))
	}
	if lastStatus != 0 {
		return nil, fmt.Errorf("provider error (%d): %s", lastStatus, string(lastErrBody))
	}
	return nil, errors.New("provider request failed")
}

// streamChatAttempt performs one streaming request, trying the configured
// base URL first and falling back to its /v1-normalized form on a
// path-style 404. Returns (result, 0, nil, nil) on success; a non-zero
// status with the error body for HTTP failures; or a hard error for
// transport/stream problems. SSE "data:" frames are decoded incrementally,
// delta text is forwarded to onDelta, and the trailing usage chunk (or
// "[DONE]") terminates the stream.
// NOTE(review): bufio.Scanner has a default max token size (64KiB); a
// single SSE line larger than that would abort the stream — confirm
// whether provider chunks can exceed it.
func (p *openAIProvider) streamChatAttempt(ctx context.Context, cfg ProviderConfig, model string, messages []Message, temperature *float64, onDelta func(string) error) (*ChatResult, int, []byte, error) {
	payload := openAIRequest{
		Model:         model,
		Messages:      messages,
		Stream:        true,
		Temperature:   temperature,
		StreamOptions: &openAIStreamOptions{IncludeUsage: true},
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return nil, 0, nil, fmt.Errorf("marshal provider request: %w", err)
	}
	primaryBase := p.baseURL(cfg)
	bases := []string{primaryBase}
	v1Fallback := ensureOpenAIV1Base(primaryBase)
	if v1Fallback != primaryBase {
		bases = append(bases, v1Fallback)
	}
	var lastStatus int
	var lastErrBody []byte
	for idx, base := range bases {
		endpoint := base + "/chat/completions"
		req, reqErr := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
		if reqErr != nil {
			return nil, 0, nil, fmt.Errorf("create provider request: %w", reqErr)
		}
		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("Authorization", "Bearer "+cfg.APIKey)
		resp, doErr := p.client.Do(req)
		if doErr != nil {
			return nil, 0, nil, fmt.Errorf("provider request failed: %w", doErr)
		}
		if resp.StatusCode < 200 || resp.StatusCode >= 300 {
			errBody, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			lastStatus = resp.StatusCode
			lastErrBody = errBody
			if idx < len(bases)-1 && shouldRetryOpenAIV1(resp.StatusCode, errBody) {
				continue
			}
			return nil, resp.StatusCode, errBody, nil
		}
		result := ChatResult{ModelParameters: openAIModelParameters(temperature)}
		scanner := bufio.NewScanner(resp.Body)
		for scanner.Scan() {
			line := scanner.Text()
			if !strings.HasPrefix(line, "data: ") {
				continue
			}
			data := strings.TrimPrefix(line, "data: ")
			if data == "[DONE]" {
				resp.Body.Close()
				return &result, 0, nil, nil
			}
			var chunk openAIChunk
			if err := json.Unmarshal([]byte(data), &chunk); err != nil {
				continue // tolerate malformed frames
			}
			if chunk.Usage != nil {
				result.InputTokens = chunk.Usage.PromptTokens
				result.OutputTokens = chunk.Usage.CompletionTokens
			}
			for _, c := range chunk.Choices {
				if c.Delta.Content == "" {
					continue
				}
				if err := onDelta(c.Delta.Content); err != nil {
					resp.Body.Close()
					return nil, 0, nil, err
				}
			}
		}
		if err := scanner.Err(); err != nil {
			resp.Body.Close()
			return nil, 0, nil, fmt.Errorf("read provider stream: %w", err)
		}
		resp.Body.Close()
		return &result, 0, nil, nil
	}
	if lastStatus != 0 {
		return nil, lastStatus, lastErrBody, nil
	}
	return nil, 0, nil, errors.New("provider request failed")
}

// ListModels fetches GET {base}/models, with the same /v1 fallback as
// StreamChat, and returns the non-blank model IDs.
func (p *openAIProvider) ListModels(ctx context.Context, cfg ProviderConfig) ([]string, error) {
	if strings.TrimSpace(cfg.APIKey) == "" {
		return nil, errors.New("provider API key is not configured")
	}
	primaryBase := p.baseURL(cfg)
	bases := []string{primaryBase}
	v1Fallback := ensureOpenAIV1Base(primaryBase)
	if v1Fallback != primaryBase {
		bases = append(bases, v1Fallback)
	}
	var lastStatus int
	var lastErrBody []byte
	for idx, base := range bases {
		endpoint := base + "/models"
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
		if err != nil {
			return nil, fmt.Errorf("create provider request: %w", err)
		}
		req.Header.Set("Authorization", "Bearer "+cfg.APIKey)
		resp, err := p.client.Do(req)
		if err != nil {
			return nil, fmt.Errorf("provider request failed: %w", err)
		}
		if resp.StatusCode < 200 || resp.StatusCode >= 300 {
			errBody, _ := io.ReadAll(resp.Body)
			resp.Body.Close()
			lastStatus = resp.StatusCode
			lastErrBody = errBody
			if idx < len(bases)-1 && shouldRetryOpenAIV1(resp.StatusCode, errBody) {
				continue
			}
			return nil, fmt.Errorf("provider error (%d): %s", resp.StatusCode, string(errBody))
		}
		var parsed struct {
			Data []struct {
				ID string `json:"id"`
			} `json:"data"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
			resp.Body.Close()
			return nil, fmt.Errorf("decode models response: %w", err)
		}
		resp.Body.Close()
		models := make([]string, 0, len(parsed.Data))
		for _, item := range parsed.Data {
			if strings.TrimSpace(item.ID) == "" {
				continue
			}
			models = append(models, item.ID)
		}
		return models, nil
	}
	if lastStatus != 0 {
		return nil, fmt.Errorf("provider error (%d): %s", lastStatus, string(lastErrBody))
	}
	return nil, errors.New("provider request failed")
}

// -------- Ollama provider --------

// ollamaProvider speaks the native Ollama HTTP API (NDJSON streaming).
type ollamaProvider struct {
	client *http.Client
}

// baseURL returns the configured base or the local Ollama default.
func (p *ollamaProvider) baseURL(cfg ProviderConfig) string {
	if strings.TrimSpace(cfg.BaseURL) != "" {
		return strings.TrimRight(cfg.BaseURL, "/")
	}
	return "http://localhost:11434"
}

// StreamChat streams from POST /api/chat, decoding one JSON object per
// line: content deltas are forwarded to onDelta, an in-stream "error"
// field aborts, and the "done" frame carries the token counts. No API key
// is required for Ollama.
// Note: the local variable `url` shadows the imported net/url package
// within this function (which does not use it).
func (p *ollamaProvider) StreamChat(ctx context.Context, cfg ProviderConfig, model string, messages []Message, onDelta func(string) error) (*ChatResult, error) {
	payload := map[string]interface{}{
		"model":    model,
		"stream":   true,
		"messages": messages,
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return nil, fmt.Errorf("marshal provider request: %w", err)
	}
	url := p.baseURL(cfg) + "/api/chat"
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("create provider request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := p.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("provider request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		errBody, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("provider error (%d): %s", resp.StatusCode, string(errBody))
	}
	var result ChatResult
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		var chunk struct {
			Done    bool `json:"done"`
			Message struct {
				Content string `json:"content"`
			} `json:"message"`
			Error           string `json:"error"`
			PromptEvalCount int    `json:"prompt_eval_count"`
			EvalCount       int    `json:"eval_count"`
		}
		if err := json.Unmarshal([]byte(line), &chunk); err != nil {
			continue // tolerate malformed frames
		}
		if chunk.Error != "" {
			return nil, errors.New(chunk.Error)
		}
		if chunk.Message.Content != "" {
			if err := onDelta(chunk.Message.Content); err != nil {
				return nil, err
			}
		}
		if chunk.Done {
			result.InputTokens = chunk.PromptEvalCount
			result.OutputTokens = chunk.EvalCount
			return &result, nil
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("read provider stream: %w", err)
	}
	return &result, nil
}

// ListModels fetches GET /api/tags and returns the installed model names.
func (p *ollamaProvider) ListModels(ctx context.Context, cfg ProviderConfig) ([]string, error) {
	url := p.baseURL(cfg) + "/api/tags"
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, fmt.Errorf("create provider request: %w", err)
	}
	resp, err := p.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("provider request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		errBody, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("provider error (%d): %s", resp.StatusCode, string(errBody))
	}
	var parsed struct {
		Models []struct {
			Name string `json:"name"`
		} `json:"models"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil
{
		return nil, fmt.Errorf("decode models response: %w", err)
	}
	// Keep only non-blank model names.
	models := make([]string, 0, len(parsed.Models))
	for _, item := range parsed.Models {
		if strings.TrimSpace(item.Name) == "" {
			continue
		}
		models = append(models, item.Name)
	}
	return models, nil
}

================================================
FILE: internal/brain/provider_test.go
================================================
package brain

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"sync"
	"testing"
)

// TestOpenAIProviderStreamChatOmitsTemperatureForReasoningModels verifies
// that StreamChat sends no "temperature" field for the o4-mini model, that
// the streamed content and usage counters come through, and that only a
// single request is made.
func TestOpenAIProviderStreamChatOmitsTemperatureForReasoningModels(t *testing.T) {
	t.Parallel()
	var attempts int
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/v1/chat/completions" {
			t.Fatalf("unexpected path: %s", r.URL.Path)
		}
		attempts++
		var payload map[string]interface{}
		if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
			t.Fatalf("decode request: %v", err)
		}
		if _, ok := payload["temperature"]; ok {
			t.Fatalf("temperature should be omitted for o4-mini payloads")
		}
		writeOpenAIStreamResponse(w, "hello", 3, 1)
	}))
	defer server.Close()
	provider := &openAIProvider{client: server.Client()}
	var built strings.Builder
	result, err := provider.StreamChat(
		context.Background(),
		ProviderConfig{Kind: "openai", BaseURL: server.URL, APIKey: "test-key"},
		"o4-mini",
		[]Message{{Role: "user", Content: "help"}},
		func(delta string) error {
			built.WriteString(delta)
			return nil
		},
	)
	if err != nil {
		t.Fatalf("StreamChat returned error: %v", err)
	}
	if built.String() != "hello" {
		t.Fatalf("unexpected streamed content: %q", built.String())
	}
	if attempts != 1 {
		t.Fatalf("expected 1 attempt, got %d", attempts)
	}
	if result == nil {
		t.Fatalf("expected result")
	}
	if result.InputTokens != 3 || result.OutputTokens != 1 {
		t.Fatalf("unexpected usage: %+v", result)
	}
	if len(result.ModelParameters) != 0 {
		t.Fatalf("expected no model parameters, got %+v", result.ModelParameters)
	}
}

func
TestOpenAIProviderStreamChatRetriesWithoutTemperatureOnUnsupportedValue(t *testing.T) {
	// Verifies that when the server rejects the temperature parameter with an
	// "unsupported_value" error, StreamChat retries once without it.
	t.Parallel()
	var (
		mu       sync.Mutex
		attempts []bool // per request: whether "temperature" was present
	)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/v1/chat/completions" {
			t.Fatalf("unexpected path: %s", r.URL.Path)
		}
		var payload map[string]interface{}
		if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
			t.Fatalf("decode request: %v", err)
		}
		_, hasTemperature := payload["temperature"]
		mu.Lock()
		attempts = append(attempts, hasTemperature)
		mu.Unlock()
		if hasTemperature {
			// First attempt: reject the temperature parameter.
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusBadRequest)
			_, _ = fmt.Fprint(w, `{"error":{"message":"Unsupported value: 'temperature' does not support 0.1 with this model. Only the default (1) value is supported.","type":"invalid_request_error","param":"temperature","code":"unsupported_value"}}`)
			return
		}
		writeOpenAIStreamResponse(w, "fixed", 5, 2)
	}))
	defer server.Close()
	provider := &openAIProvider{client: server.Client()}
	var built strings.Builder
	result, err := provider.StreamChat(
		context.Background(),
		ProviderConfig{Kind: "openai", BaseURL: server.URL, APIKey: "test-key"},
		"gpt-4o",
		[]Message{{Role: "user", Content: "help"}},
		func(delta string) error {
			built.WriteString(delta)
			return nil
		},
	)
	if err != nil {
		t.Fatalf("StreamChat returned error: %v", err)
	}
	if built.String() != "fixed" {
		t.Fatalf("unexpected streamed content: %q", built.String())
	}
	mu.Lock()
	gotAttempts := append([]bool(nil), attempts...)
	mu.Unlock()
	if len(gotAttempts) != 2 {
		t.Fatalf("expected 2 attempts, got %d", len(gotAttempts))
	}
	if !gotAttempts[0] || gotAttempts[1] {
		t.Fatalf("expected retry sequence [true false], got %v", gotAttempts)
	}
	if result == nil {
		t.Fatalf("expected result")
	}
	if result.InputTokens != 5 || result.OutputTokens != 2 {
		t.Fatalf("unexpected usage: %+v", result)
	}
	if len(result.ModelParameters) != 0 {
		t.Fatalf("expected no model parameters after retry, got %+v", result.ModelParameters)
	}
}

// writeOpenAIStreamResponse emits a minimal OpenAI-style SSE stream: one
// content delta, one usage frame, then the [DONE] sentinel.
func writeOpenAIStreamResponse(w http.ResponseWriter, content string, inputTokens, outputTokens int) {
	w.Header().Set("Content-Type", "text/event-stream")
	_, _ = fmt.Fprintf(w, "data: {\"choices\":[{\"delta\":{\"content\":%q}}]}\n\n", content)
	_, _ = fmt.Fprintf(w, "data: {\"usage\":{\"prompt_tokens\":%d,\"completion_tokens\":%d}}\n\n", inputTokens, outputTokens)
	_, _ = fmt.Fprint(w, "data: [DONE]\n\n")
}

================================================
FILE: internal/config/config.go
================================================
package config

import (
	"log/slog"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"

	"github.com/caioricciuti/ch-ui/internal/license"
	"gopkg.in/yaml.v3"
)

// Config holds the fully merged server configuration (defaults, config file,
// environment variables).
type Config struct {
	// Server
	Port    int
	DevMode bool
	AppURL  string

	// Database
	DatabasePath string

	// Security
	AppSecretKey   string
	SessionMaxAge  int // seconds, default 7 days
	AllowedOrigins []string

	// Tunnel
	TunnelURL string

	// Embedded agent
	ClickHouseURL  string // default http://localhost:8123
	ConnectionName string // default Local ClickHouse

	// License
	LicenseJSON string // Stored signed license JSON (loaded from DB at startup)
}

// serverConfigFile is the YAML structure for the server config file.
type serverConfigFile struct {
	Port           int      `yaml:"port"`
	AppURL         string   `yaml:"app_url"`
	DatabasePath   string   `yaml:"database_path"`
	ClickHouseURL  string   `yaml:"clickhouse_url"`
	ConnectionName string   `yaml:"connection_name"`
	AppSecretKey   string   `yaml:"app_secret_key"`
	AllowedOrigins []string `yaml:"allowed_origins"`
	TunnelURL      string   `yaml:"tunnel_url"`
}

// DefaultServerConfigPath returns the platform-specific default config path.
func DefaultServerConfigPath() string {
	switch runtime.GOOS {
	case "darwin":
		// NOTE(review): UserHomeDir error is ignored; an empty home would
		// yield the relative path ".config/ch-ui/server.yaml" — confirm intended.
		home, _ := os.UserHomeDir()
		return filepath.Join(home, ".config", "ch-ui", "server.yaml")
	default:
		return "/etc/ch-ui/server.yaml"
	}
}

// Load creates a Config by merging: config file -> env vars -> defaults.
// Priority: env vars > config file > defaults.
func Load(configPath string) *Config {
	// Hard-coded defaults (lowest priority).
	cfg := &Config{
		Port:           3488,
		DatabasePath:   "./data/ch-ui.db",
		AppSecretKey:   DefaultAppSecretKey,
		SessionMaxAge:  7 * 24 * 60 * 60,
		ClickHouseURL:  "http://localhost:8123",
		ConnectionName: "Local ClickHouse",
	}
	// 1. Load from config file (overrides defaults)
	if configPath != "" {
		if err := loadServerConfigFile(configPath, cfg); err != nil {
			if !os.IsNotExist(err) {
				slog.Warn("Failed to load config file", "path", configPath, "error", err)
			} else {
				slog.Warn("Config file not found", "path", configPath)
			}
		} else {
			slog.Info("Loaded config file", "path", configPath)
		}
	} else {
		// Try default path, silently ignore if not found
		defaultPath := DefaultServerConfigPath()
		if err := loadServerConfigFile(defaultPath, cfg); err == nil {
			slog.Info("Loaded config file", "path", defaultPath)
		}
	}
	// 2. Override with environment variables (highest priority)
	if v := os.Getenv("PORT"); v != "" {
		if p, err := strconv.Atoi(v); err == nil {
			cfg.Port = p
		}
	}
	if v := os.Getenv("APP_URL"); v != "" {
		cfg.AppURL = trimQuotes(v)
	}
	// NOTE(review): some variables go through trimQuotes and others do not
	// (DATABASE_PATH, CLICKHOUSE_URL, TUNNEL_URL) — confirm this is intentional.
	if v := os.Getenv("DATABASE_PATH"); v != "" {
		cfg.DatabasePath = v
	}
	if v := os.Getenv("CLICKHOUSE_URL"); v != "" {
		cfg.ClickHouseURL = v
	}
	if v := os.Getenv("CONNECTION_NAME"); v != "" {
		cfg.ConnectionName = trimQuotes(v)
	}
	// Backward-compatible typo alias
	if v := os.Getenv("CONNECITION_NAME"); v != "" {
		cfg.ConnectionName = trimQuotes(v)
	}
	if v := os.Getenv("APP_SECRET_KEY"); v != "" {
		cfg.AppSecretKey = trimQuotes(v)
	}
	if v := os.Getenv("ALLOWED_ORIGINS"); v != "" {
		// Comma-separated list; blank entries are dropped.
		cfg.AllowedOrigins = nil
		for _, o := range strings.Split(v, ",") {
			if trimmed := strings.TrimSpace(o); trimmed != "" {
				cfg.AllowedOrigins = append(cfg.AllowedOrigins, trimmed)
			}
		}
	}
	if v := os.Getenv("TUNNEL_URL"); v != "" {
		cfg.TunnelURL = v
	}
	// Derive defaults for computed fields
	if cfg.AppURL == "" {
		cfg.AppURL = "http://localhost:" + strconv.Itoa(cfg.Port)
	}
	if cfg.TunnelURL == "" {
		cfg.TunnelURL = "ws://127.0.0.1:" + strconv.Itoa(cfg.Port) + "/connect"
	}
	cfg.DevMode = os.Getenv("NODE_ENV") != "production"
	return cfg
}

// loadServerConfigFile reads a YAML config file into cfg, overriding only
// the fields that are set (non-zero/non-empty) in the file.
func loadServerConfigFile(path string, cfg *Config) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var fc serverConfigFile
	if err := yaml.Unmarshal(data, &fc); err != nil {
		return err
	}
	if fc.Port != 0 {
		cfg.Port = fc.Port
	}
	if fc.AppURL != "" {
		cfg.AppURL = fc.AppURL
	}
	if fc.DatabasePath != "" {
		cfg.DatabasePath = fc.DatabasePath
	}
	if fc.ClickHouseURL != "" {
		cfg.ClickHouseURL = fc.ClickHouseURL
	}
	if fc.ConnectionName != "" {
		cfg.ConnectionName = fc.ConnectionName
	}
	if fc.AppSecretKey != "" {
		cfg.AppSecretKey = fc.AppSecretKey
	}
	if len(fc.AllowedOrigins) > 0 {
		cfg.AllowedOrigins = fc.AllowedOrigins
	}
	if fc.TunnelURL != "" {
		cfg.TunnelURL = fc.TunnelURL
	}
	return nil
}

// GenerateServerTemplate returns a YAML config template for the server.
func GenerateServerTemplate() string {
	return `# CH-UI Server Configuration
#
# Place this file at:
# macOS: ~/.config/ch-ui/server.yaml
# Linux: /etc/ch-ui/server.yaml
#
# All settings can also be set via environment variables.
# Priority: env vars > config file > defaults

# HTTP port (default: 3488)
port: 3488

# Public URL of the server
# app_url: https://ch-ui.yourcompany.com

# SQLite database path (default: ./data/ch-ui.db)
# database_path: /var/lib/ch-ui/ch-ui.db

# ClickHouse HTTP endpoint (default: http://localhost:8123)
# clickhouse_url: http://localhost:8123

# Embedded connection display name (default: Local ClickHouse)
# connection_name: Local ClickHouse

# Secret key for session encryption (CHANGE THIS in production)
# app_secret_key: your-random-secret-here

# Allowed CORS origins
# allowed_origins:
# - https://ch-ui.yourcompany.com
`
}

// IsProduction reports whether the server is running in production mode.
func (c *Config) IsProduction() bool { return !c.DevMode }

// IsPro reports whether a valid Pro-edition license is loaded.
func (c *Config) IsPro() bool {
	info := license.ValidateLicense(c.LicenseJSON)
	return info.Valid && strings.EqualFold(strings.TrimSpace(info.Edition), "pro")
}

// trimQuotes strips one layer of matching single or double quotes after
// trimming surrounding whitespace.
func trimQuotes(s string) string {
	s = strings.TrimSpace(s)
	if len(s) >= 2 {
		if (s[0] == '\'' && s[len(s)-1] == '\'') || (s[0] == '"' && s[len(s)-1] == '"') {
			return s[1 : len(s)-1]
		}
	}
	return s
}

================================================
FILE: internal/config/secret.go
================================================
package config

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

const (
	// DefaultAppSecretKey exists for backward compatibility only.
	// New installs should persist a random key when this placeholder is detected.
DefaultAppSecretKey = "ch-ui-default-secret-key-change-in-production"
)

// SecretKeySource identifies where the effective app secret key came from.
type SecretKeySource string

const (
	SecretKeySourceConfigured SecretKeySource = "configured"
	SecretKeySourceFile       SecretKeySource = "file"
	SecretKeySourceGenerated  SecretKeySource = "generated"
)

// AppSecretKeyPath returns the default persisted key path based on the database path.
func AppSecretKeyPath(databasePath string) string {
	dbPath := strings.TrimSpace(databasePath)
	if dbPath == "" {
		dbPath = "./data/ch-ui.db"
	}
	// The key file lives in the same directory as the SQLite database.
	return filepath.Join(filepath.Dir(dbPath), ".app_secret_key")
}

// EnsureAppSecretKey guarantees a non-default secret key.
// If the configured key is default/empty, it loads from the persisted key file,
// or generates and stores a new key.
func EnsureAppSecretKey(cfg *Config) (SecretKeySource, error) {
	if cfg == nil {
		return SecretKeySourceConfigured, fmt.Errorf("nil config")
	}
	current := strings.TrimSpace(cfg.AppSecretKey)
	// An explicitly configured, non-placeholder key wins outright.
	if current != "" && current != DefaultAppSecretKey {
		return SecretKeySourceConfigured, nil
	}
	secretPath := AppSecretKeyPath(cfg.DatabasePath)
	// Next preference: a previously persisted key file.
	if data, err := os.ReadFile(secretPath); err == nil {
		loaded := strings.TrimSpace(string(data))
		if loaded == "" {
			return SecretKeySourceFile, fmt.Errorf("empty app secret key file: %s", secretPath)
		}
		cfg.AppSecretKey = loaded
		return SecretKeySourceFile, nil
	} else if !os.IsNotExist(err) {
		return SecretKeySourceFile, fmt.Errorf("read app secret key file: %w", err)
	}
	// No key anywhere: generate one and persist it with owner-only permissions.
	secret, err := generateRandomSecret(48)
	if err != nil {
		return SecretKeySourceGenerated, err
	}
	if err := os.MkdirAll(filepath.Dir(secretPath), 0700); err != nil {
		return SecretKeySourceGenerated, fmt.Errorf("create secret key directory: %w", err)
	}
	if err := os.WriteFile(secretPath, []byte(secret+"\n"), 0600); err != nil {
		return SecretKeySourceGenerated, fmt.Errorf("write app secret key file: %w", err)
	}
	cfg.AppSecretKey = secret
	return SecretKeySourceGenerated, nil
}

// generateRandomSecret returns a raw-std base64 encoding of size
// cryptographically random bytes; non-positive sizes fall back to 48.
func generateRandomSecret(size int) (string, error) {
	if size <= 0 {
		size = 48
	}
	buf :=
make([]byte, size)
	if _, err := rand.Read(buf); err != nil {
		return "", fmt.Errorf("generate random app secret key: %w", err)
	}
	return base64.RawStdEncoding.EncodeToString(buf), nil
}

================================================
FILE: internal/config/secret_test.go
================================================
package config

import (
	"path/filepath"
	"testing"
)

// TestEnsureAppSecretKeyConfigured checks that an explicitly configured
// (non-placeholder) key is reported as "configured" and left untouched.
func TestEnsureAppSecretKeyConfigured(t *testing.T) {
	cfg := &Config{
		DatabasePath: "./data/ch-ui.db",
		AppSecretKey: "already-configured-secret",
	}
	source, err := EnsureAppSecretKey(cfg)
	if err != nil {
		t.Fatalf("EnsureAppSecretKey returned error: %v", err)
	}
	if source != SecretKeySourceConfigured {
		t.Fatalf("unexpected source: got %s want %s", source, SecretKeySourceConfigured)
	}
	if cfg.AppSecretKey != "already-configured-secret" {
		t.Fatalf("configured secret should be preserved")
	}
}

// TestEnsureAppSecretKeyGenerateAndReload checks that the placeholder key is
// replaced by a generated key on first run and then reloaded from the
// persisted key file on a subsequent run.
func TestEnsureAppSecretKeyGenerateAndReload(t *testing.T) {
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "data", "ch-ui.db")
	cfg := &Config{
		DatabasePath: dbPath,
		AppSecretKey: DefaultAppSecretKey,
	}
	source, err := EnsureAppSecretKey(cfg)
	if err != nil {
		t.Fatalf("EnsureAppSecretKey returned error: %v", err)
	}
	if source != SecretKeySourceGenerated {
		t.Fatalf("unexpected source on first run: got %s want %s", source, SecretKeySourceGenerated)
	}
	if cfg.AppSecretKey == "" || cfg.AppSecretKey == DefaultAppSecretKey {
		t.Fatalf("generated secret should be non-empty and non-default")
	}
	first := cfg.AppSecretKey
	cfgReload := &Config{
		DatabasePath: dbPath,
		AppSecretKey: DefaultAppSecretKey,
	}
	source, err = EnsureAppSecretKey(cfgReload)
	if err != nil {
		t.Fatalf("EnsureAppSecretKey reload returned error: %v", err)
	}
	if source != SecretKeySourceFile {
		t.Fatalf("unexpected source on reload: got %s want %s", source, SecretKeySourceFile)
	}
	if cfgReload.AppSecretKey != first {
		t.Fatalf("reloaded secret mismatch")
	}
}

================================================
FILE: internal/crypto/aes.go
================================================ package crypto import ( "crypto/aes" "crypto/cipher" "crypto/rand" "encoding/hex" "fmt" "strings" "golang.org/x/crypto/scrypt" ) // deriveKey derives a 32-byte AES key from the secret using scrypt. // Parameters match Node.js crypto.scryptSync(secret, 'salt', 32) defaults: N=16384, r=8, p=1. // // SECURITY NOTE: The static salt weakens key derivation — all installations using // the same AppSecretKey will produce the same derived key. A future version should // use a per-installation salt (stored alongside the DB) with a data migration path. func deriveKey(secret string) ([]byte, error) { return scrypt.Key([]byte(secret), []byte("salt"), 16384, 8, 1, 32) } // Encrypt encrypts plaintext using AES-256-GCM with a 16-byte nonce. // Returns format: iv:authTag:encrypted (hex-encoded). func Encrypt(plaintext, secret string) (string, error) { key, err := deriveKey(secret) if err != nil { return "", fmt.Errorf("key derivation failed: %w", err) } block, err := aes.NewCipher(key) if err != nil { return "", fmt.Errorf("cipher creation failed: %w", err) } // Use 16-byte nonce to match Node.js createCipheriv('aes-256-gcm', key, iv) with 16-byte IV gcm, err := cipher.NewGCMWithNonceSize(block, 16) if err != nil { return "", fmt.Errorf("GCM creation failed: %w", err) } iv := make([]byte, 16) if _, err := rand.Read(iv); err != nil { return "", fmt.Errorf("random IV generation failed: %w", err) } // Seal appends ciphertext + tag sealed := gcm.Seal(nil, iv, []byte(plaintext), nil) // Split sealed into encrypted data and auth tag (last 16 bytes) tagSize := gcm.Overhead() encrypted := sealed[:len(sealed)-tagSize] authTag := sealed[len(sealed)-tagSize:] return fmt.Sprintf("%s:%s:%s", hex.EncodeToString(iv), hex.EncodeToString(authTag), hex.EncodeToString(encrypted), ), nil } // Decrypt decrypts a string in format iv:authTag:encrypted using AES-256-GCM. 
func Decrypt(encryptedStr, secret string) (string, error) { parts := strings.Split(encryptedStr, ":") if len(parts) != 3 { return "", fmt.Errorf("invalid encrypted format: expected 3 parts, got %d", len(parts)) } iv, err := hex.DecodeString(parts[0]) if err != nil { return "", fmt.Errorf("invalid IV hex: %w", err) } authTag, err := hex.DecodeString(parts[1]) if err != nil { return "", fmt.Errorf("invalid auth tag hex: %w", err) } encrypted, err := hex.DecodeString(parts[2]) if err != nil { return "", fmt.Errorf("invalid encrypted data hex: %w", err) } key, err := deriveKey(secret) if err != nil { return "", fmt.Errorf("key derivation failed: %w", err) } block, err := aes.NewCipher(key) if err != nil { return "", fmt.Errorf("cipher creation failed: %w", err) } gcm, err := cipher.NewGCMWithNonceSize(block, 16) if err != nil { return "", fmt.Errorf("GCM creation failed: %w", err) } // Go expects ciphertext + tag concatenated ciphertext := append(encrypted, authTag...) plaintext, err := gcm.Open(nil, iv, ciphertext, nil) if err != nil { return "", fmt.Errorf("decryption failed: %w", err) } return string(plaintext), nil } // IsEncrypted checks if a string appears to be in the encrypted format (iv:authTag:encrypted). 
func IsEncrypted(value string) bool {
	parts := strings.Split(value, ":")
	return len(parts) == 3 &&
		len(parts[0]) == 32 && // IV: 16 bytes = 32 hex chars
		len(parts[1]) == 32 && // Auth tag: 16 bytes = 32 hex chars
		len(parts[2]) > 0 // Encrypted data exists
}



================================================
FILE: internal/database/alert_digests.go
================================================
package database

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"github.com/google/uuid"
)

// AlertRouteDigestWithDetails is a pending digest row joined with its route
// recipients, escalation policy, and the (encrypted) channel configs the
// dispatcher needs to actually deliver the digest.
type AlertRouteDigestWithDetails struct {
	ID                               string  `json:"id"`
	RouteID                          string  `json:"route_id"`
	RuleID                           string  `json:"rule_id"`
	ChannelID                        string  `json:"channel_id"`
	BucketStart                      string  `json:"bucket_start"`
	BucketEnd                        string  `json:"bucket_end"`
	EventType                        string  `json:"event_type"`
	Severity                         string  `json:"severity"`
	EventCount                       int     `json:"event_count"`
	EventIDsJSON                     string  `json:"event_ids_json"`
	TitlesJSON                       string  `json:"titles_json"`
	Status                           string  `json:"status"`
	AttemptCount                     int     `json:"attempt_count"`
	MaxAttempts                      int     `json:"max_attempts"`
	NextAttemptAt                    string  `json:"next_attempt_at"`
	LastError                        *string `json:"last_error"`
	CreatedAt                        string  `json:"created_at"`
	UpdatedAt                        string  `json:"updated_at"`
	SentAt                           *string `json:"sent_at"`
	RouteRecipientsJSON              string  `json:"route_recipients_json"`
	EscalationChannelID              *string `json:"escalation_channel_id"`
	EscalationRecipientsJSON         *string `json:"escalation_recipients_json"`
	EscalationAfterFailures          int     `json:"escalation_after_failures"`
	ChannelName                      string  `json:"channel_name"`
	ChannelType                      string  `json:"channel_type"`
	ChannelConfigEncrypted           string  `json:"channel_config_encrypted"`
	EscalationChannelName            *string `json:"escalation_channel_name"`
	EscalationChannelType            *string `json:"escalation_channel_type"`
	EscalationChannelConfigEncrypted *string `json:"escalation_channel_config_encrypted"`
}

// UpsertAlertRouteDigest folds an alert event into the digest bucket for the
// given route. Time is truncated into fixed windows (route.DigestWindowMinutes,
// default 15) so all events of the same (route, bucket, event_type, severity)
// accumulate into one row. The whole read-modify-write runs in a transaction.
func (db *DB) UpsertAlertRouteDigest(rule AlertRule, route AlertRuleRouteView, event AlertEvent, now time.Time) error {
	windowMins := route.DigestWindowMinutes
	if windowMins <= 0 {
		windowMins = 15
	}
	window := time.Duration(windowMins) * time.Minute
	// Truncate to the window boundary so concurrent events land in the same bucket.
	bucketStart := now.UTC().Truncate(window)
	bucketEnd := bucketStart.Add(window)
	tx, err := db.conn.Begin()
	if err != nil {
		return fmt.Errorf("begin digest upsert: %w", err)
	}
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()
	var (
		digestID     string
		eventCount   int
		eventIDsJSON string
		titlesJSON   string
	)
	err = tx.QueryRow(
		`SELECT id, event_count, event_ids_json, titles_json FROM alert_route_digests
		 WHERE route_id = ? AND bucket_start = ? AND event_type = ? AND severity = ?`,
		route.ID, bucketStart.Format(time.RFC3339), event.EventType, event.Severity,
	).Scan(&digestID, &eventCount, &eventIDsJSON, &titlesJSON)
	switch err {
	case nil:
		// Existing bucket: append this event, with hard caps so the JSON blobs
		// stay bounded (200 event IDs, 30 titles). event_count still counts all.
		ids := parseDigestStringArray(eventIDsJSON)
		if len(ids) < 200 {
			ids = append(ids, event.ID)
		}
		titles := parseDigestStringArray(titlesJSON)
		if len(titles) < 30 {
			titles = append(titles, strings.TrimSpace(event.Title))
		}
		// Marshal of []string cannot fail; errors deliberately ignored.
		idsPayload, _ := json.Marshal(ids)
		titlesPayload, _ := json.Marshal(titles)
		if _, err := tx.Exec(
			`UPDATE alert_route_digests SET event_count = ?, event_ids_json = ?, titles_json = ?, updated_at = ?
			 WHERE id = ?`,
			eventCount+1, string(idsPayload), string(titlesPayload), now.UTC().Format(time.RFC3339), digestID,
		); err != nil {
			return fmt.Errorf("update digest batch: %w", err)
		}
	case sql.ErrNoRows:
		// New bucket: first delivery attempt is scheduled at bucket_end.
		digestID = uuid.NewString()
		maxAttempts := rule.MaxAttempts
		if maxAttempts <= 0 {
			maxAttempts = 5
		}
		idsPayload, _ := json.Marshal([]string{event.ID})
		titlesPayload, _ := json.Marshal([]string{strings.TrimSpace(event.Title)})
		if _, err := tx.Exec(
			`INSERT INTO alert_route_digests (id, route_id, rule_id, channel_id, bucket_start, bucket_end, event_type, severity, event_count, event_ids_json, titles_json, status, attempt_count, max_attempts, next_attempt_at, created_at, updated_at)
			 VALUES (?, ?, ?, ?, ?, ?, ?, ?, 1, ?, ?, 'collecting', 0, ?, ?, ?, ?)`,
			digestID, route.ID, rule.ID, route.ChannelID, bucketStart.Format(time.RFC3339), bucketEnd.Format(time.RFC3339), event.EventType, event.Severity, string(idsPayload), string(titlesPayload), maxAttempts, bucketEnd.Format(time.RFC3339), now.UTC().Format(time.RFC3339), now.UTC().Format(time.RFC3339),
		); err != nil {
			return fmt.Errorf("insert digest batch: %w", err)
		}
	default:
		return fmt.Errorf("load digest batch: %w", err)
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("commit digest upsert: %w", err)
	}
	return nil
}

// ListDueAlertRouteDigests returns digests whose bucket has closed and whose
// next attempt is due, joined with route/policy/channel details. Digests that
// have exhausted max_attempts are excluded. Default limit is 20.
func (db *DB) ListDueAlertRouteDigests(limit int) ([]AlertRouteDigestWithDetails, error) {
	if limit <= 0 {
		limit = 20
	}
	now := time.Now().UTC().Format(time.RFC3339)
	rows, err := db.conn.Query(
		`SELECT d.id, d.route_id, d.rule_id, d.channel_id, d.bucket_start, d.bucket_end, d.event_type, d.severity,
		        d.event_count, d.event_ids_json, d.titles_json, d.status, d.attempt_count, d.max_attempts,
		        d.next_attempt_at, d.last_error, d.created_at, d.updated_at, d.sent_at,
		        rr.recipients_json,
		        rp.escalation_channel_id, rp.escalation_recipients_json, COALESCE(rp.escalation_after_failures, 0),
		        c.name, c.channel_type, c.config_encrypted,
		        ec.name, ec.channel_type, ec.config_encrypted
		 FROM alert_route_digests d
		 JOIN alert_rule_routes rr ON rr.id = d.route_id
		 LEFT JOIN alert_route_policies rp ON rp.route_id = rr.id
		 JOIN alert_channels c ON c.id = d.channel_id
		 LEFT JOIN alert_channels ec ON ec.id = rp.escalation_channel_id
		 WHERE d.status IN ('collecting', 'retrying')
		   AND d.bucket_end <= ?
		   AND d.next_attempt_at <= ?
		   AND d.attempt_count < d.max_attempts
		 ORDER BY d.bucket_end ASC
		 LIMIT ?`,
		now, now, limit,
	)
	if err != nil {
		return nil, fmt.Errorf("list due alert route digests: %w", err)
	}
	defer rows.Close()
	out := make([]AlertRouteDigestWithDetails, 0)
	for rows.Next() {
		var item AlertRouteDigestWithDetails
		var lastError, sentAt sql.NullString
		// Escalation columns come from LEFT JOINs and may all be NULL.
		var escalationChannelID, escalationRecipientsJSON, escalationChannelName, escalationChannelType, escalationChannelConfig sql.NullString
		if err := rows.Scan(
			&item.ID, &item.RouteID, &item.RuleID, &item.ChannelID, &item.BucketStart, &item.BucketEnd,
			&item.EventType, &item.Severity, &item.EventCount, &item.EventIDsJSON, &item.TitlesJSON,
			&item.Status, &item.AttemptCount, &item.MaxAttempts, &item.NextAttemptAt, &lastError,
			&item.CreatedAt, &item.UpdatedAt, &sentAt,
			&item.RouteRecipientsJSON,
			&escalationChannelID, &escalationRecipientsJSON, &item.EscalationAfterFailures,
			&item.ChannelName, &item.ChannelType, &item.ChannelConfigEncrypted,
			&escalationChannelName, &escalationChannelType, &escalationChannelConfig,
		); err != nil {
			return nil, fmt.Errorf("scan due alert route digest: %w", err)
		}
		item.LastError = nullStringToPtr(lastError)
		item.SentAt = nullStringToPtr(sentAt)
		item.EscalationChannelID = nullStringToPtr(escalationChannelID)
		item.EscalationRecipientsJSON = nullStringToPtr(escalationRecipientsJSON)
		item.EscalationChannelName = nullStringToPtr(escalationChannelName)
		item.EscalationChannelType = nullStringToPtr(escalationChannelType)
		item.EscalationChannelConfigEncrypted = nullStringToPtr(escalationChannelConfig)
		out = append(out, item)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate due alert route digests: %w", err)
	}
	return out, nil
}

// MarkAlertRouteDigestSending flags a digest as in-flight and increments its
// attempt counter in the same statement.
func (db *DB) MarkAlertRouteDigestSending(id string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE alert_route_digests SET status = 'sending', attempt_count = attempt_count + 1, updated_at = ? WHERE id = ?`,
		now, id,
	); err != nil {
		return fmt.Errorf("mark digest sending: %w", err)
	}
	return nil
}

// MarkAlertRouteDigestSent records successful delivery of a digest.
func (db *DB) MarkAlertRouteDigestSent(id string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE alert_route_digests SET status = 'sent', sent_at = ?, updated_at = ? WHERE id = ?`,
		now, now, id,
	); err != nil {
		return fmt.Errorf("mark digest sent: %w", err)
	}
	return nil
}

// MarkAlertRouteDigestRetry schedules a failed digest for another attempt at
// nextAttemptAt, recording the delivery error (NULL when empty).
func (db *DB) MarkAlertRouteDigestRetry(id string, nextAttemptAt time.Time, lastError string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE alert_route_digests SET status = 'retrying', next_attempt_at = ?, last_error = ?, updated_at = ? WHERE id = ?`,
		nextAttemptAt.UTC().Format(time.RFC3339), nullableString(lastError), now, id,
	); err != nil {
		return fmt.Errorf("mark digest retry: %w", err)
	}
	return nil
}

// MarkAlertRouteDigestFailed marks a digest as permanently failed (no further
// retries), recording the final error.
func (db *DB) MarkAlertRouteDigestFailed(id string, lastError string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE alert_route_digests SET status = 'failed', last_error = ?, updated_at = ?
		 WHERE id = ?`,
		nullableString(lastError), now, id,
	); err != nil {
		return fmt.Errorf("mark digest failed: %w", err)
	}
	return nil
}

// parseDigestStringArray decodes a JSON string array, dropping blank entries.
// Malformed or empty input deliberately yields an empty slice (best-effort:
// a corrupt digest blob must not block dispatching).
func parseDigestStringArray(raw string) []string {
	if strings.TrimSpace(raw) == "" {
		return []string{}
	}
	var values []string
	if err := json.Unmarshal([]byte(raw), &values); err != nil {
		return []string{}
	}
	out := make([]string, 0, len(values))
	for _, v := range values {
		v = strings.TrimSpace(v)
		if v != "" {
			out = append(out, v)
		}
	}
	return out
}


================================================
FILE: internal/database/alerts.go
================================================
package database

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"github.com/google/uuid"
)

// AlertChannel is a delivery endpoint (e.g. email/webhook) whose provider
// config is stored encrypted and never serialized to JSON.
type AlertChannel struct {
	ID              string  `json:"id"`
	Name            string  `json:"name"`
	ChannelType     string  `json:"channel_type"`
	ConfigEncrypted string  `json:"-"`
	IsActive        bool    `json:"is_active"`
	CreatedBy       *string `json:"created_by"`
	CreatedAt       string  `json:"created_at"`
	UpdatedAt       string  `json:"updated_at"`
}

// AlertRule matches events by type and minimum severity and carries the
// delivery templates plus retry/cooldown parameters.
type AlertRule struct {
	ID              string  `json:"id"`
	Name            string  `json:"name"`
	EventType       string  `json:"event_type"`
	SeverityMin     string  `json:"severity_min"`
	Enabled         bool    `json:"enabled"`
	CooldownSeconds int     `json:"cooldown_seconds"`
	MaxAttempts     int     `json:"max_attempts"`
	SubjectTemplate *string `json:"subject_template"`
	BodyTemplate    *string `json:"body_template"`
	CreatedBy       *string `json:"created_by"`
	CreatedAt       string  `json:"created_at"`
	UpdatedAt       string  `json:"updated_at"`
}

// AlertRuleRoute binds a rule to a channel with recipients and an optional
// digest/escalation policy. The *JSON fields mirror the raw DB columns.
type AlertRuleRoute struct {
	ID                       string   `json:"id"`
	RuleID                   string   `json:"rule_id"`
	ChannelID                string   `json:"channel_id"`
	Recipients               []string `json:"recipients"`
	RecipientsJSON           string   `json:"-"`
	IsActive                 bool     `json:"is_active"`
	DeliveryMode             string   `json:"delivery_mode"`
	DigestWindowMinutes      int      `json:"digest_window_minutes"`
	EscalationChannelID      *string  `json:"escalation_channel_id"`
	EscalationRecipients     []string `json:"escalation_recipients"`
	EscalationRecipientsJSON string   `json:"-"`
	EscalationAfterFailures  int      `json:"escalation_after_failures"`
	CreatedAt                string   `json:"created_at"`
	UpdatedAt                string   `json:"updated_at"`
}

// AlertRuleRouteView is an AlertRuleRoute joined with channel display names.
type AlertRuleRouteView struct {
	AlertRuleRoute
	ChannelName           string  `json:"channel_name"`
	ChannelType           string  `json:"channel_type"`
	EscalationChannelName *string `json:"escalation_channel_name"`
	EscalationChannelType *string `json:"escalation_channel_type"`
}

// AlertEvent is a single raised alert occurrence; Fingerprint supports
// cooldown-based dedup, Status tracks new -> processed.
type AlertEvent struct {
	ID           string  `json:"id"`
	ConnectionID *string `json:"connection_id"`
	EventType    string  `json:"event_type"`
	Severity     string  `json:"severity"`
	Title        string  `json:"title"`
	Message      string  `json:"message"`
	PayloadJSON  *string `json:"payload_json"`
	Fingerprint  *string `json:"fingerprint"`
	SourceRef    *string `json:"source_ref"`
	Status       string  `json:"status"`
	CreatedAt    string  `json:"created_at"`
	ProcessedAt  *string `json:"processed_at"`
}

// AlertDispatchJob is one queued delivery of one event over one route.
type AlertDispatchJob struct {
	ID                string  `json:"id"`
	EventID           string  `json:"event_id"`
	RuleID            string  `json:"rule_id"`
	RouteID           string  `json:"route_id"`
	ChannelID         string  `json:"channel_id"`
	Status            string  `json:"status"`
	AttemptCount      int     `json:"attempt_count"`
	MaxAttempts       int     `json:"max_attempts"`
	NextAttemptAt     string  `json:"next_attempt_at"`
	LastError         *string `json:"last_error"`
	ProviderMessageID *string `json:"provider_message_id"`
	CreatedAt         string  `json:"created_at"`
	UpdatedAt         string  `json:"updated_at"`
	SentAt            *string `json:"sent_at"`
}

// AlertDispatchJobWithDetails is a dispatch job joined with everything the
// sender needs: event content, rule templates, route policy, and channel configs.
type AlertDispatchJobWithDetails struct {
	AlertDispatchJob
	EventType                        string  `json:"event_type"`
	EventSeverity                    string  `json:"event_severity"`
	EventTitle                       string  `json:"event_title"`
	EventMessage                     string  `json:"event_message"`
	EventPayloadJSON                 *string `json:"event_payload_json"`
	EventFingerprint                 *string `json:"event_fingerprint"`
	RuleName                         string  `json:"rule_name"`
	RuleCooldownSeconds              int     `json:"rule_cooldown_seconds"`
	RuleSubjectTemplate              *string `json:"rule_subject_template"`
	RuleBodyTemplate                 *string `json:"rule_body_template"`
	RouteRecipientsJSON              string  `json:"route_recipients_json"`
	RouteDeliveryMode                string  `json:"route_delivery_mode"`
	RouteDigestWindowMins            int     `json:"route_digest_window_minutes"`
	RouteEscalationChannelID         *string `json:"route_escalation_channel_id"`
	RouteEscalationRecipientsJSON    *string `json:"route_escalation_recipients_json"`
	RouteEscalationAfterFailures     int     `json:"route_escalation_after_failures"`
	ChannelName                      string  `json:"channel_name"`
	ChannelType                      string  `json:"channel_type"`
	ChannelConfigEncrypted           string  `json:"channel_config_encrypted"`
	EscalationChannelName            *string `json:"escalation_channel_name"`
	EscalationChannelType            *string `json:"escalation_channel_type"`
	EscalationChannelConfigEncrypted *string `json:"escalation_channel_config_encrypted"`
}

// CreateAlertChannel inserts a channel and returns its generated UUID.
// encryptedConfig is expected to already be encrypted by the caller.
func (db *DB) CreateAlertChannel(name, channelType, encryptedConfig string, isActive bool, createdBy string) (string, error) {
	id := uuid.NewString()
	now := time.Now().UTC().Format(time.RFC3339)
	_, err := db.conn.Exec(
		`INSERT INTO alert_channels (id, name, channel_type, config_encrypted, is_active, created_by, created_at, updated_at)
		 VALUES (?, ?, ?, ?, ?, ?, ?, ?)`,
		id, strings.TrimSpace(name), strings.TrimSpace(channelType), encryptedConfig, boolToInt(isActive), nullableString(createdBy), now, now,
	)
	if err != nil {
		return "", fmt.Errorf("create alert channel: %w", err)
	}
	return id, nil
}

// UpdateAlertChannel updates a channel. A nil encryptedConfig means "keep the
// stored config" (e.g. the UI submitted the form without re-entering secrets).
func (db *DB) UpdateAlertChannel(id, name, channelType string, encryptedConfig *string, isActive bool) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if encryptedConfig != nil {
		if _, err := db.conn.Exec(
			`UPDATE alert_channels SET name = ?, channel_type = ?, config_encrypted = ?, is_active = ?, updated_at = ? WHERE id = ?`,
			strings.TrimSpace(name), strings.TrimSpace(channelType), *encryptedConfig, boolToInt(isActive), now, id,
		); err != nil {
			return fmt.Errorf("update alert channel: %w", err)
		}
		return nil
	}
	if _, err := db.conn.Exec(
		`UPDATE alert_channels SET name = ?, channel_type = ?, is_active = ?, updated_at = ?
		 WHERE id = ?`,
		strings.TrimSpace(name), strings.TrimSpace(channelType), boolToInt(isActive), now, id,
	); err != nil {
		return fmt.Errorf("update alert channel: %w", err)
	}
	return nil
}

// DeleteAlertChannel removes a channel by ID. Deleting a missing ID is not an error.
func (db *DB) DeleteAlertChannel(id string) error {
	if _, err := db.conn.Exec(`DELETE FROM alert_channels WHERE id = ?`, id); err != nil {
		return fmt.Errorf("delete alert channel: %w", err)
	}
	return nil
}

// GetAlertChannelByID returns the channel, or (nil, nil) when not found.
func (db *DB) GetAlertChannelByID(id string) (*AlertChannel, error) {
	row := db.conn.QueryRow(
		`SELECT id, name, channel_type, config_encrypted, is_active, created_by, created_at, updated_at FROM alert_channels WHERE id = ?`,
		id,
	)
	return scanAlertChannelRow(row)
}

// ListAlertChannels returns all channels, newest first.
func (db *DB) ListAlertChannels() ([]AlertChannel, error) {
	rows, err := db.conn.Query(
		`SELECT id, name, channel_type, config_encrypted, is_active, created_by, created_at, updated_at FROM alert_channels ORDER BY created_at DESC`,
	)
	if err != nil {
		return nil, fmt.Errorf("list alert channels: %w", err)
	}
	defer rows.Close()
	out := make([]AlertChannel, 0)
	for rows.Next() {
		channel, err := scanAlertChannel(rows)
		if err != nil {
			return nil, err
		}
		out = append(out, channel)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate alert channels: %w", err)
	}
	return out, nil
}

// CreateAlertRule inserts a rule and returns its generated UUID. Negative
// cooldown is clamped to 0; non-positive maxAttempts defaults to 5.
func (db *DB) CreateAlertRule(name, eventType, severityMin string, enabled bool, cooldownSeconds, maxAttempts int, subjectTemplate, bodyTemplate, createdBy string) (string, error) {
	id := uuid.NewString()
	now := time.Now().UTC().Format(time.RFC3339)
	if cooldownSeconds < 0 {
		cooldownSeconds = 0
	}
	if maxAttempts <= 0 {
		maxAttempts = 5
	}
	_, err := db.conn.Exec(
		`INSERT INTO alert_rules (id, name, event_type, severity_min, enabled, cooldown_seconds, max_attempts, subject_template, body_template, created_by, created_at, updated_at)
		 VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		id, strings.TrimSpace(name), strings.TrimSpace(eventType), strings.TrimSpace(severityMin), boolToInt(enabled), cooldownSeconds, maxAttempts, nullableString(subjectTemplate),
		nullableString(bodyTemplate), nullableString(createdBy), now, now,
	)
	if err != nil {
		return "", fmt.Errorf("create alert rule: %w", err)
	}
	return id, nil
}

// UpdateAlertRule rewrites every mutable rule column, applying the same
// clamping as CreateAlertRule.
func (db *DB) UpdateAlertRule(id, name, eventType, severityMin string, enabled bool, cooldownSeconds, maxAttempts int, subjectTemplate, bodyTemplate string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if cooldownSeconds < 0 {
		cooldownSeconds = 0
	}
	if maxAttempts <= 0 {
		maxAttempts = 5
	}
	if _, err := db.conn.Exec(
		`UPDATE alert_rules SET name = ?, event_type = ?, severity_min = ?, enabled = ?, cooldown_seconds = ?, max_attempts = ?, subject_template = ?, body_template = ?, updated_at = ? WHERE id = ?`,
		strings.TrimSpace(name), strings.TrimSpace(eventType), strings.TrimSpace(severityMin), boolToInt(enabled), cooldownSeconds, maxAttempts, nullableString(subjectTemplate), nullableString(bodyTemplate), now, id,
	); err != nil {
		return fmt.Errorf("update alert rule: %w", err)
	}
	return nil
}

// DeleteAlertRule removes a rule by ID. Deleting a missing ID is not an error.
func (db *DB) DeleteAlertRule(id string) error {
	if _, err := db.conn.Exec(`DELETE FROM alert_rules WHERE id = ?`, id); err != nil {
		return fmt.Errorf("delete alert rule: %w", err)
	}
	return nil
}

// GetAlertRuleByID returns the rule, or (nil, nil) when not found.
func (db *DB) GetAlertRuleByID(id string) (*AlertRule, error) {
	row := db.conn.QueryRow(
		`SELECT id, name, event_type, severity_min, enabled, cooldown_seconds, max_attempts, subject_template, body_template, created_by, created_at, updated_at FROM alert_rules WHERE id = ?`,
		id,
	)
	return scanAlertRuleRow(row)
}

// ListAlertRules returns all rules, newest first.
func (db *DB) ListAlertRules() ([]AlertRule, error) {
	rows, err := db.conn.Query(
		`SELECT id, name, event_type, severity_min, enabled, cooldown_seconds, max_attempts, subject_template, body_template, created_by, created_at, updated_at FROM alert_rules ORDER BY created_at DESC`,
	)
	if err != nil {
		return nil, fmt.Errorf("list alert rules: %w", err)
	}
	defer rows.Close()
	out := make([]AlertRule, 0)
	for rows.Next() {
		rule, err := scanAlertRule(rows)
		if err != nil {
			return nil, err
		}
		out = append(out, rule)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate alert rules: %w", err)
	}
	return out, nil
}

// ListEnabledAlertRules returns only enabled rules, newest first.
func (db *DB) ListEnabledAlertRules() ([]AlertRule, error) {
	rows, err := db.conn.Query(
		`SELECT id, name, event_type, severity_min, enabled, cooldown_seconds, max_attempts, subject_template, body_template, created_by, created_at, updated_at FROM alert_rules WHERE enabled = 1 ORDER BY created_at DESC`,
	)
	if err != nil {
		return nil, fmt.Errorf("list enabled alert rules: %w", err)
	}
	defer rows.Close()
	out := make([]AlertRule, 0)
	for rows.Next() {
		rule, err := scanAlertRule(rows)
		if err != nil {
			return nil, err
		}
		out = append(out, rule)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate enabled alert rules: %w", err)
	}
	return out, nil
}

// ReplaceAlertRuleRoutes atomically replaces all routes of a rule: delete,
// then re-insert each route and upsert its per-route policy, keeping provided
// route IDs where present so policy rows stay attached.
//
// NOTE(review): the DELETE removes routes but not their alert_route_policies
// rows when the new set drops a route — orphaned policies accumulate unless
// the schema declares ON DELETE CASCADE for route_id. Confirm the migration.
func (db *DB) ReplaceAlertRuleRoutes(ruleID string, routes []AlertRuleRoute) error {
	tx, err := db.conn.Begin()
	if err != nil {
		return fmt.Errorf("begin replace alert routes: %w", err)
	}
	defer tx.Rollback()
	if _, err := tx.Exec(`DELETE FROM alert_rule_routes WHERE rule_id = ?`, ruleID); err != nil {
		return fmt.Errorf("clear alert routes: %w", err)
	}
	now := time.Now().UTC().Format(time.RFC3339)
	for _, route := range routes {
		recipientsJSON, err := json.Marshal(route.Recipients)
		if err != nil {
			return fmt.Errorf("marshal route recipients: %w", err)
		}
		// Preserve caller-supplied IDs so existing policy rows keyed on route_id survive.
		id := route.ID
		if strings.TrimSpace(id) == "" {
			id = uuid.NewString()
		}
		if _, err := tx.Exec(
			`INSERT INTO alert_rule_routes (id, rule_id, channel_id, recipients_json, is_active, created_at, updated_at)
			 VALUES (?, ?, ?, ?, ?, ?, ?)`,
			id, ruleID, route.ChannelID, string(recipientsJSON), boolToInt(route.IsActive), now, now,
		); err != nil {
			return fmt.Errorf("insert alert route: %w", err)
		}
		// Normalize policy inputs: anything but "digest" means immediate delivery.
		deliveryMode := strings.ToLower(strings.TrimSpace(route.DeliveryMode))
		if deliveryMode != "digest" {
			deliveryMode = "immediate"
		}
		digestWindow := route.DigestWindowMinutes
		if digestWindow < 0 {
			digestWindow = 0
		}
		escalationAfter := route.EscalationAfterFailures
		if escalationAfter < 0 {
			escalationAfter = 0
		}
		// nil interface values become SQL NULL.
		var escalationChannelID interface{}
		if route.EscalationChannelID != nil && strings.TrimSpace(*route.EscalationChannelID) != "" {
			escalationChannelID = strings.TrimSpace(*route.EscalationChannelID)
		}
		var escalationRecipients interface{}
		if len(route.EscalationRecipients) > 0 {
			payload, err := json.Marshal(route.EscalationRecipients)
			if err != nil {
				return fmt.Errorf("marshal escalation recipients: %w", err)
			}
			escalationRecipients = string(payload)
		}
		if _, err := tx.Exec(
			`INSERT INTO alert_route_policies (route_id, delivery_mode, digest_window_minutes, escalation_channel_id, escalation_recipients_json, escalation_after_failures, created_at, updated_at)
			 VALUES (?, ?, ?, ?, ?, ?, ?, ?)
			 ON CONFLICT(route_id) DO UPDATE SET
			   delivery_mode = excluded.delivery_mode,
			   digest_window_minutes = excluded.digest_window_minutes,
			   escalation_channel_id = excluded.escalation_channel_id,
			   escalation_recipients_json = excluded.escalation_recipients_json,
			   escalation_after_failures = excluded.escalation_after_failures,
			   updated_at = excluded.updated_at`,
			id, deliveryMode, digestWindow, escalationChannelID, escalationRecipients, escalationAfter, now, now,
		); err != nil {
			return fmt.Errorf("upsert alert route policy: %w", err)
		}
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("commit replace alert routes: %w", err)
	}
	return nil
}

// ListAlertRuleRoutes returns all routes of a rule (active or not), joined
// with channel names and the optional policy, oldest first.
func (db *DB) ListAlertRuleRoutes(ruleID string) ([]AlertRuleRouteView, error) {
	rows, err := db.conn.Query(
		`SELECT rr.id, rr.rule_id, rr.channel_id, rr.recipients_json, rr.is_active, rr.created_at, rr.updated_at,
		        c.name, c.channel_type,
		        COALESCE(rp.delivery_mode, 'immediate'), COALESCE(rp.digest_window_minutes, 0),
		        rp.escalation_channel_id, rp.escalation_recipients_json, COALESCE(rp.escalation_after_failures, 0),
		        ec.name, ec.channel_type
		 FROM alert_rule_routes rr
		 JOIN alert_channels c ON c.id = rr.channel_id
		 LEFT JOIN alert_route_policies rp ON rp.route_id = rr.id
		 LEFT JOIN alert_channels ec ON ec.id = rp.escalation_channel_id
		 WHERE rr.rule_id = ?
		 ORDER BY rr.created_at ASC`,
		ruleID,
	)
	if err != nil {
		return nil, fmt.Errorf("list alert rule routes: %w", err)
	}
	defer rows.Close()
	out := make([]AlertRuleRouteView, 0)
	for rows.Next() {
		var item AlertRuleRouteView
		var recipientsJSON string
		var active int
		var escalationChannelID, escalationRecipientsJSON, escalationChannelName, escalationChannelType sql.NullString
		if err := rows.Scan(
			&item.ID, &item.RuleID, &item.ChannelID, &recipientsJSON, &active, &item.CreatedAt, &item.UpdatedAt,
			&item.ChannelName, &item.ChannelType,
			&item.DeliveryMode, &item.DigestWindowMinutes,
			&escalationChannelID, &escalationRecipientsJSON, &item.EscalationAfterFailures,
			&escalationChannelName, &escalationChannelType,
		); err != nil {
			return nil, fmt.Errorf("scan alert rule route: %w", err)
		}
		item.IsActive = intToBool(active)
		item.RecipientsJSON = recipientsJSON
		item.Recipients = parseRecipientsJSON(recipientsJSON)
		item.EscalationChannelID = nullStringToPtr(escalationChannelID)
		item.EscalationRecipientsJSON = escalationRecipientsJSON.String
		item.EscalationRecipients = parseRecipientsJSON(escalationRecipientsJSON.String)
		item.EscalationChannelName = nullStringToPtr(escalationChannelName)
		item.EscalationChannelType = nullStringToPtr(escalationChannelType)
		out = append(out, item)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate alert rule routes: %w", err)
	}
	return out, nil
}

// ListActiveAlertRuleRoutes is ListAlertRuleRoutes restricted to routes whose
// route AND primary channel are both active — the set a dispatcher should use.
func (db *DB) ListActiveAlertRuleRoutes(ruleID string) ([]AlertRuleRouteView, error) {
	rows, err := db.conn.Query(
		`SELECT rr.id, rr.rule_id, rr.channel_id, rr.recipients_json, rr.is_active, rr.created_at, rr.updated_at,
		        c.name, c.channel_type,
		        COALESCE(rp.delivery_mode, 'immediate'), COALESCE(rp.digest_window_minutes, 0),
		        rp.escalation_channel_id, rp.escalation_recipients_json, COALESCE(rp.escalation_after_failures, 0),
		        ec.name, ec.channel_type
		 FROM alert_rule_routes rr
		 JOIN alert_channels c ON c.id = rr.channel_id
		 LEFT JOIN alert_route_policies rp ON rp.route_id = rr.id
		 LEFT JOIN alert_channels ec ON ec.id = rp.escalation_channel_id
		 WHERE rr.rule_id = ? AND rr.is_active = 1 AND c.is_active = 1
		 ORDER BY rr.created_at ASC`,
		ruleID,
	)
	if err != nil {
		return nil, fmt.Errorf("list active alert rule routes: %w", err)
	}
	defer rows.Close()
	out := make([]AlertRuleRouteView, 0)
	for rows.Next() {
		var item AlertRuleRouteView
		var recipientsJSON string
		var active int
		var escalationChannelID, escalationRecipientsJSON, escalationChannelName, escalationChannelType sql.NullString
		if err := rows.Scan(
			&item.ID, &item.RuleID, &item.ChannelID, &recipientsJSON, &active, &item.CreatedAt, &item.UpdatedAt,
			&item.ChannelName, &item.ChannelType,
			&item.DeliveryMode, &item.DigestWindowMinutes,
			&escalationChannelID, &escalationRecipientsJSON, &item.EscalationAfterFailures,
			&escalationChannelName, &escalationChannelType,
		); err != nil {
			return nil, fmt.Errorf("scan active alert route: %w", err)
		}
		item.IsActive = intToBool(active)
		item.RecipientsJSON = recipientsJSON
		item.Recipients = parseRecipientsJSON(recipientsJSON)
		item.EscalationChannelID = nullStringToPtr(escalationChannelID)
		item.EscalationRecipientsJSON = escalationRecipientsJSON.String
		item.EscalationRecipients = parseRecipientsJSON(escalationRecipientsJSON.String)
		item.EscalationChannelName = nullStringToPtr(escalationChannelName)
		item.EscalationChannelType = nullStringToPtr(escalationChannelType)
		out = append(out, item)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate active alert routes: %w", err)
	}
	return out, nil
}

// CreateAlertEvent records a new alert event (status 'new') and returns its
// UUID. payload (any JSON-marshalable value) is stored as a JSON string;
// nil connectionID / blank strings become SQL NULLs.
func (db *DB) CreateAlertEvent(connectionID *string, eventType, severity, title, message string, payload interface{}, fingerprint, sourceRef string) (string, error) {
	id := uuid.NewString()
	now := time.Now().UTC().Format(time.RFC3339)
	var payloadJSON interface{}
	if payload != nil {
		data, err := json.Marshal(payload)
		if err != nil {
			return "", fmt.Errorf("marshal alert payload: %w", err)
		}
		payloadJSON = string(data)
	}
	var connectionVal interface{}
	if connectionID != nil && strings.TrimSpace(*connectionID) != "" {
		connectionVal = strings.TrimSpace(*connectionID)
	}
	if _, err := db.conn.Exec(
		`INSERT INTO alert_events (id, connection_id, event_type, severity, title, message, payload_json, fingerprint, source_ref, status, created_at)
		 VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 'new', ?)`,
		id, connectionVal, strings.TrimSpace(eventType), strings.TrimSpace(severity), strings.TrimSpace(title), strings.TrimSpace(message), payloadJSON, nullableString(fingerprint), nullableString(sourceRef), now,
	); err != nil {
		return "", fmt.Errorf("create alert event: %w", err)
	}
	return id, nil
}

// ListAlertEvents returns events newest first, optionally filtered by
// eventType and/or status. limit defaults to 100 and is capped at 1000.
// Filters are bound as placeholders; only trusted column text is interpolated.
func (db *DB) ListAlertEvents(limit int, eventType, status string) ([]AlertEvent, error) {
	if limit <= 0 {
		limit = 100
	}
	if limit > 1000 {
		limit = 1000
	}
	where := []string{"1=1"}
	args := make([]interface{}, 0, 4)
	if strings.TrimSpace(eventType) != "" {
		where = append(where, "event_type = ?")
		args = append(args, strings.TrimSpace(eventType))
	}
	if strings.TrimSpace(status) != "" {
		where = append(where, "status = ?")
		args = append(args, strings.TrimSpace(status))
	}
	args = append(args, limit)
	query := fmt.Sprintf(
		`SELECT id, connection_id, event_type, severity, title, message, payload_json, fingerprint, source_ref, status, created_at, processed_at FROM alert_events WHERE %s ORDER BY created_at DESC LIMIT ?`,
		strings.Join(where, " AND "),
	)
	rows, err := db.conn.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("list alert events: %w", err)
	}
	defer rows.Close()
	out := make([]AlertEvent, 0)
	for rows.Next() {
		var item AlertEvent
		var connectionID, payloadJSON, fingerprint, sourceRef, processedAt sql.NullString
		if err := rows.Scan(
			&item.ID, &connectionID, &item.EventType, &item.Severity, &item.Title, &item.Message,
			&payloadJSON, &fingerprint, &sourceRef, &item.Status, &item.CreatedAt, &processedAt,
		); err != nil {
			return nil, fmt.Errorf("scan alert event: %w", err)
		}
		item.ConnectionID = nullStringToPtr(connectionID)
		item.PayloadJSON = nullStringToPtr(payloadJSON)
		item.Fingerprint = nullStringToPtr(fingerprint)
		item.SourceRef = nullStringToPtr(sourceRef)
		item.ProcessedAt = nullStringToPtr(processedAt)
		out = append(out, item)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate alert events: %w", err)
	}
	return out, nil
}

// ListNewAlertEvents returns unprocessed events oldest first (FIFO for the
// dispatcher). limit defaults to 50.
func (db *DB) ListNewAlertEvents(limit int) ([]AlertEvent, error) {
	if limit <= 0 {
		limit = 50
	}
	rows, err := db.conn.Query(
		`SELECT id, connection_id, event_type, severity, title, message, payload_json, fingerprint, source_ref, status, created_at, processed_at FROM alert_events WHERE status = 'new' ORDER BY created_at ASC LIMIT ?`,
		limit,
	)
	if err != nil {
		return nil, fmt.Errorf("list new alert events: %w", err)
	}
	defer rows.Close()
	out := make([]AlertEvent, 0)
	for rows.Next() {
		var item AlertEvent
		var connectionID, payloadJSON, fingerprint, sourceRef, processedAt sql.NullString
		if err := rows.Scan(
			&item.ID, &connectionID, &item.EventType, &item.Severity, &item.Title, &item.Message,
			&payloadJSON, &fingerprint, &sourceRef, &item.Status, &item.CreatedAt, &processedAt,
		); err != nil {
			return nil, fmt.Errorf("scan new alert event: %w", err)
		}
		item.ConnectionID = nullStringToPtr(connectionID)
		item.PayloadJSON = nullStringToPtr(payloadJSON)
		item.Fingerprint = nullStringToPtr(fingerprint)
		item.SourceRef = nullStringToPtr(sourceRef)
		item.ProcessedAt = nullStringToPtr(processedAt)
		out = append(out, item)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate new alert events: %w", err)
	}
	return out, nil
}

// MarkAlertEventProcessed transitions an event to 'processed' and stamps
// processed_at.
func (db *DB) MarkAlertEventProcessed(id string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE alert_events SET status = 'processed', processed_at = ? WHERE id = ?`,
		now, id,
	); err != nil {
		return fmt.Errorf("mark alert event processed: %w", err)
	}
	return nil
}

// HasRecentAlertDispatch reports whether this route already has a live or sent
// dispatch for the same fingerprint since `since` — the cooldown/dedup check.
// A blank fingerprint never deduplicates.
func (db *DB) HasRecentAlertDispatch(routeID, fingerprint string, since time.Time) (bool, error) {
	if strings.TrimSpace(fingerprint) == "" {
		return false, nil
	}
	var count int
	if err := db.conn.QueryRow(
		`SELECT COUNT(*) FROM alert_dispatch_jobs j
		 JOIN alert_events e ON e.id = j.event_id
		 WHERE j.route_id = ? AND e.fingerprint = ? AND e.created_at >= ?
		   AND j.status IN ('queued', 'retrying', 'sending', 'sent')`,
		routeID, fingerprint, since.UTC().Format(time.RFC3339),
	).Scan(&count); err != nil {
		return false, fmt.Errorf("check recent alert dispatch: %w", err)
	}
	return count > 0, nil
}

// CreateAlertDispatchJob enqueues a delivery job (status 'queued') and returns
// its UUID. Non-positive maxAttempts defaults to 5.
func (db *DB) CreateAlertDispatchJob(eventID, ruleID, routeID, channelID string, maxAttempts int, nextAttemptAt time.Time) (string, error) {
	id := uuid.NewString()
	now := time.Now().UTC().Format(time.RFC3339)
	if maxAttempts <= 0 {
		maxAttempts = 5
	}
	if _, err := db.conn.Exec(
		`INSERT INTO alert_dispatch_jobs (id, event_id, rule_id, route_id, channel_id, status, attempt_count, max_attempts, next_attempt_at, created_at, updated_at)
		 VALUES (?, ?, ?, ?, ?, 'queued', 0, ?, ?, ?, ?)`,
		id, eventID, ruleID, routeID, channelID, maxAttempts, nextAttemptAt.UTC().Format(time.RFC3339), now, now,
	); err != nil {
		return "", fmt.Errorf("create alert dispatch job: %w", err)
	}
	return id, nil
}

// ListDueAlertDispatchJobs returns queued/retrying jobs whose next attempt is
// due and that have attempts left, joined with event, rule, route policy, and
// channel details. Default limit is 20.
func (db *DB) ListDueAlertDispatchJobs(limit int) ([]AlertDispatchJobWithDetails, error) {
	if limit <= 0 {
		limit = 20
	}
	now := time.Now().UTC().Format(time.RFC3339)
	rows, err := db.conn.Query(
		`SELECT j.id, j.event_id, j.rule_id, j.route_id, j.channel_id, j.status, j.attempt_count, j.max_attempts,
		        j.next_attempt_at, j.last_error, j.provider_message_id, j.created_at, j.updated_at, j.sent_at,
		        e.event_type, e.severity, e.title, e.message, e.payload_json, e.fingerprint,
		        r.name, r.cooldown_seconds, r.subject_template, r.body_template,
		        rr.recipients_json,
		        COALESCE(rp.delivery_mode, 'immediate'), COALESCE(rp.digest_window_minutes, 0),
		        rp.escalation_channel_id, rp.escalation_recipients_json, COALESCE(rp.escalation_after_failures, 0),
		        c.name, c.channel_type, c.config_encrypted,
		        ec.name, ec.channel_type, ec.config_encrypted
		 FROM alert_dispatch_jobs j
		 JOIN alert_events e ON e.id = j.event_id
		 JOIN alert_rules r ON r.id = j.rule_id
		 JOIN alert_rule_routes rr ON rr.id = j.route_id
		 LEFT JOIN alert_route_policies rp ON rp.route_id = rr.id
		 JOIN alert_channels c ON c.id = j.channel_id
		 LEFT JOIN alert_channels ec ON ec.id = rp.escalation_channel_id
		 WHERE j.status IN ('queued', 'retrying')
		   AND j.attempt_count < j.max_attempts
		   AND j.next_attempt_at <= ?
		 ORDER BY j.next_attempt_at ASC
		 LIMIT ?`,
		now, limit,
	)
	if err != nil {
		return nil, fmt.Errorf("list due alert dispatch jobs: %w", err)
	}
	defer rows.Close()
	out := make([]AlertDispatchJobWithDetails, 0)
	for rows.Next() {
		var item AlertDispatchJobWithDetails
		var lastError, providerMessageID, sentAt sql.NullString
		var eventPayloadJSON, eventFingerprint, subjectTemplate, bodyTemplate sql.NullString
		// Escalation columns come from LEFT JOINs and may all be NULL.
		var escalationChannelID, escalationRecipientsJSON, escalationChannelName, escalationChannelType, escalationChannelConfig sql.NullString
		if err := rows.Scan(
			&item.ID, &item.EventID, &item.RuleID, &item.RouteID, &item.ChannelID, &item.Status, &item.AttemptCount, &item.MaxAttempts,
			&item.NextAttemptAt, &lastError, &providerMessageID, &item.CreatedAt, &item.UpdatedAt, &sentAt,
			&item.EventType, &item.EventSeverity, &item.EventTitle, &item.EventMessage, &eventPayloadJSON, &eventFingerprint,
			&item.RuleName, &item.RuleCooldownSeconds, &subjectTemplate, &bodyTemplate,
			&item.RouteRecipientsJSON,
			&item.RouteDeliveryMode, &item.RouteDigestWindowMins,
			&escalationChannelID, &escalationRecipientsJSON, &item.RouteEscalationAfterFailures,
			&item.ChannelName, &item.ChannelType, &item.ChannelConfigEncrypted,
			&escalationChannelName, &escalationChannelType, &escalationChannelConfig,
		); err != nil {
			return nil, fmt.Errorf("scan due alert dispatch job: %w", err)
		}
		item.LastError = nullStringToPtr(lastError)
		item.ProviderMessageID = nullStringToPtr(providerMessageID)
		item.SentAt = nullStringToPtr(sentAt)
		item.EventPayloadJSON = nullStringToPtr(eventPayloadJSON)
		item.EventFingerprint = nullStringToPtr(eventFingerprint)
		item.RuleSubjectTemplate = nullStringToPtr(subjectTemplate)
		item.RuleBodyTemplate = nullStringToPtr(bodyTemplate)
		item.RouteEscalationChannelID = nullStringToPtr(escalationChannelID)
		item.RouteEscalationRecipientsJSON = nullStringToPtr(escalationRecipientsJSON)
		item.EscalationChannelName = nullStringToPtr(escalationChannelName)
		item.EscalationChannelType = nullStringToPtr(escalationChannelType)
		item.EscalationChannelConfigEncrypted = nullStringToPtr(escalationChannelConfig)
		out = append(out, item)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate due alert dispatch jobs: %w", err)
	}
	return out, nil
}

// MarkAlertDispatchJobSending flags a job as in-flight and increments its
// attempt counter in the same statement.
func (db *DB) MarkAlertDispatchJobSending(id string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE alert_dispatch_jobs SET status = 'sending', attempt_count = attempt_count + 1, updated_at = ? WHERE id = ?`,
		now, id,
	); err != nil {
		return fmt.Errorf("mark alert dispatch sending: %w", err)
	}
	return nil
}

// MarkAlertDispatchJobSent records successful delivery, keeping the provider's
// message ID (NULL when empty) for traceability.
func (db *DB) MarkAlertDispatchJobSent(id, providerMessageID string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE alert_dispatch_jobs SET status = 'sent', provider_message_id = ?, sent_at = ?, updated_at = ?
		 WHERE id = ?`,
		nullableString(providerMessageID), now, now, id,
	); err != nil {
		return fmt.Errorf("mark alert dispatch sent: %w", err)
	}
	return nil
}

// MarkAlertDispatchJobRetry schedules a failed job for another attempt at
// nextAttemptAt, recording the delivery error (NULL when empty).
func (db *DB) MarkAlertDispatchJobRetry(id string, nextAttemptAt time.Time, lastError string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE alert_dispatch_jobs SET status = 'retrying', next_attempt_at = ?, last_error = ?, updated_at = ? WHERE id = ?`,
		nextAttemptAt.UTC().Format(time.RFC3339), nullableString(lastError), now, id,
	); err != nil {
		return fmt.Errorf("mark alert dispatch retry: %w", err)
	}
	return nil
}

// MarkAlertDispatchJobFailed marks a job as permanently failed (no further
// retries), recording the final error.
func (db *DB) MarkAlertDispatchJobFailed(id, lastError string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE alert_dispatch_jobs SET status = 'failed', last_error = ?, updated_at = ? WHERE id = ?`,
		nullableString(lastError), now, id,
	); err != nil {
		return fmt.Errorf("mark alert dispatch failed: %w", err)
	}
	return nil
}

// scanAlertChannel reads one AlertChannel from any scanner (sql.Rows or
// sql.Row), converting NULLable and integer-boolean columns.
func scanAlertChannel(scanner interface {
	Scan(dest ...interface{}) error
}) (AlertChannel, error) {
	var item AlertChannel
	var configEncrypted, createdBy sql.NullString
	var isActive int
	if err := scanner.Scan(
		&item.ID, &item.Name, &item.ChannelType, &configEncrypted, &isActive, &createdBy, &item.CreatedAt, &item.UpdatedAt,
	); err != nil {
		return item, fmt.Errorf("scan alert channel: %w", err)
	}
	item.ConfigEncrypted = configEncrypted.String
	item.IsActive = intToBool(isActive)
	item.CreatedBy = nullStringToPtr(createdBy)
	return item, nil
}

// scanAlertChannelRow scans a single-row query, mapping sql.ErrNoRows to
// (nil, nil) so callers can treat "not found" as a non-error.
func scanAlertChannelRow(row *sql.Row) (*AlertChannel, error) {
	var item AlertChannel
	var configEncrypted, createdBy sql.NullString
	var isActive int
	err := row.Scan(
		&item.ID, &item.Name, &item.ChannelType, &configEncrypted, &isActive, &createdBy, &item.CreatedAt, &item.UpdatedAt,
	)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("scan alert channel row: %w", err)
	}
	item.ConfigEncrypted = configEncrypted.String
	item.IsActive =
intToBool(isActive) item.CreatedBy = nullStringToPtr(createdBy) return &item, nil } func scanAlertRule(scanner interface { Scan(dest ...interface{}) error }) (AlertRule, error) { var item AlertRule var subjectTemplate, bodyTemplate, createdBy sql.NullString var enabled int if err := scanner.Scan( &item.ID, &item.Name, &item.EventType, &item.SeverityMin, &enabled, &item.CooldownSeconds, &item.MaxAttempts, &subjectTemplate, &bodyTemplate, &createdBy, &item.CreatedAt, &item.UpdatedAt, ); err != nil { return item, fmt.Errorf("scan alert rule: %w", err) } item.Enabled = intToBool(enabled) item.SubjectTemplate = nullStringToPtr(subjectTemplate) item.BodyTemplate = nullStringToPtr(bodyTemplate) item.CreatedBy = nullStringToPtr(createdBy) return item, nil } func scanAlertRuleRow(row *sql.Row) (*AlertRule, error) { var item AlertRule var enabled int var subjectTemplate, bodyTemplate, createdBy sql.NullString err := row.Scan( &item.ID, &item.Name, &item.EventType, &item.SeverityMin, &enabled, &item.CooldownSeconds, &item.MaxAttempts, &subjectTemplate, &bodyTemplate, &createdBy, &item.CreatedAt, &item.UpdatedAt, ) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("scan alert rule row: %w", err) } item.Enabled = intToBool(enabled) item.SubjectTemplate = nullStringToPtr(subjectTemplate) item.BodyTemplate = nullStringToPtr(bodyTemplate) item.CreatedBy = nullStringToPtr(createdBy) return &item, nil } func parseRecipientsJSON(raw string) []string { if strings.TrimSpace(raw) == "" { return []string{} } var values []string if err := json.Unmarshal([]byte(raw), &values); err != nil { return []string{} } out := make([]string, 0, len(values)) for _, v := range values { v = strings.TrimSpace(v) if v != "" { out = append(out, v) } } return out } ================================================ FILE: internal/database/audit_logs.go ================================================ package database import ( "database/sql" "fmt" "strings" 
"github.com/google/uuid" ) // AuditLogParams holds parameters for creating an audit log entry. type AuditLogParams struct { Action string Username *string ConnectionID *string Details *string IPAddress *string } // AuditLog represents an audit log entry. type AuditLog struct { ID string `json:"id"` Action string `json:"action"` Username *string `json:"username"` ConnectionID *string `json:"connection_id"` Details *string `json:"details"` IPAddress *string `json:"ip_address"` CreatedAt string `json:"created_at"` } // CreateAuditLog creates a new audit log entry. func (db *DB) CreateAuditLog(params AuditLogParams) error { id := uuid.NewString() _, err := db.conn.Exec( `INSERT INTO audit_logs (id, action, username, connection_id, details, ip_address) VALUES (?, ?, ?, ?, ?, ?)`, id, params.Action, params.Username, params.ConnectionID, params.Details, params.IPAddress, ) if err != nil { return fmt.Errorf("create audit log: %w", err) } return nil } // GetAuditLogs retrieves audit logs, most recent first. func (db *DB) GetAuditLogs(limit int) ([]AuditLog, error) { return db.GetAuditLogsFiltered(limit, "", "", "", "") } // GetAuditLogsFiltered retrieves audit logs with optional filters, most recent first. 
func (db *DB) GetAuditLogsFiltered(limit int, timeRange, action, username, search string) ([]AuditLog, error) {
	// Guard against non-positive limits; default page size is 100 rows.
	if limit <= 0 {
		limit = 100
	}
	var whereClauses []string
	args := make([]any, 0, 8)
	// Normalize all filters; search is lowered here to pair with lower() in SQL.
	timeRange = strings.TrimSpace(timeRange)
	action = strings.TrimSpace(action)
	username = strings.TrimSpace(username)
	search = strings.TrimSpace(strings.ToLower(search))
	// Whitelisted time-range keys map to SQLite datetime() modifiers;
	// any unknown key simply applies no time filter.
	timeRangeOffsets := map[string]string{
		"15m": "-15 minutes",
		"1h":  "-1 hour",
		"6h":  "-6 hours",
		"24h": "-24 hours",
		"7d":  "-7 days",
		"30d": "-30 days",
	}
	if offset, ok := timeRangeOffsets[timeRange]; ok {
		whereClauses = append(whereClauses, "created_at >= datetime('now', ?)")
		args = append(args, offset)
	}
	if action != "" {
		whereClauses = append(whereClauses, "action = ?")
		args = append(args, action)
	}
	if username != "" {
		whereClauses = append(whereClauses, "username = ?")
		args = append(args, username)
	}
	// Free-text search matches action, username, details or ip_address,
	// case-insensitively (both sides lowered).
	if search != "" {
		term := "%" + search + "%"
		whereClauses = append(whereClauses,
			`( lower(action) LIKE ? OR lower(COALESCE(username, '')) LIKE ? OR lower(COALESCE(details, '')) LIKE ? OR lower(COALESCE(ip_address, '')) LIKE ? )`,
		)
		args = append(args, term, term, term, term)
	}
	// Assemble the statement; every user-supplied value goes through a placeholder,
	// only the whitelisted clause fragments above are concatenated.
	query := strings.Builder{}
	query.WriteString(`SELECT id, action, username, connection_id, details, ip_address, created_at FROM audit_logs`)
	if len(whereClauses) > 0 {
		query.WriteString(" WHERE ")
		query.WriteString(strings.Join(whereClauses, " AND "))
	}
	query.WriteString(" ORDER BY created_at DESC LIMIT ?")
	args = append(args, limit)
	rows, err := db.conn.Query(query.String(), args...)
	if err != nil {
		return nil, fmt.Errorf("get audit logs: %w", err)
	}
	defer rows.Close()
	var logs []AuditLog
	for rows.Next() {
		var l AuditLog
		var username, connID, details, ip sql.NullString
		if err := rows.Scan(&l.ID, &l.Action, &username, &connID, &details, &ip, &l.CreatedAt); err != nil {
			return nil, fmt.Errorf("scan audit log: %w", err)
		}
		// Nullable columns become nil pointers rather than empty strings.
		l.Username = nullStringToPtr(username)
		l.ConnectionID = nullStringToPtr(connID)
		l.Details = nullStringToPtr(details)
		l.IPAddress = nullStringToPtr(ip)
		logs = append(logs, l)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate audit log rows: %w", err)
	}
	return logs, nil
}


================================================
FILE: internal/database/audit_logs_test.go
================================================
package database

import (
	"path/filepath"
	"testing"
)

// openTestDB opens a throwaway database file in a per-test temp dir and
// registers cleanup to close it when the test finishes.
func openTestDB(t *testing.T) *DB {
	t.Helper()
	dbPath := filepath.Join(t.TempDir(), "test.db")
	db, err := Open(dbPath)
	if err != nil {
		t.Fatalf("open test db: %v", err)
	}
	t.Cleanup(func() { _ = db.Close() })
	return db
}

// insertAuditLogAt inserts one audit row with created_at set by a raw SQL
// expression (createdAtExpr), so tests can backdate rows for time-range
// filtering. The expression is concatenated into the statement — acceptable
// here only because it is test-controlled, never user input.
func insertAuditLogAt(t *testing.T, db *DB, action, username, details, ip, createdAtExpr string) {
	t.Helper()
	_, err := db.conn.Exec(
		`INSERT INTO audit_logs (id, action, username, connection_id, details, ip_address, created_at) VALUES (?, ?, ?, ?, ?, ?, `+createdAtExpr+`)`,
		action+"-id-"+username, action, username, "conn-1", details, ip,
	)
	if err != nil {
		t.Fatalf("insert audit log: %v", err)
	}
}

// Verifies that time-range, action, username and search filters all apply
// conjunctively: only the recent "connection.created" row by alice matches.
func TestGetAuditLogsFiltered_TimeRangeActionUsernameSearch(t *testing.T) {
	db := openTestDB(t)
	insertAuditLogAt(t, db, "connection.created", "alice", "Created warehouse connection", "10.0.0.10", "datetime('now', '-5 minutes')")
	insertAuditLogAt(t, db, "connection.deleted", "bob", "Deleted old tunnel", "10.0.0.20", "datetime('now', '-2 hours')")
	insertAuditLogAt(t, db, "user.login", "alice", "Login success", "10.0.0.11", "datetime('now', '-10 minutes')")
	rows, err := db.GetAuditLogsFiltered(100, "1h", "connection.created", "alice", "warehouse")
	if err != nil {
		t.Fatalf("GetAuditLogsFiltered: %v", err)
	}
	if len(rows) != 1 {
		t.Fatalf("expected 1 row, got %d", len(rows))
	}
	if rows[0].Action != "connection.created" {
		t.Fatalf("unexpected action: %s", rows[0].Action)
	}
	if rows[0].Username == nil || *rows[0].Username != "alice" {
		t.Fatalf("unexpected username: %+v", rows[0].Username)
	}
}

// Verifies the free-text search matches details, username and ip_address,
// and that matching is case-insensitive (e.g. "CHARLIE" matches "charlie").
func TestGetAuditLogsFiltered_SearchMatchesMultipleFieldsCaseInsensitive(t *testing.T) {
	db := openTestDB(t)
	insertAuditLogAt(t, db, "connection.created", "alice", "Created connection for ETL", "10.0.0.10", "datetime('now', '-5 minutes')")
	insertAuditLogAt(t, db, "user.login", "charlie", "Login success", "10.0.0.21", "datetime('now', '-4 minutes')")
	rows, err := db.GetAuditLogsFiltered(100, "", "", "", "etl")
	if err != nil {
		t.Fatalf("GetAuditLogsFiltered search details: %v", err)
	}
	if len(rows) != 1 || rows[0].Action != "connection.created" {
		t.Fatalf("expected details match on connection.created, got %+v", rows)
	}
	rows, err = db.GetAuditLogsFiltered(100, "", "", "", "CHARLIE")
	if err != nil {
		t.Fatalf("GetAuditLogsFiltered search username: %v", err)
	}
	if len(rows) != 1 || rows[0].Action != "user.login" {
		t.Fatalf("expected username match on user.login, got %+v", rows)
	}
	rows, err = db.GetAuditLogsFiltered(100, "", "", "", "10.0.0.10")
	if err != nil {
		t.Fatalf("GetAuditLogsFiltered search ip: %v", err)
	}
	if len(rows) != 1 || rows[0].Action != "connection.created" {
		t.Fatalf("expected ip match on connection.created, got %+v", rows)
	}
}


================================================
FILE: internal/database/brain.go
================================================
package database

import (
	"database/sql"
	"fmt"
	"strings"
	"time"

	"github.com/google/uuid"
)

// BrainProvider stores global AI provider configuration.
type BrainProvider struct {
	ID      string  `json:"id"`
	Name    string  `json:"name"`
	Kind    string  `json:"kind"`
	BaseURL *string `json:"base_url"`
	// HasAPIKey is derived at read time (key present and non-blank);
	// the key itself is never serialized on this type.
	HasAPIKey bool    `json:"has_api_key"`
	IsActive  bool    `json:"is_active"`
	IsDefault bool    `json:"is_default"`
	CreatedBy *string `json:"created_by"`
	CreatedAt string  `json:"created_at"`
	UpdatedAt string  `json:"updated_at"`
}

// BrainProviderSecret returns provider config with secret for runtime use.
type BrainProviderSecret struct {
	BrainProvider
	// Encrypted API key, excluded from JSON so it never leaks to clients.
	EncryptedAPIKey *string `json:"-"`
}

// BrainModel stores available models per provider.
type BrainModel struct {
	ID          string  `json:"id"`
	ProviderID  string  `json:"provider_id"`
	Name        string  `json:"name"`
	DisplayName *string `json:"display_name"`
	IsActive    bool    `json:"is_active"`
	IsDefault   bool    `json:"is_default"`
	CreatedAt   string  `json:"created_at"`
	UpdatedAt   string  `json:"updated_at"`
}

// BrainModelRuntime is used for chat execution.
// It flattens one model joined with its provider (see the runtime getters).
type BrainModelRuntime struct {
	ModelID              string
	ModelName            string
	ProviderID           string
	ProviderName         string
	ProviderKind         string
	ProviderBaseURL      *string
	ProviderEncryptedKey *string
	ModelActive          bool
	ProviderActive       bool
}

// BrainChat stores a user chat thread.
type BrainChat struct {
	ID           string  `json:"id"`
	ConnectionID string  `json:"connection_id"`
	Username     string  `json:"username"`
	Title        string  `json:"title"`
	ProviderID   *string `json:"provider_id"`
	ModelID      *string `json:"model_id"`
	Archived     bool    `json:"archived"`
	// LastMessageAt drives chat ordering (see GetBrainChatsByUser).
	LastMessageAt   *string `json:"last_message_at"`
	ContextDatabase *string `json:"context_database"`
	ContextTable    *string `json:"context_table"`
	// ContextTables presumably holds a serialized table list — confirm against callers.
	ContextTables *string `json:"context_tables"`
	CreatedAt     string  `json:"created_at"`
	UpdatedAt     string  `json:"updated_at"`
}

// BrainMessage stores one chat turn.
type BrainMessage struct {
	ID      string `json:"id"`
	ChatID  string `json:"chat_id"`
	Role    string `json:"role"`
	Content string `json:"content"`
	Status  string `json:"status"`
	Error   *string `json:"error"`
	CreatedAt string `json:"created_at"`
	UpdatedAt string `json:"updated_at"`
}

// BrainArtifact stores generated artifacts linked to chats/messages.
type BrainArtifact struct {
	ID        string  `json:"id"`
	ChatID    string  `json:"chat_id"`
	MessageID *string `json:"message_id"`
	Type      string  `json:"type"`
	Title     string  `json:"title"`
	Content   string  `json:"content"`
	CreatedBy *string `json:"created_by"`
	CreatedAt string  `json:"created_at"`
}

// BrainToolCall stores tool invocation traces.
type BrainToolCall struct {
	ID         string  `json:"id"`
	ChatID     string  `json:"chat_id"`
	MessageID  string  `json:"message_id"`
	ToolName   string  `json:"tool_name"`
	InputJSON  string  `json:"input_json"`
	OutputJSON string  `json:"output_json"`
	Status     string  `json:"status"`
	Error      *string `json:"error"`
	CreatedAt  string  `json:"created_at"`
}

// BrainSkill stores admin-managed assistant instructions.
type BrainSkill struct {
	ID        string  `json:"id"`
	Name      string  `json:"name"`
	Content   string  `json:"content"`
	IsActive  bool    `json:"is_active"`
	IsDefault bool    `json:"is_default"`
	CreatedBy *string `json:"created_by"`
	CreatedAt string  `json:"created_at"`
	UpdatedAt string  `json:"updated_at"`
}

// boolToInt maps a bool onto the 0/1 integers this package stores in
// boolean-like columns.
func boolToInt(v bool) int {
	if v {
		return 1
	}
	return 0
}

// intToBool treats any non-zero column value as true.
func intToBool(v int) bool { return v != 0 }

// nullableString converts a blank string to nil (stored as SQL NULL);
// non-blank values are trimmed before storage.
func nullableString(value string) interface{} {
	if strings.TrimSpace(value) == "" {
		return nil
	}
	return strings.TrimSpace(value)
}

// GetBrainProviders returns all providers.
func (db *DB) GetBrainProviders() ([]BrainProvider, error) { rows, err := db.conn.Query(`SELECT id, name, kind, base_url, encrypted_api_key, is_active, is_default, created_by, created_at, updated_at FROM brain_providers ORDER BY is_default DESC, name ASC`) if err != nil { return nil, fmt.Errorf("get brain providers: %w", err) } defer rows.Close() providers := make([]BrainProvider, 0) for rows.Next() { var p BrainProvider var baseURL, encrypted, createdBy sql.NullString var active, def int if err := rows.Scan(&p.ID, &p.Name, &p.Kind, &baseURL, &encrypted, &active, &def, &createdBy, &p.CreatedAt, &p.UpdatedAt); err != nil { return nil, fmt.Errorf("scan brain provider: %w", err) } p.BaseURL = nullStringToPtr(baseURL) p.HasAPIKey = encrypted.Valid && strings.TrimSpace(encrypted.String) != "" p.IsActive = intToBool(active) p.IsDefault = intToBool(def) p.CreatedBy = nullStringToPtr(createdBy) providers = append(providers, p) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate brain providers: %w", err) } return providers, nil } // GetBrainProviderByID returns provider config including encrypted key. 
func (db *DB) GetBrainProviderByID(id string) (*BrainProviderSecret, error) {
	row := db.conn.QueryRow(`SELECT id, name, kind, base_url, encrypted_api_key, is_active, is_default, created_by, created_at, updated_at FROM brain_providers WHERE id = ?`, id)
	var p BrainProviderSecret
	var baseURL, encrypted, createdBy sql.NullString
	var active, def int
	if err := row.Scan(&p.ID, &p.Name, &p.Kind, &baseURL, &encrypted, &active, &def, &createdBy, &p.CreatedAt, &p.UpdatedAt); err != nil {
		if err == sql.ErrNoRows {
			// Missing provider is reported as (nil, nil), not an error.
			return nil, nil
		}
		return nil, fmt.Errorf("get brain provider by id: %w", err)
	}
	p.BaseURL = nullStringToPtr(baseURL)
	p.EncryptedAPIKey = nullStringToPtr(encrypted)
	p.HasAPIKey = encrypted.Valid && strings.TrimSpace(encrypted.String) != ""
	p.IsActive = intToBool(active)
	p.IsDefault = intToBool(def)
	p.CreatedBy = nullStringToPtr(createdBy)
	return &p, nil
}

// CreateBrainProvider creates a provider.
// When isDefault is set, the default flag is first cleared on every existing
// provider inside the same transaction, so at most one default survives.
func (db *DB) CreateBrainProvider(name, kind, baseURL string, encryptedAPIKey *string, isActive, isDefault bool, createdBy string) (string, error) {
	now := time.Now().UTC().Format(time.RFC3339)
	id := uuid.NewString()
	tx, err := db.conn.Begin()
	if err != nil {
		return "", fmt.Errorf("begin create brain provider: %w", err)
	}
	// Rollback is a no-op once the transaction has committed.
	defer tx.Rollback()
	if isDefault {
		// Note: this also bumps updated_at on every provider row.
		if _, err := tx.Exec(`UPDATE brain_providers SET is_default = 0, updated_at = ?`, now); err != nil {
			return "", fmt.Errorf("clear default provider: %w", err)
		}
	}
	if _, err := tx.Exec(
		`INSERT INTO brain_providers (id, name, kind, base_url, encrypted_api_key, is_active, is_default, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		id, strings.TrimSpace(name), strings.TrimSpace(kind), nullableString(baseURL), encryptedAPIKey, boolToInt(isActive), boolToInt(isDefault), nullableString(createdBy), now, now,
	); err != nil {
		return "", fmt.Errorf("insert brain provider: %w", err)
	}
	if err := tx.Commit(); err != nil {
		return "", fmt.Errorf("commit create brain provider: %w", err)
	}
	return id, nil
}

// UpdateBrainProvider updates a provider.
// updateAPIKey selects between two UPDATE statements: only when true is the
// encrypted_api_key column written (nil clears it); otherwise the stored key
// is left untouched.
func (db *DB) UpdateBrainProvider(id, name, kind, baseURL string, encryptedAPIKey *string, updateAPIKey bool, isActive, isDefault bool) error {
	now := time.Now().UTC().Format(time.RFC3339)
	tx, err := db.conn.Begin()
	if err != nil {
		return fmt.Errorf("begin update brain provider: %w", err)
	}
	defer tx.Rollback()
	if isDefault {
		// Clear the default flag everywhere first (bumps updated_at on all rows).
		if _, err := tx.Exec(`UPDATE brain_providers SET is_default = 0, updated_at = ?`, now); err != nil {
			return fmt.Errorf("clear default provider: %w", err)
		}
	}
	if updateAPIKey {
		if _, err := tx.Exec(
			`UPDATE brain_providers SET name = ?, kind = ?, base_url = ?, encrypted_api_key = ?, is_active = ?, is_default = ?, updated_at = ? WHERE id = ?`,
			strings.TrimSpace(name), strings.TrimSpace(kind), nullableString(baseURL), encryptedAPIKey, boolToInt(isActive), boolToInt(isDefault), now, id,
		); err != nil {
			return fmt.Errorf("update brain provider: %w", err)
		}
	} else {
		if _, err := tx.Exec(
			`UPDATE brain_providers SET name = ?, kind = ?, base_url = ?, is_active = ?, is_default = ?, updated_at = ? WHERE id = ?`,
			strings.TrimSpace(name), strings.TrimSpace(kind), nullableString(baseURL), boolToInt(isActive), boolToInt(isDefault), now, id,
		); err != nil {
			return fmt.Errorf("update brain provider: %w", err)
		}
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("commit update brain provider: %w", err)
	}
	return nil
}

// DeleteBrainProvider removes a provider and cascades dependent rows.
// NOTE(review): cascading presumably relies on FK ON DELETE CASCADE defined
// in the schema migrations — confirm, this function issues only one DELETE.
func (db *DB) DeleteBrainProvider(id string) error {
	if _, err := db.conn.Exec(`DELETE FROM brain_providers WHERE id = ?`, id); err != nil {
		return fmt.Errorf("delete brain provider: %w", err)
	}
	return nil
}

// GetBrainModels returns models, optionally for one provider.
func (db *DB) GetBrainModels(providerID string) ([]BrainModel, error) { query := `SELECT id, provider_id, name, display_name, is_active, is_default, created_at, updated_at FROM brain_models` args := []interface{}{} if strings.TrimSpace(providerID) != "" { query += ` WHERE provider_id = ?` args = append(args, providerID) } query += ` ORDER BY is_default DESC, name ASC` rows, err := db.conn.Query(query, args...) if err != nil { return nil, fmt.Errorf("get brain models: %w", err) } defer rows.Close() models := make([]BrainModel, 0) for rows.Next() { var m BrainModel var display sql.NullString var active, def int if err := rows.Scan(&m.ID, &m.ProviderID, &m.Name, &display, &active, &def, &m.CreatedAt, &m.UpdatedAt); err != nil { return nil, fmt.Errorf("scan brain model: %w", err) } m.DisplayName = nullStringToPtr(display) m.IsActive = intToBool(active) m.IsDefault = intToBool(def) models = append(models, m) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate brain models: %w", err) } return models, nil } // GetBrainModelByID returns one model by id. func (db *DB) GetBrainModelByID(id string) (*BrainModel, error) { row := db.conn.QueryRow(`SELECT id, provider_id, name, display_name, is_active, is_default, created_at, updated_at FROM brain_models WHERE id = ?`, id) var m BrainModel var display sql.NullString var active, def int if err := row.Scan(&m.ID, &m.ProviderID, &m.Name, &display, &active, &def, &m.CreatedAt, &m.UpdatedAt); err != nil { if err == sql.ErrNoRows { return nil, nil } return nil, fmt.Errorf("get brain model by id: %w", err) } m.DisplayName = nullStringToPtr(display) m.IsActive = intToBool(active) m.IsDefault = intToBool(def) return &m, nil } // EnsureBrainModel inserts or updates a model by provider+name and returns its id. 
func (db *DB) EnsureBrainModel(providerID, name, displayName string) (string, error) {
	now := time.Now().UTC().Format(time.RFC3339)
	var existingID string
	// NOTE(review): this lookup-then-insert is not atomic; two concurrent
	// callers could both miss and insert. Harmless only if the schema has a
	// unique index on (provider_id, name) — confirm in migrations.
	row := db.conn.QueryRow(`SELECT id FROM brain_models WHERE provider_id = ? AND name = ?`, providerID, name)
	if err := row.Scan(&existingID); err != nil && err != sql.ErrNoRows {
		return "", fmt.Errorf("lookup brain model: %w", err)
	}
	if existingID != "" {
		// Existing model: refresh display name only; flags are left untouched.
		if _, err := db.conn.Exec(`UPDATE brain_models SET display_name = ?, updated_at = ? WHERE id = ?`, nullableString(displayName), now, existingID); err != nil {
			return "", fmt.Errorf("update brain model: %w", err)
		}
		return existingID, nil
	}
	id := uuid.NewString()
	// New models start inactive and non-default.
	if _, err := db.conn.Exec(
		`INSERT INTO brain_models (id, provider_id, name, display_name, is_active, is_default, created_at, updated_at) VALUES (?, ?, ?, ?, 0, 0, ?, ?)`,
		id, providerID, strings.TrimSpace(name), nullableString(displayName), now, now,
	); err != nil {
		return "", fmt.Errorf("insert brain model: %w", err)
	}
	return id, nil
}

// UpdateBrainModel updates model flags and display name.
// Making a model default clears the flag on its provider's other models in
// the same transaction, keeping at most one default per provider.
func (db *DB) UpdateBrainModel(id string, displayName string, isActive, isDefault bool) error {
	now := time.Now().UTC().Format(time.RFC3339)
	tx, err := db.conn.Begin()
	if err != nil {
		return fmt.Errorf("begin update brain model: %w", err)
	}
	defer tx.Rollback()
	var providerID string
	if err := tx.QueryRow(`SELECT provider_id FROM brain_models WHERE id = ?`, id).Scan(&providerID); err != nil {
		if err == sql.ErrNoRows {
			// NOTE(review): an unknown id is silently treated as success.
			return nil
		}
		return fmt.Errorf("load model provider: %w", err)
	}
	if isDefault {
		if _, err := tx.Exec(`UPDATE brain_models SET is_default = 0, updated_at = ? WHERE provider_id = ?`, now, providerID); err != nil {
			return fmt.Errorf("clear default model: %w", err)
		}
	}
	if _, err := tx.Exec(
		`UPDATE brain_models SET display_name = ?, is_active = ?, is_default = ?, updated_at = ? WHERE id = ?`,
		nullableString(displayName), boolToInt(isActive), boolToInt(isDefault), now, id,
	); err != nil {
		return fmt.Errorf("update brain model: %w", err)
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("commit update brain model: %w", err)
	}
	return nil
}

// SetBrainModelActive updates active flag for a model without touching default flag.
func (db *DB) SetBrainModelActive(id string, isActive bool) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE brain_models SET is_active = ?, updated_at = ? WHERE id = ?`,
		boolToInt(isActive), now, id,
	); err != nil {
		return fmt.Errorf("set brain model active: %w", err)
	}
	return nil
}

// ClearDefaultBrainModelsByProvider clears default flag for all models under one provider.
func (db *DB) ClearDefaultBrainModelsByProvider(providerID string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE brain_models SET is_default = 0, updated_at = ? WHERE provider_id = ?`,
		now, providerID,
	); err != nil {
		return fmt.Errorf("clear default brain models by provider: %w", err)
	}
	return nil
}

// ClearDefaultBrainModelByProviderExcept keeps one default and clears others for the same provider.
func (db *DB) ClearDefaultBrainModelByProviderExcept(providerID, keepModelID string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(
		`UPDATE brain_models SET is_default = 0, updated_at = ? WHERE provider_id = ? AND id <> ?`,
		now, providerID, keepModelID,
	); err != nil {
		return fmt.Errorf("clear default brain model except: %w", err)
	}
	return nil
}

// GetDefaultBrainModelRuntime returns the default active model and provider.
func (db *DB) GetDefaultBrainModelRuntime() (*BrainModelRuntime, error) { row := db.conn.QueryRow(` SELECT m.id, m.name, p.id, p.name, p.kind, p.base_url, p.encrypted_api_key, m.is_active, p.is_active FROM brain_models m JOIN brain_providers p ON p.id = m.provider_id WHERE m.is_active = 1 AND p.is_active = 1 ORDER BY m.is_default DESC, p.is_default DESC, m.created_at ASC LIMIT 1 `) var rt BrainModelRuntime var baseURL, encrypted sql.NullString var mActive, pActive int if err := row.Scan(&rt.ModelID, &rt.ModelName, &rt.ProviderID, &rt.ProviderName, &rt.ProviderKind, &baseURL, &encrypted, &mActive, &pActive); err != nil { if err == sql.ErrNoRows { return nil, nil } return nil, fmt.Errorf("get default brain model runtime: %w", err) } if baseURL.Valid { rt.ProviderBaseURL = &baseURL.String } if encrypted.Valid { rt.ProviderEncryptedKey = &encrypted.String } rt.ModelActive = intToBool(mActive) rt.ProviderActive = intToBool(pActive) return &rt, nil } // GetBrainModelRuntimeByID returns model/provider runtime config. func (db *DB) GetBrainModelRuntimeByID(modelID string) (*BrainModelRuntime, error) { row := db.conn.QueryRow(` SELECT m.id, m.name, p.id, p.name, p.kind, p.base_url, p.encrypted_api_key, m.is_active, p.is_active FROM brain_models m JOIN brain_providers p ON p.id = m.provider_id WHERE m.id = ? LIMIT 1 `, modelID) var rt BrainModelRuntime var baseURL, encrypted sql.NullString var mActive, pActive int if err := row.Scan(&rt.ModelID, &rt.ModelName, &rt.ProviderID, &rt.ProviderName, &rt.ProviderKind, &baseURL, &encrypted, &mActive, &pActive); err != nil { if err == sql.ErrNoRows { return nil, nil } return nil, fmt.Errorf("get brain model runtime by id: %w", err) } if baseURL.Valid { rt.ProviderBaseURL = &baseURL.String } if encrypted.Valid { rt.ProviderEncryptedKey = &encrypted.String } rt.ModelActive = intToBool(mActive) rt.ProviderActive = intToBool(pActive) return &rt, nil } // GetBrainSkills lists all skills. 
func (db *DB) GetBrainSkills() ([]BrainSkill, error) { rows, err := db.conn.Query(`SELECT id, name, content, is_active, is_default, created_by, created_at, updated_at FROM brain_skills ORDER BY is_default DESC, updated_at DESC`) if err != nil { return nil, fmt.Errorf("get brain skills: %w", err) } defer rows.Close() skills := make([]BrainSkill, 0) for rows.Next() { var s BrainSkill var active, def int var createdBy sql.NullString if err := rows.Scan(&s.ID, &s.Name, &s.Content, &active, &def, &createdBy, &s.CreatedAt, &s.UpdatedAt); err != nil { return nil, fmt.Errorf("scan brain skill: %w", err) } s.IsActive = intToBool(active) s.IsDefault = intToBool(def) s.CreatedBy = nullStringToPtr(createdBy) skills = append(skills, s) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate brain skills: %w", err) } return skills, nil } // GetActiveBrainSkill returns the active skill content. func (db *DB) GetActiveBrainSkill() (*BrainSkill, error) { row := db.conn.QueryRow(`SELECT id, name, content, is_active, is_default, created_by, created_at, updated_at FROM brain_skills WHERE is_active = 1 ORDER BY is_default DESC, updated_at DESC LIMIT 1`) var s BrainSkill var active, def int var createdBy sql.NullString if err := row.Scan(&s.ID, &s.Name, &s.Content, &active, &def, &createdBy, &s.CreatedAt, &s.UpdatedAt); err != nil { if err == sql.ErrNoRows { return nil, nil } return nil, fmt.Errorf("get active brain skill: %w", err) } s.IsActive = intToBool(active) s.IsDefault = intToBool(def) s.CreatedBy = nullStringToPtr(createdBy) return &s, nil } // GetBrainSkillByID returns one skill by id. 
func (db *DB) GetBrainSkillByID(id string) (*BrainSkill, error) { row := db.conn.QueryRow(`SELECT id, name, content, is_active, is_default, created_by, created_at, updated_at FROM brain_skills WHERE id = ?`, id) var s BrainSkill var active, def int var createdBy sql.NullString if err := row.Scan(&s.ID, &s.Name, &s.Content, &active, &def, &createdBy, &s.CreatedAt, &s.UpdatedAt); err != nil { if err == sql.ErrNoRows { return nil, nil } return nil, fmt.Errorf("get brain skill by id: %w", err) } s.IsActive = intToBool(active) s.IsDefault = intToBool(def) s.CreatedBy = nullStringToPtr(createdBy) return &s, nil } // UpsertDefaultBrainSkill stores a default skill, creating one if needed. func (db *DB) UpsertDefaultBrainSkill(name, content, createdBy string) (string, error) { now := time.Now().UTC().Format(time.RFC3339) var existingID string err := db.conn.QueryRow(`SELECT id FROM brain_skills WHERE is_default = 1 LIMIT 1`).Scan(&existingID) if err != nil && err != sql.ErrNoRows { return "", fmt.Errorf("lookup default brain skill: %w", err) } if existingID == "" { id := uuid.NewString() if _, err := db.conn.Exec(`INSERT INTO brain_skills (id, name, content, is_active, is_default, created_by, created_at, updated_at) VALUES (?, ?, ?, 1, 1, ?, ?, ?)`, id, name, content, nullableString(createdBy), now, now); err != nil { return "", fmt.Errorf("insert default brain skill: %w", err) } return id, nil } if _, err := db.conn.Exec(`UPDATE brain_skills SET name = ?, content = ?, is_active = 1, updated_at = ? WHERE id = ?`, name, content, now, existingID); err != nil { return "", fmt.Errorf("update default brain skill: %w", err) } return existingID, nil } // UpdateBrainSkill updates skill content and active/default flags. 
func (db *DB) UpdateBrainSkill(id, name, content string, isActive, isDefault bool) error { now := time.Now().UTC().Format(time.RFC3339) tx, err := db.conn.Begin() if err != nil { return fmt.Errorf("begin update brain skill: %w", err) } defer tx.Rollback() if isDefault { if _, err := tx.Exec(`UPDATE brain_skills SET is_default = 0, updated_at = ?`, now); err != nil { return fmt.Errorf("clear default brain skill: %w", err) } } if isActive { if _, err := tx.Exec(`UPDATE brain_skills SET is_active = 0, updated_at = ?`, now); err != nil { return fmt.Errorf("clear active brain skill: %w", err) } } if _, err := tx.Exec(`UPDATE brain_skills SET name = ?, content = ?, is_active = ?, is_default = ?, updated_at = ? WHERE id = ?`, name, content, boolToInt(isActive), boolToInt(isDefault), now, id); err != nil { return fmt.Errorf("update brain skill: %w", err) } if err := tx.Commit(); err != nil { return fmt.Errorf("commit update brain skill: %w", err) } return nil } // CreateBrainSkill creates a new skill. 
func (db *DB) CreateBrainSkill(name, content, createdBy string, isActive, isDefault bool) (string, error) {
	// Mirrors UpdateBrainSkill: clear the relevant flag on every existing row
	// inside one transaction before inserting, keeping at most one default and
	// one active skill.
	now := time.Now().UTC().Format(time.RFC3339)
	id := uuid.NewString()
	tx, err := db.conn.Begin()
	if err != nil {
		return "", fmt.Errorf("begin create brain skill: %w", err)
	}
	// Rollback is a no-op once Commit has succeeded.
	defer tx.Rollback()
	if isDefault {
		if _, err := tx.Exec(`UPDATE brain_skills SET is_default = 0, updated_at = ?`, now); err != nil {
			return "", fmt.Errorf("clear default brain skill: %w", err)
		}
	}
	if isActive {
		if _, err := tx.Exec(`UPDATE brain_skills SET is_active = 0, updated_at = ?`, now); err != nil {
			return "", fmt.Errorf("clear active brain skill: %w", err)
		}
	}
	if _, err := tx.Exec(`INSERT INTO brain_skills (id, name, content, is_active, is_default, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, id, name, content, boolToInt(isActive), boolToInt(isDefault), nullableString(createdBy), now, now); err != nil {
		return "", fmt.Errorf("insert brain skill: %w", err)
	}
	if err := tx.Commit(); err != nil {
		return "", fmt.Errorf("commit create brain skill: %w", err)
	}
	return id, nil
}

// GetBrainChatsByUser returns chats for a user scoped to connection.
// Archived chats are excluded unless includeArchived is true; results are
// ordered by most recent activity, falling back to updated_at for chats that
// have no messages yet.
func (db *DB) GetBrainChatsByUser(username, connectionID string, includeArchived bool) ([]BrainChat, error) {
	query := `SELECT id, connection_id, username, title, provider_id, model_id, archived, last_message_at, context_database, context_table, context_tables, created_at, updated_at FROM brain_chats WHERE username = ? AND connection_id = ?`
	args := []interface{}{username, connectionID}
	if !includeArchived {
		query += ` AND archived = 0`
	}
	query += ` ORDER BY COALESCE(last_message_at, updated_at) DESC`
	rows, err := db.conn.Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("get brain chats by user: %w", err)
	}
	defer rows.Close()
	// Non-nil empty slice so callers/JSON see [] rather than null.
	chats := make([]BrainChat, 0)
	for rows.Next() {
		chat, err := scanBrainChat(rows)
		if err != nil {
			return nil, err
		}
		chats = append(chats, chat)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate brain chats: %w", err)
	}
	return chats, nil
}

// scanBrainChat reads one brain_chats row from any scanner (*sql.Row or
// *sql.Rows both satisfy the anonymous interface) into a BrainChat,
// converting NULLable columns and the 0/1 archived flag.
func scanBrainChat(scanner interface {
	Scan(dest ...interface{}) error
}) (BrainChat, error) {
	var c BrainChat
	var providerID, modelID, lastMessageAt, ctxDB, ctxTable, ctxTables sql.NullString
	var archived int
	if err := scanner.Scan(&c.ID, &c.ConnectionID, &c.Username, &c.Title, &providerID, &modelID, &archived, &lastMessageAt, &ctxDB, &ctxTable, &ctxTables, &c.CreatedAt, &c.UpdatedAt); err != nil {
		return BrainChat{}, fmt.Errorf("scan brain chat: %w", err)
	}
	c.ProviderID = nullStringToPtr(providerID)
	c.ModelID = nullStringToPtr(modelID)
	c.LastMessageAt = nullStringToPtr(lastMessageAt)
	c.ContextDatabase = nullStringToPtr(ctxDB)
	c.ContextTable = nullStringToPtr(ctxTable)
	c.ContextTables = nullStringToPtr(ctxTables)
	c.Archived = intToBool(archived)
	return c, nil
}

// GetBrainChatByIDForUser loads one chat if owned by user.
// Returns (nil, nil) when the chat does not exist or belongs to another user.
func (db *DB) GetBrainChatByIDForUser(chatID, username string) (*BrainChat, error) {
	row := db.conn.QueryRow(`SELECT id, connection_id, username, title, provider_id, model_id, archived, last_message_at, context_database, context_table, context_tables, created_at, updated_at FROM brain_chats WHERE id = ?
		AND username = ?`, chatID, username)
	var c BrainChat
	var providerID, modelID, lastMessageAt, ctxDB, ctxTable, ctxTables sql.NullString
	var archived int
	if err := row.Scan(&c.ID, &c.ConnectionID, &c.Username, &c.Title, &providerID, &modelID, &archived, &lastMessageAt, &ctxDB, &ctxTable, &ctxTables, &c.CreatedAt, &c.UpdatedAt); err != nil {
		if err == sql.ErrNoRows {
			return nil, nil
		}
		return nil, fmt.Errorf("get brain chat by id for user: %w", err)
	}
	c.ProviderID = nullStringToPtr(providerID)
	c.ModelID = nullStringToPtr(modelID)
	c.LastMessageAt = nullStringToPtr(lastMessageAt)
	c.ContextDatabase = nullStringToPtr(ctxDB)
	c.ContextTable = nullStringToPtr(ctxTable)
	c.ContextTables = nullStringToPtr(ctxTables)
	c.Archived = intToBool(archived)
	return &c, nil
}

// CreateBrainChat creates a chat thread.
// A blank title defaults to "New Chat"; empty provider/model/context values
// are stored as SQL NULL via nullableString.
func (db *DB) CreateBrainChat(username, connectionID, title, providerID, modelID, contextDatabase, contextTable, contextTables string) (string, error) {
	now := time.Now().UTC().Format(time.RFC3339)
	id := uuid.NewString()
	if strings.TrimSpace(title) == "" {
		title = "New Chat"
	}
	if _, err := db.conn.Exec(
		`INSERT INTO brain_chats (id, connection_id, username, title, provider_id, model_id, archived, last_message_at, context_database, context_table, context_tables, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, 0, NULL, ?, ?, ?, ?, ?)`,
		id, connectionID, username, strings.TrimSpace(title), nullableString(providerID), nullableString(modelID), nullableString(contextDatabase), nullableString(contextTable), nullableString(contextTables), now, now,
	); err != nil {
		return "", fmt.Errorf("create brain chat: %w", err)
	}
	return id, nil
}

// UpdateBrainChat updates mutable chat properties.
func (db *DB) UpdateBrainChat(chatID, title, providerID, modelID string, archived bool, contextDatabase, contextTable, contextTables string) error {
	// Title is trimmed; empty provider/model/context strings become SQL NULL.
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(`UPDATE brain_chats SET title = ?, provider_id = ?, model_id = ?, archived = ?, context_database = ?, context_table = ?, context_tables = ?, updated_at = ? WHERE id = ?`, strings.TrimSpace(title), nullableString(providerID), nullableString(modelID), boolToInt(archived), nullableString(contextDatabase), nullableString(contextTable), nullableString(contextTables), now, chatID); err != nil {
		return fmt.Errorf("update brain chat: %w", err)
	}
	return nil
}

// TouchBrainChat updates last activity timestamp.
func (db *DB) TouchBrainChat(chatID string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(`UPDATE brain_chats SET last_message_at = ?, updated_at = ? WHERE id = ?`, now, now, chatID); err != nil {
		return fmt.Errorf("touch brain chat: %w", err)
	}
	return nil
}

// DeleteBrainChat deletes a chat.
// NOTE(review): related messages/artifacts are not deleted here — presumably
// handled by ON DELETE CASCADE in the schema; confirm in migrations.
func (db *DB) DeleteBrainChat(chatID string) error {
	if _, err := db.conn.Exec(`DELETE FROM brain_chats WHERE id = ?`, chatID); err != nil {
		return fmt.Errorf("delete brain chat: %w", err)
	}
	return nil
}

// GetBrainMessages lists all messages in a chat, oldest first.
func (db *DB) GetBrainMessages(chatID string) ([]BrainMessage, error) {
	rows, err := db.conn.Query(`SELECT id, chat_id, role, content, status, error, created_at, updated_at FROM brain_messages WHERE chat_id = ?
		ORDER BY created_at ASC`, chatID)
	if err != nil {
		return nil, fmt.Errorf("get brain messages: %w", err)
	}
	defer rows.Close()
	// Non-nil empty slice so callers/JSON see [] rather than null.
	messages := make([]BrainMessage, 0)
	for rows.Next() {
		var m BrainMessage
		var msgErr sql.NullString
		if err := rows.Scan(&m.ID, &m.ChatID, &m.Role, &m.Content, &m.Status, &msgErr, &m.CreatedAt, &m.UpdatedAt); err != nil {
			return nil, fmt.Errorf("scan brain message: %w", err)
		}
		m.Error = nullStringToPtr(msgErr)
		messages = append(messages, m)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate brain messages: %w", err)
	}
	return messages, nil
}

// CreateBrainMessage creates one message.
// A blank status defaults to "complete"; empty errorText is stored as NULL.
// Role and status are trimmed; content is stored verbatim.
func (db *DB) CreateBrainMessage(chatID, role, content, status, errorText string) (string, error) {
	now := time.Now().UTC().Format(time.RFC3339)
	id := uuid.NewString()
	if strings.TrimSpace(status) == "" {
		status = "complete"
	}
	if _, err := db.conn.Exec(`INSERT INTO brain_messages (id, chat_id, role, content, status, error, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, id, chatID, strings.TrimSpace(role), content, strings.TrimSpace(status), nullableString(errorText), now, now); err != nil {
		return "", fmt.Errorf("create brain message: %w", err)
	}
	return id, nil
}

// UpdateBrainMessage updates generated content/state.
func (db *DB) UpdateBrainMessage(id, content, status, errorText string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := db.conn.Exec(`UPDATE brain_messages SET content = ?, status = ?, error = ?, updated_at = ? WHERE id = ?`, content, strings.TrimSpace(status), nullableString(errorText), now, id); err != nil {
		return fmt.Errorf("update brain message: %w", err)
	}
	return nil
}

// CreateBrainArtifact stores a generated artifact.
func (db *DB) CreateBrainArtifact(chatID, messageID, artifactType, title, content, createdBy string) (string, error) { now := time.Now().UTC().Format(time.RFC3339) id := uuid.NewString() if _, err := db.conn.Exec(`INSERT INTO brain_artifacts (id, chat_id, message_id, artifact_type, title, content, created_by, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, id, chatID, nullableString(messageID), strings.TrimSpace(artifactType), strings.TrimSpace(title), content, nullableString(createdBy), now); err != nil { return "", fmt.Errorf("create brain artifact: %w", err) } return id, nil } // GetBrainArtifacts lists artifacts for a chat. func (db *DB) GetBrainArtifacts(chatID string) ([]BrainArtifact, error) { rows, err := db.conn.Query(`SELECT id, chat_id, message_id, artifact_type, title, content, created_by, created_at FROM brain_artifacts WHERE chat_id = ? ORDER BY created_at DESC`, chatID) if err != nil { return nil, fmt.Errorf("get brain artifacts: %w", err) } defer rows.Close() artifacts := make([]BrainArtifact, 0) for rows.Next() { var a BrainArtifact var messageID, createdBy sql.NullString if err := rows.Scan(&a.ID, &a.ChatID, &messageID, &a.Type, &a.Title, &a.Content, &createdBy, &a.CreatedAt); err != nil { return nil, fmt.Errorf("scan brain artifact: %w", err) } a.MessageID = nullStringToPtr(messageID) a.CreatedBy = nullStringToPtr(createdBy) artifacts = append(artifacts, a) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate brain artifacts: %w", err) } return artifacts, nil } // CreateBrainToolCall stores a tool execution trace. 
func (db *DB) CreateBrainToolCall(chatID, messageID, toolName, inputJSON, outputJSON, status, errorText string) (string, error) {
	// NOTE(review): unlike other writers in this file, messageID is stored
	// verbatim here (an empty string is not converted to NULL); only errorText
	// goes through nullableString. Confirm this is intentional.
	now := time.Now().UTC().Format(time.RFC3339)
	id := uuid.NewString()
	if _, err := db.conn.Exec(`INSERT INTO brain_tool_calls (id, chat_id, message_id, tool_name, input_json, output_json, status, error, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`, id, chatID, messageID, toolName, inputJSON, outputJSON, status, nullableString(errorText), now); err != nil {
		return "", fmt.Errorf("create brain tool call: %w", err)
	}
	return id, nil
}

// GetBrainModelsWithProvider returns active models and provider metadata for UI pickers.
// When activeOnly is true, both the model AND its provider must be active.
// Default model/provider rows sort first, then alphabetically.
func (db *DB) GetBrainModelsWithProvider(activeOnly bool) ([]map[string]interface{}, error) {
	query := `
		SELECT m.id, m.name, COALESCE(m.display_name, ''), m.provider_id, p.name, p.kind, m.is_active, m.is_default, p.is_active, p.is_default
		FROM brain_models m
		JOIN brain_providers p ON p.id = m.provider_id`
	if activeOnly {
		query += ` WHERE m.is_active = 1 AND p.is_active = 1`
	}
	query += ` ORDER BY m.is_default DESC, p.is_default DESC, p.name ASC, m.name ASC`
	rows, err := db.conn.Query(query)
	if err != nil {
		return nil, fmt.Errorf("get brain models with provider: %w", err)
	}
	defer rows.Close()
	// Non-nil empty slice so an empty result serializes as [].
	items := make([]map[string]interface{}, 0)
	for rows.Next() {
		var modelID, modelName, display, providerID, providerName, providerKind string
		var modelActive, modelDefault, providerActive, providerDefault int
		if err := rows.Scan(&modelID, &modelName, &display, &providerID, &providerName, &providerKind, &modelActive, &modelDefault, &providerActive, &providerDefault); err != nil {
			return nil, fmt.Errorf("scan brain model picker row: %w", err)
		}
		items = append(items, map[string]interface{}{
			"id":               modelID,
			"name":             modelName,
			"display_name":     display,
			"provider_id":      providerID,
			"provider_name":    providerName,
			"provider_kind":    providerKind,
			"is_active":        intToBool(modelActive),
			"is_default":       intToBool(modelDefault),
			"provider_active":  intToBool(providerActive),
			"provider_default": intToBool(providerDefault),
		})
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate brain model picker rows: %w", err)
	}
	return items, nil
}

================================================
FILE: internal/database/cleanup.go
================================================
package database

import (
	"log/slog"
	"time"
)

// StartCleanupJobs launches background goroutines that periodically clean up
// expired sessions and expired rate limits.
// NOTE(review): the tickers run for the life of the process — there is no
// stop/shutdown hook; confirm that matches the server lifecycle.
func (db *DB) StartCleanupJobs() {
	slog.Info("Starting periodic cleanup jobs...")
	// Cleanup expired sessions (every 1 hour)
	go func() {
		ticker := time.NewTicker(1 * time.Hour)
		defer ticker.Stop()
		for range ticker.C {
			db.cleanupExpiredSessions()
		}
	}()
	// Cleanup expired rate limits (every 10 minutes)
	go func() {
		ticker := time.NewTicker(10 * time.Minute)
		defer ticker.Stop()
		for range ticker.C {
			db.cleanupRateLimits()
		}
	}()
	slog.Info("Cleanup jobs scheduled")
}

// cleanupExpiredSessions removes sessions that have passed their expiration time.
// Errors are logged rather than returned because this runs from a background
// ticker goroutine with no caller to report to.
func (db *DB) cleanupExpiredSessions() {
	now := time.Now().UTC().Format(time.RFC3339)
	result, err := db.conn.Exec("DELETE FROM sessions WHERE expires_at < ?", now)
	if err != nil {
		slog.Error("Failed to cleanup expired sessions", "error", err)
		return
	}
	// Only log when something was actually removed to keep logs quiet.
	if affected, _ := result.RowsAffected(); affected > 0 {
		slog.Info("Cleaned up expired sessions", "count", affected)
	}
}

// cleanupRateLimits removes expired rate limit entries (15-minute window).
func (db *DB) cleanupRateLimits() {
	// Delegates to the shared helper; errors are logged, not returned, since
	// this runs from a background ticker goroutine.
	const windowMs int64 = 15 * 60 * 1000 // 15 minutes
	cleaned, err := db.CleanupExpiredRateLimits(windowMs)
	if err != nil {
		slog.Error("Failed to cleanup rate limits", "error", err)
		return
	}
	if cleaned > 0 {
		slog.Info("Cleaned up expired rate limits", "count", cleaned)
	}
}

================================================
FILE: internal/database/connections.go
================================================
package database

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"time"

	"github.com/google/uuid"
)

// Connection represents a connection record (agent or embedded).
type Connection struct {
	ID          string `json:"id"`
	Name        string `json:"name"`
	TunnelToken string `json:"tunnel_token"`
	// IsEmbedded is stored as a 0/1 integer column in SQLite.
	IsEmbedded bool   `json:"is_embedded"`
	Status     string `json:"status"`
	// LastSeenAt is nil until the connection first reports in.
	LastSeenAt *string `json:"last_seen_at"`
	// HostInfoJSON is the raw JSON blob written by UpdateConnectionHostInfo;
	// see HostInfo for the parsed shape.
	HostInfoJSON *string `json:"host_info"`
	CreatedAt    string  `json:"created_at"`
}

// HostInfo represents the host machine metrics reported by the tunnel agent.
type HostInfo struct {
	Hostname    string  `json:"hostname"`
	OS          string  `json:"os"`
	Arch        string  `json:"arch"`
	CPUCores    int     `json:"cpu_cores"`
	MemoryTotal int64   `json:"memory_total"`
	MemoryFree  int64   `json:"memory_free"`
	DiskTotal   int64   `json:"disk_total"`
	DiskFree    int64   `json:"disk_free"`
	GoVersion   string  `json:"go_version"`
	AgentUptime float64 `json:"agent_uptime"`
	CollectedAt string  `json:"collected_at"`
}

// GetConnections retrieves all connections ordered by creation date.
func (db *DB) GetConnections() ([]Connection, error) { rows, err := db.conn.Query( "SELECT id, name, tunnel_token, is_embedded, status, last_seen_at, host_info, created_at FROM connections ORDER BY created_at ASC", ) if err != nil { return nil, fmt.Errorf("get connections: %w", err) } defer rows.Close() var conns []Connection for rows.Next() { var c Connection var lastSeenAt, hostInfo sql.NullString var isEmbedded int if err := rows.Scan(&c.ID, &c.Name, &c.TunnelToken, &isEmbedded, &c.Status, &lastSeenAt, &hostInfo, &c.CreatedAt); err != nil { return nil, fmt.Errorf("scan connection: %w", err) } c.IsEmbedded = isEmbedded == 1 c.LastSeenAt = nullStringToPtr(lastSeenAt) c.HostInfoJSON = nullStringToPtr(hostInfo) conns = append(conns, c) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate connection rows: %w", err) } return conns, nil } // GetConnectionByToken retrieves a connection by its tunnel token. func (db *DB) GetConnectionByToken(token string) (*Connection, error) { row := db.conn.QueryRow( "SELECT id, name, tunnel_token, is_embedded, status, last_seen_at, host_info, created_at FROM connections WHERE tunnel_token = ?", token, ) var c Connection var lastSeenAt, hostInfo sql.NullString var isEmbedded int err := row.Scan( &c.ID, &c.Name, &c.TunnelToken, &isEmbedded, &c.Status, &lastSeenAt, &hostInfo, &c.CreatedAt, ) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get connection by token: %w", err) } c.IsEmbedded = isEmbedded == 1 c.LastSeenAt = nullStringToPtr(lastSeenAt) c.HostInfoJSON = nullStringToPtr(hostInfo) return &c, nil } // GetConnectionByTokenCtx retrieves a connection by its tunnel token using a context. // This is used by tunnel auth to avoid hanging while SQLite is busy. 
func (db *DB) GetConnectionByTokenCtx(ctx context.Context, token string) (*Connection, error) { row := db.conn.QueryRowContext(ctx, "SELECT id, name, tunnel_token, is_embedded, status, last_seen_at, host_info, created_at FROM connections WHERE tunnel_token = ?", token, ) var c Connection var lastSeenAt, hostInfo sql.NullString var isEmbedded int err := row.Scan( &c.ID, &c.Name, &c.TunnelToken, &isEmbedded, &c.Status, &lastSeenAt, &hostInfo, &c.CreatedAt, ) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get connection by token: %w", err) } c.IsEmbedded = isEmbedded == 1 c.LastSeenAt = nullStringToPtr(lastSeenAt) c.HostInfoJSON = nullStringToPtr(hostInfo) return &c, nil } // GetConnectionByID retrieves a connection by its ID. func (db *DB) GetConnectionByID(id string) (*Connection, error) { row := db.conn.QueryRow( "SELECT id, name, tunnel_token, is_embedded, status, last_seen_at, host_info, created_at FROM connections WHERE id = ?", id, ) var c Connection var lastSeenAt, hostInfo sql.NullString var isEmbedded int err := row.Scan(&c.ID, &c.Name, &c.TunnelToken, &isEmbedded, &c.Status, &lastSeenAt, &hostInfo, &c.CreatedAt) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get connection by id: %w", err) } c.IsEmbedded = isEmbedded == 1 c.LastSeenAt = nullStringToPtr(lastSeenAt) c.HostInfoJSON = nullStringToPtr(hostInfo) return &c, nil } // GetConnectionCount returns the total number of connections. func (db *DB) GetConnectionCount() (int, error) { var count int err := db.conn.QueryRow("SELECT COUNT(*) FROM connections").Scan(&count) if err != nil { return 0, fmt.Errorf("get connection count: %w", err) } return count, nil } // CreateConnection creates a new connection and returns its ID. 
func (db *DB) CreateConnection(name, token string, isEmbedded bool) (string, error) {
	// Insert only the identity columns; remaining columns (status, created_at)
	// presumably come from schema defaults — confirm in migrations.
	id := uuid.NewString()
	embedded := 0
	if isEmbedded {
		embedded = 1
	}
	_, err := db.conn.Exec(
		"INSERT INTO connections (id, name, tunnel_token, is_embedded) VALUES (?, ?, ?, ?)",
		id, name, token, embedded,
	)
	if err != nil {
		return "", fmt.Errorf("create connection: %w", err)
	}
	return id, nil
}

// UpdateConnectionStatus updates the status and last_seen_at of a connection.
// last_seen_at is set to the current UTC time as a side effect.
func (db *DB) UpdateConnectionStatus(id, status string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	_, err := db.conn.Exec(
		"UPDATE connections SET status = ?, last_seen_at = ? WHERE id = ?",
		status, now, id,
	)
	if err != nil {
		return fmt.Errorf("update connection status: %w", err)
	}
	return nil
}

// DeleteConnection deletes a connection by its ID.
// Deleting a non-existent ID is not an error (no row count check).
func (db *DB) DeleteConnection(id string) error {
	_, err := db.conn.Exec("DELETE FROM connections WHERE id = ?", id)
	if err != nil {
		return fmt.Errorf("delete connection: %w", err)
	}
	return nil
}

// UpdateConnectionToken updates the tunnel token for a connection.
func (db *DB) UpdateConnectionToken(id, newToken string) error {
	_, err := db.conn.Exec("UPDATE connections SET tunnel_token = ? WHERE id = ?", newToken, id)
	if err != nil {
		return fmt.Errorf("update connection token: %w", err)
	}
	return nil
}

// UpdateConnectionName updates the display name for a connection.
func (db *DB) UpdateConnectionName(id, newName string) error {
	_, err := db.conn.Exec("UPDATE connections SET name = ? WHERE id = ?", newName, id)
	if err != nil {
		return fmt.Errorf("update connection name: %w", err)
	}
	return nil
}

// UpdateConnectionHostInfo stores the host info JSON for a connection.
// The struct is serialized to JSON and written into the host_info column.
func (db *DB) UpdateConnectionHostInfo(connId string, info HostInfo) error {
	data, err := json.Marshal(info)
	if err != nil {
		return fmt.Errorf("marshal host info: %w", err)
	}
	_, err = db.conn.Exec(
		"UPDATE connections SET host_info = ? WHERE id = ?",
		string(data), connId,
	)
	if err != nil {
		return fmt.Errorf("update connection host info: %w", err)
	}
	return nil
}

// GetConnectionHostInfo retrieves the parsed host info for a connection.
// Returns (nil, nil) when the connection does not exist, when host_info is
// NULL/empty, or when the stored JSON fails to parse — malformed data is
// treated as absent rather than surfaced as an error.
func (db *DB) GetConnectionHostInfo(connId string) (*HostInfo, error) {
	var hostInfoStr sql.NullString
	err := db.conn.QueryRow(
		"SELECT host_info FROM connections WHERE id = ?",
		connId,
	).Scan(&hostInfoStr)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("get connection host info: %w", err)
	}
	if !hostInfoStr.Valid || hostInfoStr.String == "" {
		return nil, nil
	}
	var info HostInfo
	if err := json.Unmarshal([]byte(hostInfoStr.String), &info); err != nil {
		// Deliberate swallow: corrupt JSON is treated the same as no data.
		return nil, nil
	}
	return &info, nil
}

// GetEmbeddedConnection retrieves the embedded connection (if any).
// Returns (nil, nil) when no row has is_embedded = 1.
func (db *DB) GetEmbeddedConnection() (*Connection, error) {
	row := db.conn.QueryRow(
		"SELECT id, name, tunnel_token, is_embedded, status, last_seen_at, host_info, created_at FROM connections WHERE is_embedded = 1 LIMIT 1",
	)
	var c Connection
	var lastSeenAt, hostInfo sql.NullString
	var isEmbedded int
	err := row.Scan(&c.ID, &c.Name, &c.TunnelToken, &isEmbedded, &c.Status, &lastSeenAt, &hostInfo, &c.CreatedAt)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("get embedded connection: %w", err)
	}
	c.IsEmbedded = isEmbedded == 1
	c.LastSeenAt = nullStringToPtr(lastSeenAt)
	c.HostInfoJSON = nullStringToPtr(hostInfo)
	return &c, nil
}

================================================
FILE: internal/database/dashboards.go
================================================
package database

import (
	"database/sql"
	"fmt"
	"time"

	"github.com/google/uuid"
)

// Identity of the built-in dashboard seeded by EnsureSystemOverviewDashboard;
// rows with this created_by value are treated as the system dashboard.
const (
	systemDashboardName        = "System Overview"
	systemDashboardDescription = "Built-in operational dashboard for ClickHouse health and query performance."
	systemDashboardCreatedBy   = "system"
)

// Dashboard represents a dashboard record.
type Dashboard struct {
	ID   string `json:"id"`
	Name string `json:"name"`
	// Description is nil when the DB column is NULL.
	Description *string `json:"description"`
	// CreatedBy is nil when the DB column is NULL.
	CreatedBy *string `json:"created_by"`
	CreatedAt string  `json:"created_at"`
	UpdatedAt string  `json:"updated_at"`
}

// Panel represents a dashboard panel.
type Panel struct {
	ID          string `json:"id"`
	DashboardID string `json:"dashboard_id"`
	Name        string `json:"name"`
	PanelType   string `json:"panel_type"`
	Query       string `json:"query"`
	// ConnectionID is nil when the panel is not bound to a connection.
	ConnectionID *string `json:"connection_id"`
	// Config holds JSON-encoded panel configuration (defaults to "{}").
	Config    string `json:"config"`
	LayoutX   int    `json:"layout_x"`
	LayoutY   int    `json:"layout_y"`
	LayoutW   int    `json:"layout_w"`
	LayoutH   int    `json:"layout_h"`
	CreatedAt string `json:"created_at"`
	UpdatedAt string `json:"updated_at"`
}

// GetDashboards retrieves all dashboards.
// Ordered by most recently updated first.
func (db *DB) GetDashboards() ([]Dashboard, error) {
	rows, err := db.conn.Query(
		`SELECT id, name, description, created_by, created_at, updated_at FROM dashboards ORDER BY updated_at DESC`,
	)
	if err != nil {
		return nil, fmt.Errorf("get dashboards: %w", err)
	}
	defer rows.Close()
	var dashboards []Dashboard
	for rows.Next() {
		var d Dashboard
		var desc, createdBy sql.NullString
		if err := rows.Scan(&d.ID, &d.Name, &desc, &createdBy, &d.CreatedAt, &d.UpdatedAt); err != nil {
			return nil, fmt.Errorf("scan dashboard: %w", err)
		}
		d.Description = nullStringToPtr(desc)
		d.CreatedBy = nullStringToPtr(createdBy)
		dashboards = append(dashboards, d)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate dashboard rows: %w", err)
	}
	return dashboards, nil
}

// GetDashboardByID retrieves a dashboard by ID.
func (db *DB) GetDashboardByID(id string) (*Dashboard, error) { row := db.conn.QueryRow( `SELECT id, name, description, created_by, created_at, updated_at FROM dashboards WHERE id = ?`, id, ) var d Dashboard var desc, createdBy sql.NullString err := row.Scan(&d.ID, &d.Name, &desc, &createdBy, &d.CreatedAt, &d.UpdatedAt) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get dashboard by id: %w", err) } d.Description = nullStringToPtr(desc) d.CreatedBy = nullStringToPtr(createdBy) return &d, nil } // CreateDashboard creates a new dashboard and returns its ID. func (db *DB) CreateDashboard(name, description, createdBy string) (string, error) { id := uuid.NewString() now := time.Now().UTC().Format(time.RFC3339) var desc, creator interface{} if description != "" { desc = description } if createdBy != "" { creator = createdBy } _, err := db.conn.Exec( `INSERT INTO dashboards (id, name, description, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?)`, id, name, desc, creator, now, now, ) if err != nil { return "", fmt.Errorf("create dashboard: %w", err) } return id, nil } // UpdateDashboard updates a dashboard's name and description. func (db *DB) UpdateDashboard(id, name, description string) error { now := time.Now().UTC().Format(time.RFC3339) var desc interface{} if description != "" { desc = description } _, err := db.conn.Exec( "UPDATE dashboards SET name = ?, description = ?, updated_at = ? WHERE id = ?", name, desc, now, id, ) if err != nil { return fmt.Errorf("update dashboard: %w", err) } return nil } // DeleteDashboard deletes a dashboard and all its panels (cascade). func (db *DB) DeleteDashboard(id string) error { _, err := db.conn.Exec("DELETE FROM dashboards WHERE id = ?", id) if err != nil { return fmt.Errorf("delete dashboard: %w", err) } return nil } // GetPanelsByDashboard retrieves all panels for a dashboard. 
func (db *DB) GetPanelsByDashboard(dashboardID string) ([]Panel, error) { rows, err := db.conn.Query( `SELECT id, dashboard_id, name, panel_type, query, connection_id, config, layout_x, layout_y, layout_w, layout_h, created_at, updated_at FROM panels WHERE dashboard_id = ? ORDER BY layout_y ASC, layout_x ASC`, dashboardID, ) if err != nil { return nil, fmt.Errorf("get panels by dashboard: %w", err) } defer rows.Close() var panels []Panel for rows.Next() { var p Panel var connID sql.NullString if err := rows.Scan(&p.ID, &p.DashboardID, &p.Name, &p.PanelType, &p.Query, &connID, &p.Config, &p.LayoutX, &p.LayoutY, &p.LayoutW, &p.LayoutH, &p.CreatedAt, &p.UpdatedAt); err != nil { return nil, fmt.Errorf("scan panel: %w", err) } p.ConnectionID = nullStringToPtr(connID) panels = append(panels, p) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate panel rows: %w", err) } return panels, nil } // GetPanelByID retrieves a panel by ID. func (db *DB) GetPanelByID(id string) (*Panel, error) { row := db.conn.QueryRow( `SELECT id, dashboard_id, name, panel_type, query, connection_id, config, layout_x, layout_y, layout_w, layout_h, created_at, updated_at FROM panels WHERE id = ?`, id, ) var p Panel var connID sql.NullString err := row.Scan(&p.ID, &p.DashboardID, &p.Name, &p.PanelType, &p.Query, &connID, &p.Config, &p.LayoutX, &p.LayoutY, &p.LayoutW, &p.LayoutH, &p.CreatedAt, &p.UpdatedAt) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get panel by id: %w", err) } p.ConnectionID = nullStringToPtr(connID) return &p, nil } // CreatePanel creates a new panel and returns its ID. 
func (db *DB) CreatePanel(dashboardID, name, panelType, query, connectionID, config string, x, y, w, h int) (string, error) { id := uuid.NewString() now := time.Now().UTC().Format(time.RFC3339) var connID interface{} if connectionID != "" { connID = connectionID } if config == "" { config = "{}" } _, err := db.conn.Exec( `INSERT INTO panels (id, dashboard_id, name, panel_type, query, connection_id, config, layout_x, layout_y, layout_w, layout_h, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, id, dashboardID, name, panelType, query, connID, config, x, y, w, h, now, now, ) if err != nil { return "", fmt.Errorf("create panel: %w", err) } return id, nil } // UpdatePanel updates a panel. func (db *DB) UpdatePanel(id, name, panelType, query, connectionID, config string, x, y, w, h int) error { now := time.Now().UTC().Format(time.RFC3339) var connID interface{} if connectionID != "" { connID = connectionID } _, err := db.conn.Exec( `UPDATE panels SET name = ?, panel_type = ?, query = ?, connection_id = ?, config = ?, layout_x = ?, layout_y = ?, layout_w = ?, layout_h = ?, updated_at = ? WHERE id = ?`, name, panelType, query, connID, config, x, y, w, h, now, id, ) if err != nil { return fmt.Errorf("update panel: %w", err) } return nil } // DeletePanel deletes a panel by ID. func (db *DB) DeletePanel(id string) error { _, err := db.conn.Exec("DELETE FROM panels WHERE id = ?", id) if err != nil { return fmt.Errorf("delete panel: %w", err) } return nil } type seededPanel struct { Name string PanelType string Query string Config string X int Y int W int H int } // EnsureSystemOverviewDashboard creates or updates a built-in default dashboard // with operational ClickHouse metrics. 
func (db *DB) EnsureSystemOverviewDashboard() error {
	// Seed inside a single transaction so a partially-created dashboard is
	// never visible to readers.
	tx, err := db.conn.Begin()
	if err != nil {
		return fmt.Errorf("begin system dashboard transaction: %w", err)
	}
	// Rollback is a no-op once Commit has succeeded.
	defer tx.Rollback()
	now := time.Now().UTC().Format(time.RFC3339)
	// Locate the built-in dashboard by its reserved created_by marker:
	// insert it on first run, otherwise refresh its name/description.
	dashboardID := ""
	if err := tx.QueryRow(
		`SELECT id FROM dashboards WHERE created_by = ? LIMIT 1`,
		systemDashboardCreatedBy,
	).Scan(&dashboardID); err != nil {
		if err == sql.ErrNoRows {
			dashboardID = uuid.NewString()
			if _, err := tx.Exec(
				`INSERT INTO dashboards (id, name, description, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?)`,
				dashboardID, systemDashboardName, systemDashboardDescription, systemDashboardCreatedBy, now, now,
			); err != nil {
				return fmt.Errorf("insert system dashboard: %w", err)
			}
		} else {
			return fmt.Errorf("get system dashboard: %w", err)
		}
	} else {
		if _, err := tx.Exec(
			`UPDATE dashboards SET name = ?, description = ?, updated_at = ? WHERE id = ?`,
			systemDashboardName, systemDashboardDescription, now, dashboardID,
		); err != nil {
			return fmt.Errorf("update system dashboard metadata: %w", err)
		}
	}
	// Built-in panel definitions. $__interval / $__timestamp are placeholder
	// macros in the stored SQL — presumably expanded by the panel query
	// runner at render time; confirm against the query execution path.
	panels := []seededPanel{
		{
			Name:      "ClickHouse Version",
			PanelType: "stat",
			Query:     `SELECT version() AS version`,
			Config:    `{"chartType":"stat"}`,
			X:         0, Y: 0, W: 2, H: 3,
		},
		{
			Name:      "Uptime (seconds)",
			PanelType: "stat",
			Query:     `SELECT toUInt64(anyIf(value, metric = 'Uptime')) AS uptime_sec FROM system.asynchronous_metrics`,
			Config:    `{"chartType":"stat"}`,
			X:         2, Y: 0, W: 2, H: 3,
		},
		{
			Name:      "Active Queries",
			PanelType: "stat",
			Query:     `SELECT count() AS active_queries FROM system.processes`,
			Config:    `{"chartType":"stat"}`,
			X:         4, Y: 0, W: 2, H: 3,
		},
		{
			Name:      "Connected Users",
			PanelType: "stat",
			Query:     `SELECT uniq(user) AS connected_users FROM system.processes`,
			Config:    `{"chartType":"stat"}`,
			X:         6, Y: 0, W: 2, H: 3,
		},
		{
			Name:      "Databases",
			PanelType: "stat",
			Query:     `SELECT count() AS databases FROM system.databases`,
			Config:    `{"chartType":"stat"}`,
			X:         8, Y: 0, W: 2, H: 3,
		},
		{
			Name:      "Tables",
			PanelType: "stat",
			Query:     `SELECT count() AS tables FROM system.tables WHERE database NOT IN ('system', 'INFORMATION_SCHEMA', 'information_schema')`,
			Config:    `{"chartType":"stat"}`,
			X:         10, Y: 0, W: 2, H: 3,
		},
		{
			Name:      "Queries / interval",
			PanelType: "timeseries",
			Query:     `SELECT toStartOfInterval(event_time, INTERVAL $__interval second) AS ts, count() AS queries FROM system.query_log WHERE type = 'QueryFinish' AND $__timestamp(event_time) GROUP BY ts ORDER BY ts`,
			Config:    `{"chartType":"timeseries","xColumn":"ts","yColumns":["queries"],"colors":["#F97316"],"legendPosition":"bottom"}`,
			X:         0, Y: 3, W: 4, H: 5,
		},
		{
			Name:      "P95 Query Latency (ms) / interval",
			PanelType: "timeseries",
			Query:     `SELECT toStartOfInterval(event_time, INTERVAL $__interval second) AS ts, round(quantile(0.95)(query_duration_ms), 2) AS p95_ms FROM system.query_log WHERE type = 'QueryFinish' AND $__timestamp(event_time) GROUP BY ts ORDER BY ts`,
			Config:    `{"chartType":"timeseries","xColumn":"ts","yColumns":["p95_ms"],"colors":["#EF4444"],"legendPosition":"bottom"}`,
			X:         4, Y: 3, W: 4, H: 5,
		},
		{
			Name:      "Read MB / interval",
			PanelType: "timeseries",
			Query:     `SELECT toStartOfInterval(event_time, INTERVAL $__interval second) AS ts, round(sum(read_bytes) / 1048576, 2) AS read_mb FROM system.query_log WHERE type = 'QueryFinish' AND $__timestamp(event_time) GROUP BY ts ORDER BY ts`,
			Config:    `{"chartType":"timeseries","xColumn":"ts","yColumns":["read_mb"],"colors":["#10B981"],"legendPosition":"bottom"}`,
			X:         8, Y: 3, W: 4, H: 5,
		},
		{
			Name:      "Top Slow Queries",
			PanelType: "table",
			Query:     `SELECT query_id, user, query_duration_ms, read_rows, formatReadableSize(read_bytes) AS read_size, left(query, 120) AS sample_query FROM system.query_log WHERE type = 'QueryFinish' AND $__timestamp(event_time) ORDER BY query_duration_ms DESC LIMIT 50`,
			Config:    `{"chartType":"table"}`,
			X:         0, Y: 8, W: 7, H: 6,
		},
		{
			Name:      "Top Tables by Size",
			PanelType: "table",
			Query:     `SELECT concat(database, '.', table) AS table_name, round(sum(bytes_on_disk) / 1048576, 2) AS size_mb, sum(rows) AS rows FROM system.parts WHERE active GROUP BY table_name ORDER BY size_mb DESC LIMIT 50`,
			Config:    `{"chartType":"table"}`,
			X:         7, Y: 8, W: 5, H: 6,
		},
		{
			Name:      "Disk Space",
			PanelType: "table",
			Query:     `SELECT name, path, round(total_space / 1073741824, 2) AS total_gb, round(free_space / 1073741824, 2) AS free_gb, round((total_space - free_space) / 1073741824, 2) AS used_gb FROM system.disks ORDER BY used_gb DESC`,
			Config:    `{"chartType":"table"}`,
			X:         0, Y: 14, W: 6, H: 5,
		},
		{
			Name:      "Background Operations",
			PanelType: "table",
			Query:     `SELECT 'merges_running' AS metric, count() AS value FROM system.merges UNION ALL SELECT 'mutations_pending' AS metric, countIf(is_done = 0) AS value FROM system.mutations UNION ALL SELECT 'replication_queue' AS metric, sum(queue_size) AS value FROM system.replicas`,
			Config:    `{"chartType":"table"}`,
			X:         6, Y: 14, W: 6, H: 5,
		},
	}
	// Index the panels already present so seeding is idempotent: match by
	// name, update matches in place, insert only the missing ones.
	existing := map[string]string{}
	rows, err := tx.Query(`SELECT id, name FROM panels WHERE dashboard_id = ?`, dashboardID)
	if err != nil {
		return fmt.Errorf("list existing system panels: %w", err)
	}
	defer rows.Close()
	for rows.Next() {
		var panelID, name string
		if err := rows.Scan(&panelID, &name); err != nil {
			return fmt.Errorf("scan existing system panel: %w", err)
		}
		existing[name] = panelID
	}
	if err := rows.Err(); err != nil {
		return fmt.Errorf("iterate existing system panels: %w", err)
	}
	for _, p := range panels {
		if panelID, ok := existing[p.Name]; ok {
			// Refresh the stored definition; connection_id is reset to NULL
			// (the seeded panels are not pinned to a connection).
			if _, err := tx.Exec(
				`UPDATE panels SET panel_type = ?, query = ?, connection_id = NULL, config = ?, layout_x = ?, layout_y = ?, layout_w = ?, layout_h = ?, updated_at = ?
				WHERE id = ?`,
				p.PanelType, p.Query, p.Config, p.X, p.Y, p.W, p.H, now, panelID,
			); err != nil {
				return fmt.Errorf("update system panel %q: %w", p.Name, err)
			}
		} else {
			if _, err := tx.Exec(
				`INSERT INTO panels ( id, dashboard_id, name, panel_type, query, connection_id, config, layout_x, layout_y, layout_w, layout_h, created_at, updated_at ) VALUES (?, ?, ?, ?, ?, NULL, ?, ?, ?, ?, ?, ?, ?)`,
				uuid.NewString(), dashboardID, p.Name, p.PanelType, p.Query, p.Config, p.X, p.Y, p.W, p.H, now, now,
			); err != nil {
				return fmt.Errorf("insert system panel %q: %w", p.Name, err)
			}
		}
	}
	if err := tx.Commit(); err != nil {
		return fmt.Errorf("commit system dashboard seed: %w", err)
	}
	return nil
}


================================================
FILE: internal/database/database.go
================================================
package database

import (
	"database/sql"
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"strconv"

	_ "modernc.org/sqlite"
)

// nullStringToPtr converts a sql.NullString to a *string (nil if not valid).
func nullStringToPtr(ns sql.NullString) *string {
	if ns.Valid {
		return &ns.String
	}
	return nil
}

// DB wraps the SQLite connection.
type DB struct {
	conn *sql.DB // pooled SQLite handle
	path string  // filesystem path the database was opened from
}

// Open opens the SQLite database at the given path, runs migrations, and returns a DB.
func Open(path string) (*DB, error) {
	// Ensure directory exists
	dir := filepath.Dir(path)
	if dir != "" && dir != "." {
		if err := os.MkdirAll(dir, 0755); err != nil {
			// Non-fatal here: a real failure surfaces when the database is
			// opened and pinged below.
			slog.Warn("Could not create database directory", "dir", dir, "error", err)
		}
	}
	// DSN pragmas: enforce foreign keys, use WAL journaling, and wait up to
	// 5s on a locked database instead of failing immediately.
	dsn := fmt.Sprintf("%s?_pragma=foreign_keys(1)&_pragma=journal_mode(wal)&_pragma=busy_timeout(5000)", path)
	conn, err := sql.Open("sqlite", dsn)
	if err != nil {
		return nil, fmt.Errorf("open sqlite: %w", err)
	}
	// SQLite is single-writer, but WAL allows concurrent readers.
	// Keep a small pool so reads (session/token checks) are not blocked by long sync writes.
	// Default pool size of 8; overridable via CHUI_SQLITE_MAX_OPEN_CONNS.
	// Only positive integer values are accepted — anything else silently
	// keeps the default.
	maxOpenConns := 8
	if raw := os.Getenv("CHUI_SQLITE_MAX_OPEN_CONNS"); raw != "" {
		if parsed, parseErr := strconv.Atoi(raw); parseErr == nil && parsed > 0 {
			maxOpenConns = parsed
		}
	}
	conn.SetMaxOpenConns(maxOpenConns)
	conn.SetMaxIdleConns(maxOpenConns)
	// Verify connection
	if err := conn.Ping(); err != nil {
		conn.Close()
		return nil, fmt.Errorf("ping sqlite: %w", err)
	}
	db := &DB{conn: conn, path: path}
	// Run migrations
	if err := db.runMigrations(); err != nil {
		conn.Close()
		return nil, fmt.Errorf("migrations: %w", err)
	}
	slog.Info("Database initialized", "path", path)
	return db, nil
}

// Close closes the database connection.
func (db *DB) Close() error {
	return db.conn.Close()
}

// Conn returns the underlying sql.DB for advanced usage.
// Callers share the pooled handle and must not Close it themselves.
func (db *DB) Conn() *sql.DB {
	return db.conn
}


================================================
FILE: internal/database/migrations.go
================================================
package database

import (
	"fmt"
	"log/slog"
	"strings"

	"github.com/google/uuid"
)

// runMigrations creates the SQLite schema at startup. The statements listed
// here all use CREATE ... IF NOT EXISTS, so re-running against an existing
// database is safe.
func (db *DB) runMigrations() error {
	slog.Info("Running database migrations...")
	stmts := []string{
		// Installation settings (key-value store)
		`CREATE TABLE IF NOT EXISTS settings ( key TEXT PRIMARY KEY, value TEXT NOT NULL, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`,
		// Connections (replaces tunnel_connections, no org_id)
		`CREATE TABLE IF NOT EXISTS connections ( id TEXT PRIMARY KEY, name TEXT NOT NULL, tunnel_token TEXT UNIQUE NOT NULL, is_embedded INTEGER DEFAULT 0, status TEXT DEFAULT 'disconnected', last_seen_at TEXT, host_info TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`,
		`CREATE UNIQUE INDEX IF NOT EXISTS idx_conn_token ON connections(tunnel_token)`,
		// Sessions (no org_id)
		`CREATE TABLE IF NOT EXISTS sessions ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, clickhouse_user TEXT NOT NULL, encrypted_password TEXT NOT NULL, token TEXT UNIQUE NOT NULL, expires_at TEXT NOT NULL, user_role TEXT DEFAULT 'viewer',
created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE UNIQUE INDEX IF NOT EXISTS idx_session_token ON sessions(token)`, `CREATE INDEX IF NOT EXISTS idx_session_conn ON sessions(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_session_expires ON sessions(expires_at)`, // Rate limits `CREATE TABLE IF NOT EXISTS rate_limits ( identifier TEXT PRIMARY KEY, type TEXT NOT NULL, attempts INTEGER NOT NULL DEFAULT 0, first_attempt_at TEXT NOT NULL, locked_until TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_rate_limit_type ON rate_limits(type)`, `CREATE INDEX IF NOT EXISTS idx_rate_limit_locked ON rate_limits(locked_until)`, // User role overrides `CREATE TABLE IF NOT EXISTS user_roles ( username TEXT PRIMARY KEY, role TEXT NOT NULL DEFAULT 'viewer', created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, // Saved queries (was in ClickHouse, now SQLite) `CREATE TABLE IF NOT EXISTS saved_queries ( id TEXT PRIMARY KEY, name TEXT NOT NULL, description TEXT, query TEXT NOT NULL, connection_id TEXT REFERENCES connections(id) ON DELETE SET NULL, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, // Dashboards (was in ClickHouse, now SQLite) `CREATE TABLE IF NOT EXISTS dashboards ( id TEXT PRIMARY KEY, name TEXT NOT NULL, description TEXT, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, // Dashboard panels `CREATE TABLE IF NOT EXISTS panels ( id TEXT PRIMARY KEY, dashboard_id TEXT NOT NULL REFERENCES dashboards(id) ON DELETE CASCADE, name TEXT NOT NULL, panel_type TEXT NOT NULL DEFAULT 'table', query TEXT NOT NULL, connection_id TEXT REFERENCES connections(id) ON DELETE SET NULL, config TEXT DEFAULT '{}', layout_x INTEGER DEFAULT 0, layout_y INTEGER DEFAULT 0, layout_w INTEGER DEFAULT 6, layout_h INTEGER DEFAULT 4, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, 
`CREATE INDEX IF NOT EXISTS idx_panel_dashboard ON panels(dashboard_id)`, // Schedules (was in ClickHouse, now SQLite) `CREATE TABLE IF NOT EXISTS schedules ( id TEXT PRIMARY KEY, name TEXT NOT NULL, saved_query_id TEXT REFERENCES saved_queries(id) ON DELETE CASCADE, connection_id TEXT REFERENCES connections(id) ON DELETE SET NULL, cron TEXT NOT NULL, timezone TEXT DEFAULT 'UTC', enabled INTEGER DEFAULT 1, timeout_ms INTEGER DEFAULT 60000, last_run_at TEXT, next_run_at TEXT, last_status TEXT, last_error TEXT, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, // Schedule runs `CREATE TABLE IF NOT EXISTS schedule_runs ( id TEXT PRIMARY KEY, schedule_id TEXT NOT NULL REFERENCES schedules(id) ON DELETE CASCADE, started_at TEXT NOT NULL, finished_at TEXT, status TEXT NOT NULL, rows_affected INTEGER DEFAULT 0, elapsed_ms INTEGER DEFAULT 0, error TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_sched_run_schedule ON schedule_runs(schedule_id)`, // Audit logs (was stub, now real) `CREATE TABLE IF NOT EXISTS audit_logs ( id TEXT PRIMARY KEY, action TEXT NOT NULL, username TEXT, connection_id TEXT, details TEXT, ip_address TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_audit_created ON audit_logs(created_at)`, // Brain providers (admin-managed) `CREATE TABLE IF NOT EXISTS brain_providers ( id TEXT PRIMARY KEY, name TEXT NOT NULL, kind TEXT NOT NULL, base_url TEXT, encrypted_api_key TEXT, is_active INTEGER DEFAULT 1, is_default INTEGER DEFAULT 0, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_brain_provider_active ON brain_providers(is_active)`, // Brain models by provider `CREATE TABLE IF NOT EXISTS brain_models ( id TEXT PRIMARY KEY, provider_id TEXT NOT NULL REFERENCES brain_providers(id) ON DELETE CASCADE, name TEXT NOT NULL, display_name TEXT, is_active 
INTEGER DEFAULT 1, is_default INTEGER DEFAULT 0, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP, UNIQUE(provider_id, name) )`, `CREATE INDEX IF NOT EXISTS idx_brain_model_provider ON brain_models(provider_id)`, `CREATE INDEX IF NOT EXISTS idx_brain_model_active ON brain_models(is_active)`, // Brain chats `CREATE TABLE IF NOT EXISTS brain_chats ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, username TEXT NOT NULL, title TEXT NOT NULL, provider_id TEXT REFERENCES brain_providers(id) ON DELETE SET NULL, model_id TEXT REFERENCES brain_models(id) ON DELETE SET NULL, archived INTEGER DEFAULT 0, last_message_at TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_brain_chat_user ON brain_chats(username, connection_id)`, `CREATE INDEX IF NOT EXISTS idx_brain_chat_lastmsg ON brain_chats(last_message_at)`, // Brain messages `CREATE TABLE IF NOT EXISTS brain_messages ( id TEXT PRIMARY KEY, chat_id TEXT NOT NULL REFERENCES brain_chats(id) ON DELETE CASCADE, role TEXT NOT NULL, content TEXT NOT NULL, status TEXT NOT NULL DEFAULT 'complete', error TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_brain_msg_chat ON brain_messages(chat_id, created_at)`, // Brain artifacts `CREATE TABLE IF NOT EXISTS brain_artifacts ( id TEXT PRIMARY KEY, chat_id TEXT NOT NULL REFERENCES brain_chats(id) ON DELETE CASCADE, message_id TEXT REFERENCES brain_messages(id) ON DELETE SET NULL, artifact_type TEXT NOT NULL, title TEXT NOT NULL, content TEXT NOT NULL, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_brain_artifact_chat ON brain_artifacts(chat_id, created_at)`, // Brain tool call traces `CREATE TABLE IF NOT EXISTS brain_tool_calls ( id TEXT PRIMARY KEY, chat_id TEXT NOT NULL REFERENCES brain_chats(id) ON DELETE 
CASCADE, message_id TEXT NOT NULL REFERENCES brain_messages(id) ON DELETE CASCADE, tool_name TEXT NOT NULL, input_json TEXT NOT NULL, output_json TEXT NOT NULL, status TEXT NOT NULL, error TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_brain_tool_chat ON brain_tool_calls(chat_id, created_at)`, // Brain skills (admin-managed system prompts) `CREATE TABLE IF NOT EXISTS brain_skills ( id TEXT PRIMARY KEY, name TEXT NOT NULL, content TEXT NOT NULL, is_active INTEGER DEFAULT 1, is_default INTEGER DEFAULT 0, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_brain_skill_active ON brain_skills(is_active)`, // ══════════════════════════════════════════════════════════════ // Governance tables (Pro feature) // ══════════════════════════════════════════════════════════════ // Governance sync state (watermark tracking per connection) `CREATE TABLE IF NOT EXISTS gov_sync_state ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, sync_type TEXT NOT NULL, last_synced_at TEXT, watermark TEXT, status TEXT DEFAULT 'idle', last_error TEXT, row_count INTEGER DEFAULT 0, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP, UNIQUE(connection_id, sync_type) )`, `CREATE INDEX IF NOT EXISTS idx_gov_sync_conn ON gov_sync_state(connection_id)`, // Governance databases `CREATE TABLE IF NOT EXISTS gov_databases ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, name TEXT NOT NULL, engine TEXT, first_seen TEXT NOT NULL, last_updated TEXT NOT NULL, is_deleted INTEGER DEFAULT 0, UNIQUE(connection_id, name) )`, `CREATE INDEX IF NOT EXISTS idx_gov_db_conn ON gov_databases(connection_id)`, // Governance tables `CREATE TABLE IF NOT EXISTS gov_tables ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, database_name 
TEXT NOT NULL, table_name TEXT NOT NULL, engine TEXT, table_uuid TEXT, total_rows INTEGER DEFAULT 0, total_bytes INTEGER DEFAULT 0, partition_count INTEGER DEFAULT 0, first_seen TEXT NOT NULL, last_updated TEXT NOT NULL, is_deleted INTEGER DEFAULT 0, UNIQUE(connection_id, database_name, table_name) )`, `CREATE INDEX IF NOT EXISTS idx_gov_tbl_conn ON gov_tables(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_gov_tbl_db ON gov_tables(connection_id, database_name)`, // Governance columns `CREATE TABLE IF NOT EXISTS gov_columns ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, database_name TEXT NOT NULL, table_name TEXT NOT NULL, column_name TEXT NOT NULL, column_type TEXT NOT NULL, column_position INTEGER DEFAULT 0, default_kind TEXT, default_expression TEXT, comment TEXT, first_seen TEXT NOT NULL, last_updated TEXT NOT NULL, is_deleted INTEGER DEFAULT 0, UNIQUE(connection_id, database_name, table_name, column_name) )`, `CREATE INDEX IF NOT EXISTS idx_gov_col_conn ON gov_columns(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_gov_col_tbl ON gov_columns(connection_id, database_name, table_name)`, // Governance schema changes `CREATE TABLE IF NOT EXISTS gov_schema_changes ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, change_type TEXT NOT NULL, database_name TEXT NOT NULL, table_name TEXT, column_name TEXT, old_value TEXT, new_value TEXT, detected_at TEXT NOT NULL, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_gov_schema_conn ON gov_schema_changes(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_gov_schema_time ON gov_schema_changes(connection_id, detected_at)`, // Governance query log `CREATE TABLE IF NOT EXISTS gov_query_log ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, query_id TEXT NOT NULL, ch_user TEXT NOT NULL, query_text TEXT NOT NULL, normalized_hash TEXT, query_kind TEXT, 
event_time TEXT NOT NULL, duration_ms INTEGER DEFAULT 0, read_rows INTEGER DEFAULT 0, read_bytes INTEGER DEFAULT 0, result_rows INTEGER DEFAULT 0, written_rows INTEGER DEFAULT 0, written_bytes INTEGER DEFAULT 0, memory_usage INTEGER DEFAULT 0, tables_used TEXT, is_error INTEGER DEFAULT 0, error_message TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, UNIQUE(connection_id, query_id) )`, `CREATE INDEX IF NOT EXISTS idx_gov_qlog_conn ON gov_query_log(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_gov_qlog_time ON gov_query_log(connection_id, event_time)`, `CREATE INDEX IF NOT EXISTS idx_gov_qlog_user ON gov_query_log(connection_id, ch_user)`, `CREATE INDEX IF NOT EXISTS idx_gov_qlog_hash ON gov_query_log(connection_id, normalized_hash)`, // Governance lineage edges `CREATE TABLE IF NOT EXISTS gov_lineage_edges ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, source_database TEXT NOT NULL, source_table TEXT NOT NULL, target_database TEXT NOT NULL, target_table TEXT NOT NULL, query_id TEXT, ch_user TEXT, edge_type TEXT NOT NULL, detected_at TEXT NOT NULL, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_gov_lineage_conn ON gov_lineage_edges(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_gov_lineage_src ON gov_lineage_edges(connection_id, source_database, source_table)`, `CREATE INDEX IF NOT EXISTS idx_gov_lineage_tgt ON gov_lineage_edges(connection_id, target_database, target_table)`, // Governance column-level lineage edges `CREATE TABLE IF NOT EXISTS gov_lineage_column_edges ( id TEXT PRIMARY KEY, lineage_edge_id TEXT NOT NULL REFERENCES gov_lineage_edges(id) ON DELETE CASCADE, connection_id TEXT NOT NULL, source_column TEXT NOT NULL, target_column TEXT NOT NULL, created_at TEXT DEFAULT CURRENT_TIMESTAMP, UNIQUE(lineage_edge_id, source_column, target_column) )`, `CREATE INDEX IF NOT EXISTS idx_gov_col_lineage_edge ON gov_lineage_column_edges(lineage_edge_id)`, // Governance sensitivity 
tags `CREATE TABLE IF NOT EXISTS gov_tags ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, object_type TEXT NOT NULL, database_name TEXT NOT NULL, table_name TEXT NOT NULL, column_name TEXT NOT NULL DEFAULT '', tag TEXT NOT NULL, tagged_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, UNIQUE(connection_id, object_type, database_name, table_name, column_name, tag) )`, `CREATE INDEX IF NOT EXISTS idx_gov_tag_conn ON gov_tags(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_gov_tag_obj ON gov_tags(connection_id, database_name, table_name)`, // Governance ClickHouse users `CREATE TABLE IF NOT EXISTS gov_ch_users ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, name TEXT NOT NULL, auth_type TEXT, host_ip TEXT, default_roles TEXT, first_seen TEXT NOT NULL, last_updated TEXT NOT NULL, UNIQUE(connection_id, name) )`, `CREATE INDEX IF NOT EXISTS idx_gov_chuser_conn ON gov_ch_users(connection_id)`, // Governance ClickHouse roles `CREATE TABLE IF NOT EXISTS gov_ch_roles ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, name TEXT NOT NULL, first_seen TEXT NOT NULL, last_updated TEXT NOT NULL, UNIQUE(connection_id, name) )`, `CREATE INDEX IF NOT EXISTS idx_gov_chrole_conn ON gov_ch_roles(connection_id)`, // Governance role grants `CREATE TABLE IF NOT EXISTS gov_role_grants ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, user_name TEXT NOT NULL, granted_role_name TEXT NOT NULL, is_default INTEGER DEFAULT 0, with_admin_option INTEGER DEFAULT 0, first_seen TEXT NOT NULL, last_updated TEXT NOT NULL, UNIQUE(connection_id, user_name, granted_role_name) )`, `CREATE INDEX IF NOT EXISTS idx_gov_rolegrant_conn ON gov_role_grants(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_gov_rolegrant_user ON gov_role_grants(connection_id, user_name)`, `CREATE INDEX IF NOT EXISTS idx_gov_rolegrant_role ON 
gov_role_grants(connection_id, granted_role_name)`, // Governance grants `CREATE TABLE IF NOT EXISTS gov_grants ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, user_name TEXT, role_name TEXT, access_type TEXT NOT NULL, grant_database TEXT, grant_table TEXT, grant_column TEXT, is_partial_revoke INTEGER DEFAULT 0, grant_option INTEGER DEFAULT 0, first_seen TEXT NOT NULL, last_updated TEXT NOT NULL )`, `CREATE INDEX IF NOT EXISTS idx_gov_grant_conn ON gov_grants(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_gov_grant_user ON gov_grants(connection_id, user_name)`, `CREATE INDEX IF NOT EXISTS idx_gov_grant_role ON gov_grants(connection_id, role_name)`, // Governance access matrix (materialized) `CREATE TABLE IF NOT EXISTS gov_access_matrix ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, user_name TEXT NOT NULL, role_name TEXT, database_name TEXT, table_name TEXT, privilege TEXT NOT NULL, is_direct_grant INTEGER DEFAULT 0, last_query_time TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_gov_matrix_conn ON gov_access_matrix(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_gov_matrix_user ON gov_access_matrix(connection_id, user_name)`, // Governance policies `CREATE TABLE IF NOT EXISTS gov_policies ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, name TEXT NOT NULL, description TEXT, object_type TEXT NOT NULL, object_database TEXT, object_table TEXT, object_column TEXT, required_role TEXT, severity TEXT DEFAULT 'warn', enforcement_mode TEXT NOT NULL DEFAULT 'warn', enabled INTEGER DEFAULT 1, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_gov_policy_conn ON gov_policies(connection_id)`, // Governance policy violations `CREATE TABLE IF NOT EXISTS gov_policy_violations ( id TEXT PRIMARY KEY, connection_id 
TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, policy_id TEXT NOT NULL REFERENCES gov_policies(id) ON DELETE CASCADE, query_log_id TEXT, ch_user TEXT NOT NULL, violation_detail TEXT, severity TEXT NOT NULL, detection_phase TEXT NOT NULL DEFAULT 'post_exec', request_endpoint TEXT, detected_at TEXT NOT NULL, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_gov_violation_conn ON gov_policy_violations(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_gov_violation_policy ON gov_policy_violations(policy_id)`, `CREATE INDEX IF NOT EXISTS idx_gov_violation_time ON gov_policy_violations(connection_id, detected_at)`, // Governance object notes/comments (table/column level) `CREATE TABLE IF NOT EXISTS gov_object_comments ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, object_type TEXT NOT NULL, database_name TEXT NOT NULL, table_name TEXT NOT NULL, column_name TEXT NOT NULL DEFAULT '', comment_text TEXT NOT NULL, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_gov_comment_obj ON gov_object_comments(connection_id, object_type, database_name, table_name, column_name, created_at)`, // Governance incidents (Collibra-style workflow, simplified) `CREATE TABLE IF NOT EXISTS gov_incidents ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, source_type TEXT NOT NULL DEFAULT 'manual', source_ref TEXT, dedupe_key TEXT, title TEXT NOT NULL, severity TEXT NOT NULL DEFAULT 'warn', status TEXT NOT NULL DEFAULT 'open', assignee TEXT, details TEXT, resolution_note TEXT, occurrence_count INTEGER NOT NULL DEFAULT 1, first_seen_at TEXT NOT NULL, last_seen_at TEXT NOT NULL, resolved_at TEXT, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_gov_incident_conn_status ON 
gov_incidents(connection_id, status, severity, last_seen_at)`, `CREATE INDEX IF NOT EXISTS idx_gov_incident_source ON gov_incidents(connection_id, source_type, source_ref)`, `CREATE INDEX IF NOT EXISTS idx_gov_incident_dedupe ON gov_incidents(connection_id, dedupe_key, status)`, `CREATE TABLE IF NOT EXISTS gov_incident_comments ( id TEXT PRIMARY KEY, incident_id TEXT NOT NULL REFERENCES gov_incidents(id) ON DELETE CASCADE, comment_text TEXT NOT NULL, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_gov_incident_comment_incident ON gov_incident_comments(incident_id, created_at)`, // Alerting channels (SMTP/Resend/Brevo) `CREATE TABLE IF NOT EXISTS alert_channels ( id TEXT PRIMARY KEY, name TEXT NOT NULL, channel_type TEXT NOT NULL, config_encrypted TEXT NOT NULL, is_active INTEGER DEFAULT 1, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_alert_channel_active ON alert_channels(is_active)`, `CREATE UNIQUE INDEX IF NOT EXISTS idx_alert_channel_name_unique ON alert_channels(name)`, // Alert rules `CREATE TABLE IF NOT EXISTS alert_rules ( id TEXT PRIMARY KEY, name TEXT NOT NULL, event_type TEXT NOT NULL, severity_min TEXT NOT NULL DEFAULT 'warn', enabled INTEGER DEFAULT 1, cooldown_seconds INTEGER DEFAULT 300, max_attempts INTEGER DEFAULT 5, subject_template TEXT, body_template TEXT, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_alert_rule_enabled ON alert_rules(enabled)`, `CREATE INDEX IF NOT EXISTS idx_alert_rule_event ON alert_rules(event_type, enabled)`, // Rule routes map rules to channels and recipients `CREATE TABLE IF NOT EXISTS alert_rule_routes ( id TEXT PRIMARY KEY, rule_id TEXT NOT NULL REFERENCES alert_rules(id) ON DELETE CASCADE, channel_id TEXT NOT NULL REFERENCES alert_channels(id) ON DELETE CASCADE, recipients_json TEXT NOT NULL, 
is_active INTEGER DEFAULT 1, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_alert_route_rule ON alert_rule_routes(rule_id)`, `CREATE INDEX IF NOT EXISTS idx_alert_route_channel ON alert_rule_routes(channel_id)`, // Per-route delivery policy (digest/escalation metadata) `CREATE TABLE IF NOT EXISTS alert_route_policies ( route_id TEXT PRIMARY KEY REFERENCES alert_rule_routes(id) ON DELETE CASCADE, delivery_mode TEXT NOT NULL DEFAULT 'immediate', digest_window_minutes INTEGER NOT NULL DEFAULT 0, escalation_channel_id TEXT REFERENCES alert_channels(id) ON DELETE SET NULL, escalation_recipients_json TEXT, escalation_after_failures INTEGER NOT NULL DEFAULT 0, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_alert_route_policy_delivery ON alert_route_policies(delivery_mode, digest_window_minutes)`, // Alert events emitted by governance/scheduler/other subsystems `CREATE TABLE IF NOT EXISTS alert_events ( id TEXT PRIMARY KEY, connection_id TEXT, event_type TEXT NOT NULL, severity TEXT NOT NULL, title TEXT NOT NULL, message TEXT NOT NULL, payload_json TEXT, fingerprint TEXT, source_ref TEXT, status TEXT NOT NULL DEFAULT 'new', created_at TEXT DEFAULT CURRENT_TIMESTAMP, processed_at TEXT )`, `CREATE INDEX IF NOT EXISTS idx_alert_event_status ON alert_events(status, created_at)`, `CREATE INDEX IF NOT EXISTS idx_alert_event_type ON alert_events(event_type, created_at)`, `CREATE INDEX IF NOT EXISTS idx_alert_event_fingerprint ON alert_events(fingerprint, created_at)`, // Dispatch jobs generated from events and routes `CREATE TABLE IF NOT EXISTS alert_dispatch_jobs ( id TEXT PRIMARY KEY, event_id TEXT NOT NULL REFERENCES alert_events(id) ON DELETE CASCADE, rule_id TEXT NOT NULL REFERENCES alert_rules(id) ON DELETE CASCADE, route_id TEXT NOT NULL REFERENCES alert_rule_routes(id) ON DELETE CASCADE, channel_id TEXT NOT NULL REFERENCES 
alert_channels(id) ON DELETE CASCADE, status TEXT NOT NULL DEFAULT 'queued', attempt_count INTEGER DEFAULT 0, max_attempts INTEGER DEFAULT 5, next_attempt_at TEXT NOT NULL, last_error TEXT, provider_message_id TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP, sent_at TEXT )`, `CREATE INDEX IF NOT EXISTS idx_alert_job_due ON alert_dispatch_jobs(status, next_attempt_at)`, `CREATE INDEX IF NOT EXISTS idx_alert_job_event ON alert_dispatch_jobs(event_id)`, `CREATE INDEX IF NOT EXISTS idx_alert_job_route ON alert_dispatch_jobs(route_id)`, // Digest windows for routes configured in digest mode `CREATE TABLE IF NOT EXISTS alert_route_digests ( id TEXT PRIMARY KEY, route_id TEXT NOT NULL REFERENCES alert_rule_routes(id) ON DELETE CASCADE, rule_id TEXT NOT NULL REFERENCES alert_rules(id) ON DELETE CASCADE, channel_id TEXT NOT NULL REFERENCES alert_channels(id) ON DELETE CASCADE, bucket_start TEXT NOT NULL, bucket_end TEXT NOT NULL, event_type TEXT NOT NULL, severity TEXT NOT NULL, event_count INTEGER NOT NULL DEFAULT 0, event_ids_json TEXT NOT NULL, titles_json TEXT NOT NULL, status TEXT NOT NULL DEFAULT 'collecting', attempt_count INTEGER NOT NULL DEFAULT 0, max_attempts INTEGER NOT NULL DEFAULT 5, next_attempt_at TEXT NOT NULL, last_error TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP, sent_at TEXT, UNIQUE(route_id, bucket_start, event_type, severity) )`, `CREATE INDEX IF NOT EXISTS idx_alert_digest_due ON alert_route_digests(status, next_attempt_at, bucket_end)`, `CREATE INDEX IF NOT EXISTS idx_alert_digest_route ON alert_route_digests(route_id, bucket_start)`, // ══════════════════════════════════════════════════════════════ // Pipeline tables (data ingestion pipelines) // ══════════════════════════════════════════════════════════════ // Pipeline definitions `CREATE TABLE IF NOT EXISTS pipelines ( id TEXT PRIMARY KEY, name TEXT NOT NULL, description TEXT, connection_id TEXT NOT NULL 
REFERENCES connections(id) ON DELETE CASCADE, status TEXT NOT NULL DEFAULT 'draft', config TEXT NOT NULL DEFAULT '{}', created_by TEXT, last_started_at TEXT, last_stopped_at TEXT, last_error TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_pipeline_conn ON pipelines(connection_id)`, `CREATE INDEX IF NOT EXISTS idx_pipeline_status ON pipelines(status)`, // Pipeline graph nodes (sources and sinks) `CREATE TABLE IF NOT EXISTS pipeline_nodes ( id TEXT PRIMARY KEY, pipeline_id TEXT NOT NULL REFERENCES pipelines(id) ON DELETE CASCADE, node_type TEXT NOT NULL, label TEXT NOT NULL, position_x REAL NOT NULL DEFAULT 0, position_y REAL NOT NULL DEFAULT 0, config_encrypted TEXT NOT NULL DEFAULT '{}', created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_pipeline_node_pipeline ON pipeline_nodes(pipeline_id)`, // Pipeline graph edges (connections between nodes) `CREATE TABLE IF NOT EXISTS pipeline_edges ( id TEXT PRIMARY KEY, pipeline_id TEXT NOT NULL REFERENCES pipelines(id) ON DELETE CASCADE, source_node_id TEXT NOT NULL REFERENCES pipeline_nodes(id) ON DELETE CASCADE, target_node_id TEXT NOT NULL REFERENCES pipeline_nodes(id) ON DELETE CASCADE, source_handle TEXT, target_handle TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, UNIQUE(pipeline_id, source_node_id, target_node_id) )`, `CREATE INDEX IF NOT EXISTS idx_pipeline_edge_pipeline ON pipeline_edges(pipeline_id)`, // Pipeline execution runs `CREATE TABLE IF NOT EXISTS pipeline_runs ( id TEXT PRIMARY KEY, pipeline_id TEXT NOT NULL REFERENCES pipelines(id) ON DELETE CASCADE, status TEXT NOT NULL DEFAULT 'running', started_at TEXT NOT NULL, finished_at TEXT, rows_ingested INTEGER DEFAULT 0, bytes_ingested INTEGER DEFAULT 0, errors_count INTEGER DEFAULT 0, last_error TEXT, metrics_json TEXT DEFAULT '{}', created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS 
idx_pipeline_run_pipeline ON pipeline_runs(pipeline_id)`, `CREATE INDEX IF NOT EXISTS idx_pipeline_run_started ON pipeline_runs(pipeline_id, started_at)`, // Pipeline run logs `CREATE TABLE IF NOT EXISTS pipeline_run_logs ( id TEXT PRIMARY KEY, run_id TEXT NOT NULL REFERENCES pipeline_runs(id) ON DELETE CASCADE, level TEXT NOT NULL DEFAULT 'info', message TEXT NOT NULL, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_pipeline_run_log_run ON pipeline_run_logs(run_id, created_at)`, // ── Models (dbt-like SQL transformations) ───────────────────────── `CREATE TABLE IF NOT EXISTS models ( id TEXT PRIMARY KEY, name TEXT NOT NULL, description TEXT DEFAULT '', connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, target_database TEXT NOT NULL DEFAULT 'default', materialization TEXT NOT NULL DEFAULT 'view', sql_body TEXT NOT NULL DEFAULT '', table_engine TEXT NOT NULL DEFAULT 'MergeTree', order_by TEXT NOT NULL DEFAULT 'tuple()', status TEXT NOT NULL DEFAULT 'draft', last_error TEXT, last_run_at TEXT, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP, UNIQUE(connection_id, name) )`, `CREATE INDEX IF NOT EXISTS idx_model_conn ON models(connection_id)`, `CREATE TABLE IF NOT EXISTS model_runs ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, status TEXT NOT NULL DEFAULT 'running', total_models INTEGER NOT NULL DEFAULT 0, succeeded INTEGER NOT NULL DEFAULT 0, failed INTEGER NOT NULL DEFAULT 0, skipped INTEGER NOT NULL DEFAULT 0, started_at TEXT NOT NULL, finished_at TEXT, triggered_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_model_run_conn ON model_runs(connection_id, started_at)`, `CREATE TABLE IF NOT EXISTS model_run_results ( id TEXT PRIMARY KEY, run_id TEXT NOT NULL REFERENCES model_runs(id) ON DELETE CASCADE, model_id TEXT NOT NULL REFERENCES models(id) ON DELETE CASCADE, model_name 
TEXT NOT NULL, status TEXT NOT NULL DEFAULT 'pending', resolved_sql TEXT, elapsed_ms INTEGER DEFAULT 0, error TEXT, started_at TEXT, finished_at TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP )`, `CREATE INDEX IF NOT EXISTS idx_model_result_run ON model_run_results(run_id)`, `CREATE TABLE IF NOT EXISTS model_schedules ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, anchor_model_id TEXT REFERENCES models(id) ON DELETE CASCADE, cron TEXT NOT NULL, enabled INTEGER NOT NULL DEFAULT 1, last_run_at TEXT, next_run_at TEXT, last_status TEXT, last_error TEXT, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP, UNIQUE(connection_id, anchor_model_id) )`, `CREATE INDEX IF NOT EXISTS idx_model_sched_conn ON model_schedules(connection_id)`, } for _, stmt := range stmts { if _, err := db.conn.Exec(stmt); err != nil { return err } } // Migrate model_schedules: add anchor_model_id column if missing if err := db.migrateModelSchedulesAnchor(); err != nil { return fmt.Errorf("migrate model_schedules anchor: %w", err) } if err := db.ensureColumn("gov_policies", "enforcement_mode", "TEXT NOT NULL DEFAULT 'warn'"); err != nil { return err } if err := db.ensureColumn("gov_policy_violations", "detection_phase", "TEXT NOT NULL DEFAULT 'post_exec'"); err != nil { return err } if err := db.ensureColumn("gov_policy_violations", "request_endpoint", "TEXT"); err != nil { return err } if err := db.ensureColumn("brain_chats", "context_database", "TEXT"); err != nil { return err } if err := db.ensureColumn("brain_chats", "context_table", "TEXT"); err != nil { return err } if err := db.ensureColumn("brain_chats", "context_tables", "TEXT"); err != nil { return err } // Drop legacy tables from the old SaaS schema dropLegacy := []string{ "DROP TABLE IF EXISTS organizations", "DROP TABLE IF EXISTS tunnel_connections", "DROP TABLE IF EXISTS cloud_sessions", "DROP TABLE IF EXISTS scheduled_runs", "DROP 
TABLE IF EXISTS scheduled_jobs", "DROP TABLE IF EXISTS cloud_saved_queries", "DROP TABLE IF EXISTS cloud_panels", "DROP TABLE IF EXISTS cloud_dashboards", "DROP TABLE IF EXISTS cloud_user_roles", "DROP TABLE IF EXISTS beta_applications", "DROP TABLE IF EXISTS cloud_audit_logs", } for _, stmt := range dropLegacy { if _, err := db.conn.Exec(stmt); err != nil { slog.Warn("Failed to drop legacy table", "error", err) } } // Seed installation_id if not present var count int if err := db.conn.QueryRow("SELECT COUNT(*) FROM settings WHERE key = 'installation_id'").Scan(&count); err == nil && count == 0 { db.conn.Exec("INSERT INTO settings (key, value) VALUES ('installation_id', ?)", uuid.NewString()) slog.Info("Generated new installation ID") } // Seed default Brain skill if not present. if err := db.conn.QueryRow("SELECT COUNT(*) FROM brain_skills").Scan(&count); err == nil && count == 0 { now := "CURRENT_TIMESTAMP" db.conn.Exec(`INSERT INTO brain_skills (id, name, content, is_active, is_default, created_by, created_at, updated_at) VALUES (?, ?, ?, 1, 1, 'system', `+now+`, `+now+`)`, uuid.NewString(), "Default Brain Skill", `You are Brain, a senior ClickHouse copilot. Priorities: - Give correct SQL first, concise explanation second. - Keep queries safe and cost-aware: start with LIMIT 100 unless user asks otherwise. - Prefer explicit columns over SELECT * on large tables. - Use only schema fields known in context; if missing, ask a short clarifying question. - When uncertain, provide assumptions clearly. Artifacts: - When sharing SQL, return a runnable SQL block. - If a query result artifact exists, reference it by title and summarize key findings in bullets. - For follow-ups, reuse prior artifacts/chats when relevant. Tool behavior: - Read-only queries by default. - Never execute DDL/DROP/TRUNCATE/ALTER unless user explicitly asks and confirms. - For expensive requests, propose a lightweight preview query first. Formatting: 1) One-line intent acknowledgment. 
2) SQL in a fenced sql block.
3) Short explanation and optional next-step variants.`,
		)
	}

	slog.Info("Database migrations completed")
	return nil
}

// migrateModelSchedulesAnchor detects old model_schedules without anchor_model_id
// and migrates data to the new schema.
//
// SQLite cannot add a column that participates in a UNIQUE constraint via
// ALTER TABLE, so the migration rebuilds the table: rename old -> create new ->
// copy with backfill -> drop old. Returns nil when the table is absent or the
// column already exists (idempotent).
//
// NOTE(review): the rebuild steps are not wrapped in a transaction; a failure
// mid-way can leave model_schedules_old renamed — confirm this is acceptable
// for the startup-migration path.
func (db *DB) migrateModelSchedulesAnchor() error {
	// Check if anchor_model_id column already exists
	rows, err := db.conn.Query("PRAGMA table_info(model_schedules)")
	if err != nil {
		return nil // table may not exist yet
	}
	defer rows.Close()
	hasAnchor := false
	for rows.Next() {
		// PRAGMA table_info yields (cid, name, type, notnull, dflt_value, pk).
		var cid int
		var name, colType string
		var notNull, pk int
		var dfltValue interface{}
		if err := rows.Scan(&cid, &name, &colType, &notNull, &dfltValue, &pk); err != nil {
			return err
		}
		// Case-insensitive compare: SQLite preserves whatever case the DDL used.
		if strings.EqualFold(strings.TrimSpace(name), "anchor_model_id") {
			hasAnchor = true
		}
	}
	if err := rows.Err(); err != nil {
		return err
	}
	if hasAnchor {
		return nil // already migrated
	}
	slog.Info("Migrating model_schedules to add anchor_model_id")
	// Rename old table
	if _, err := db.conn.Exec("ALTER TABLE model_schedules RENAME TO model_schedules_old"); err != nil {
		return fmt.Errorf("rename old table: %w", err)
	}
	// Create new table (must stay in sync with the CREATE TABLE in the stmts list)
	if _, err := db.conn.Exec(`CREATE TABLE model_schedules ( id TEXT PRIMARY KEY, connection_id TEXT NOT NULL REFERENCES connections(id) ON DELETE CASCADE, anchor_model_id TEXT REFERENCES models(id) ON DELETE CASCADE, cron TEXT NOT NULL, enabled INTEGER NOT NULL DEFAULT 1, last_run_at TEXT, next_run_at TEXT, last_status TEXT, last_error TEXT, created_by TEXT, created_at TEXT DEFAULT CURRENT_TIMESTAMP, updated_at TEXT DEFAULT CURRENT_TIMESTAMP, UNIQUE(connection_id, anchor_model_id) )`); err != nil {
		return fmt.Errorf("create new table: %w", err)
	}
	// Copy data with backfill: pick the first model by name as anchor
	// (old rows had no anchor; any model on the same connection is a best-effort guess).
	if _, err := db.conn.Exec(`INSERT INTO model_schedules (id, connection_id, anchor_model_id, cron, enabled, last_run_at, next_run_at, last_status, last_error, created_by, created_at, updated_at) SELECT s.id, s.connection_id, (SELECT m.id FROM models m WHERE m.connection_id = s.connection_id ORDER BY m.name ASC LIMIT 1), s.cron, s.enabled, s.last_run_at, s.next_run_at, s.last_status, s.last_error, s.created_by, s.created_at, s.updated_at FROM model_schedules_old s`); err != nil {
		return fmt.Errorf("copy data: %w", err)
	}
	// Drop old table
	if _, err := db.conn.Exec("DROP TABLE model_schedules_old"); err != nil {
		return fmt.Errorf("drop old table: %w", err)
	}
	// Delete orphaned schedules with no anchor (connections that have no models)
	if _, err := db.conn.Exec("DELETE FROM model_schedules WHERE anchor_model_id IS NULL"); err != nil {
		return fmt.Errorf("delete orphans: %w", err)
	}
	// Recreate index (the rename carried the old index name away with the table)
	if _, err := db.conn.Exec("CREATE INDEX IF NOT EXISTS idx_model_sched_conn ON model_schedules(connection_id)"); err != nil {
		return fmt.Errorf("recreate index: %w", err)
	}
	slog.Info("model_schedules migration complete")
	return nil
}

// ensureColumn adds columnName (with the given SQL definition) to tableName if
// it is not already present, using PRAGMA table_info to probe and
// ALTER TABLE ... ADD COLUMN to add. Idempotent; safe to run on every startup.
// tableName/columnName are interpolated with Sprintf, so they must come from
// trusted, hard-coded call sites (they do in this file), never user input.
func (db *DB) ensureColumn(tableName, columnName, definition string) error {
	rows, err := db.conn.Query(fmt.Sprintf("PRAGMA table_info(%s)", tableName))
	if err != nil {
		return fmt.Errorf("inspect table %s columns: %w", tableName, err)
	}
	defer rows.Close()
	for rows.Next() {
		var cid int
		var name, colType string
		var notNull, pk int
		var dfltValue interface{}
		if err := rows.Scan(&cid, &name, &colType, &notNull, &dfltValue, &pk); err != nil {
			return fmt.Errorf("scan table info for %s: %w", tableName, err)
		}
		if strings.EqualFold(strings.TrimSpace(name), strings.TrimSpace(columnName)) {
			return nil
		}
	}
	if err := rows.Err(); err != nil {
		return fmt.Errorf("iterate table info for %s: %w", tableName, err)
	}
	if _, err := db.conn.Exec(fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s", tableName, columnName, definition)); err != nil {
		return fmt.Errorf("add column %s.%s: %w", tableName, columnName, err)
	}
	return nil
}

================================================
FILE: internal/database/migrations_guardrails_test.go
================================================
package database

import "testing"
// TestGuardrailColumnsExistAfterMigrations verifies that the guardrail-related
// columns added by ensureColumn in the migrations are present after a fresh
// database is opened and migrated.
func TestGuardrailColumnsExistAfterMigrations(t *testing.T) {
	db := openTestDB(t)
	mustHaveColumn(t, db, "gov_policies", "enforcement_mode")
	mustHaveColumn(t, db, "gov_policy_violations", "detection_phase")
	mustHaveColumn(t, db, "gov_policy_violations", "request_endpoint")
}

// mustHaveColumn fails the test unless tableName has a column named columnName,
// probing via PRAGMA table_info (exact, case-sensitive name match).
func mustHaveColumn(t *testing.T, db *DB, tableName, columnName string) {
	t.Helper()
	rows, err := db.conn.Query("PRAGMA table_info(" + tableName + ")")
	if err != nil {
		t.Fatalf("inspect table %s: %v", tableName, err)
	}
	defer rows.Close()
	found := false
	for rows.Next() {
		var cid int
		var name, typ string
		var notNull, pk int
		var defaultValue interface{}
		if err := rows.Scan(&cid, &name, &typ, &notNull, &defaultValue, &pk); err != nil {
			t.Fatalf("scan pragma for %s: %v", tableName, err)
		}
		if name == columnName {
			found = true
			break
		}
	}
	if err := rows.Err(); err != nil {
		t.Fatalf("iterate pragma for %s: %v", tableName, err)
	}
	if !found {
		t.Fatalf("expected column %s on table %s", columnName, tableName)
	}
}

================================================
FILE: internal/database/models.go
================================================
package database

import (
	"database/sql"
	"fmt"
	"time"

	"github.com/google/uuid"
)

// Model represents a SQL model definition (dbt-like).
// Pointer fields map to nullable TEXT columns in the models table.
type Model struct {
	ID              string  `json:"id"`
	Name            string  `json:"name"`
	Description     string  `json:"description"`
	ConnectionID    string  `json:"connection_id"`
	TargetDatabase  string  `json:"target_database"`
	Materialization string  `json:"materialization"`
	SQLBody         string  `json:"sql_body"`
	TableEngine     string  `json:"table_engine"`
	OrderBy         string  `json:"order_by"`
	Status          string  `json:"status"`
	LastError       *string `json:"last_error"`
	LastRunAt       *string `json:"last_run_at"`
	CreatedBy       *string `json:"created_by"`
	CreatedAt       string  `json:"created_at"`
	UpdatedAt       string  `json:"updated_at"`
}

// ModelRun represents a batch execution of models.
type ModelRun struct {
	ID          string  `json:"id"`
	ConnID      string  `json:"connection_id"`
	Status      string  `json:"status"`
	Total       int     `json:"total_models"`
	Succeeded   int     `json:"succeeded"`
	Failed      int     `json:"failed"`
	Skipped     int     `json:"skipped"`
	StartedAt   string  `json:"started_at"`
	FinishedAt  *string `json:"finished_at"`
	TriggeredBy *string `json:"triggered_by"`
	CreatedAt   string  `json:"created_at"`
}

// ModelRunResult represents per-model results within a run.
type ModelRunResult struct {
	ID          string  `json:"id"`
	RunID       string  `json:"run_id"`
	ModelID     string  `json:"model_id"`
	ModelName   string  `json:"model_name"`
	Status      string  `json:"status"`
	ResolvedSQL *string `json:"resolved_sql"`
	ElapsedMs   int64   `json:"elapsed_ms"`
	Error       *string `json:"error"`
	StartedAt   *string `json:"started_at"`
	FinishedAt  *string `json:"finished_at"`
	CreatedAt   string  `json:"created_at"`
}

// ── Model CRUD ──────────────────────────────────────────────────────

// GetModelsByConnection returns all models for a connection.
// Models are ordered by name ascending; an empty result is a nil slice.
func (db *DB) GetModelsByConnection(connectionID string) ([]Model, error) {
	rows, err := db.conn.Query(
		`SELECT id, name, description, connection_id, target_database, materialization, sql_body, table_engine, order_by, status, last_error, last_run_at, created_by, created_at, updated_at FROM models WHERE connection_id = ? ORDER BY name ASC`,
		connectionID,
	)
	if err != nil {
		return nil, fmt.Errorf("get models: %w", err)
	}
	defer rows.Close()
	var models []Model
	for rows.Next() {
		m, err := scanModel(rows)
		if err != nil {
			return nil, err
		}
		models = append(models, m)
	}
	// rows.Err surfaces iteration errors that rows.Next silently stops on.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate model rows: %w", err)
	}
	return models, nil
}

// GetModelByID returns a single model by ID.
func (db *DB) GetModelByID(id string) (*Model, error) { row := db.conn.QueryRow( `SELECT id, name, description, connection_id, target_database, materialization, sql_body, table_engine, order_by, status, last_error, last_run_at, created_by, created_at, updated_at FROM models WHERE id = ?`, id, ) m, err := scanModelRow(row) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get model by id: %w", err) } return m, nil } // GetModelByName returns a model by connection and name. func (db *DB) GetModelByName(connectionID, name string) (*Model, error) { row := db.conn.QueryRow( `SELECT id, name, description, connection_id, target_database, materialization, sql_body, table_engine, order_by, status, last_error, last_run_at, created_by, created_at, updated_at FROM models WHERE connection_id = ? AND name = ?`, connectionID, name, ) m, err := scanModelRow(row) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get model by name: %w", err) } return m, nil } // CreateModel creates a new model. func (db *DB) CreateModel(connectionID, name, description, targetDB, materialization, sqlBody, tableEngine, orderBy, createdBy string) (string, error) { id := uuid.NewString() now := time.Now().UTC().Format(time.RFC3339) var creator interface{} if createdBy != "" { creator = createdBy } _, err := db.conn.Exec( `INSERT INTO models (id, name, description, connection_id, target_database, materialization, sql_body, table_engine, order_by, status, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 'draft', ?, ?, ?)`, id, name, description, connectionID, targetDB, materialization, sqlBody, tableEngine, orderBy, creator, now, now, ) if err != nil { return "", fmt.Errorf("create model: %w", err) } return id, nil } // UpdateModel updates an existing model. 
func (db *DB) UpdateModel(id, name, description, targetDB, materialization, sqlBody, tableEngine, orderBy string) error { now := time.Now().UTC().Format(time.RFC3339) _, err := db.conn.Exec( `UPDATE models SET name = ?, description = ?, target_database = ?, materialization = ?, sql_body = ?, table_engine = ?, order_by = ?, updated_at = ? WHERE id = ?`, name, description, targetDB, materialization, sqlBody, tableEngine, orderBy, now, id, ) if err != nil { return fmt.Errorf("update model: %w", err) } return nil } // DeleteModel removes a model by ID. func (db *DB) DeleteModel(id string) error { _, err := db.conn.Exec("DELETE FROM models WHERE id = ?", id) if err != nil { return fmt.Errorf("delete model: %w", err) } return nil } // UpdateModelStatus updates a model's status and last error. func (db *DB) UpdateModelStatus(id, status, lastError string) error { now := time.Now().UTC().Format(time.RFC3339) var errVal interface{} if lastError != "" { errVal = lastError } _, err := db.conn.Exec( "UPDATE models SET status = ?, last_error = ?, last_run_at = ?, updated_at = ? WHERE id = ?", status, errVal, now, now, id, ) if err != nil { return fmt.Errorf("update model status: %w", err) } return nil } // ── Model Runs ────────────────────────────────────────────────────── // CreateModelRun creates a new run record. func (db *DB) CreateModelRun(connectionID string, totalModels int, triggeredBy string) (string, error) { id := uuid.NewString() now := time.Now().UTC().Format(time.RFC3339) var trigger interface{} if triggeredBy != "" { trigger = triggeredBy } _, err := db.conn.Exec( `INSERT INTO model_runs (id, connection_id, status, total_models, started_at, triggered_by, created_at) VALUES (?, ?, 'running', ?, ?, ?, ?)`, id, connectionID, totalModels, now, trigger, now, ) if err != nil { return "", fmt.Errorf("create model run: %w", err) } return id, nil } // FinalizeModelRun marks a run as complete. 
func (db *DB) FinalizeModelRun(id, status string, succeeded, failed, skipped int) error { now := time.Now().UTC().Format(time.RFC3339) _, err := db.conn.Exec( `UPDATE model_runs SET status = ?, succeeded = ?, failed = ?, skipped = ?, finished_at = ? WHERE id = ?`, status, succeeded, failed, skipped, now, id, ) if err != nil { return fmt.Errorf("finalize model run: %w", err) } return nil } // GetModelRuns returns recent runs for a connection. func (db *DB) GetModelRuns(connectionID string, limit, offset int) ([]ModelRun, error) { rows, err := db.conn.Query( `SELECT id, connection_id, status, total_models, succeeded, failed, skipped, started_at, finished_at, triggered_by, created_at FROM model_runs WHERE connection_id = ? ORDER BY started_at DESC LIMIT ? OFFSET ?`, connectionID, limit, offset, ) if err != nil { return nil, fmt.Errorf("get model runs: %w", err) } defer rows.Close() var runs []ModelRun for rows.Next() { var r ModelRun var finished, trigger sql.NullString if err := rows.Scan(&r.ID, &r.ConnID, &r.Status, &r.Total, &r.Succeeded, &r.Failed, &r.Skipped, &r.StartedAt, &finished, &trigger, &r.CreatedAt); err != nil { return nil, fmt.Errorf("scan model run: %w", err) } r.FinishedAt = nullStringToPtr(finished) r.TriggeredBy = nullStringToPtr(trigger) runs = append(runs, r) } return runs, rows.Err() } // GetModelRunByID returns a single run. 
func (db *DB) GetModelRunByID(id string) (*ModelRun, error) { row := db.conn.QueryRow( `SELECT id, connection_id, status, total_models, succeeded, failed, skipped, started_at, finished_at, triggered_by, created_at FROM model_runs WHERE id = ?`, id, ) var r ModelRun var finished, trigger sql.NullString err := row.Scan(&r.ID, &r.ConnID, &r.Status, &r.Total, &r.Succeeded, &r.Failed, &r.Skipped, &r.StartedAt, &finished, &trigger, &r.CreatedAt) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get model run: %w", err) } r.FinishedAt = nullStringToPtr(finished) r.TriggeredBy = nullStringToPtr(trigger) return &r, nil } // ── Model Run Results ─────────────────────────────────────────────── // CreateModelRunResult creates a pending result record for a model in a run. func (db *DB) CreateModelRunResult(runID, modelID, modelName string) (string, error) { id := uuid.NewString() now := time.Now().UTC().Format(time.RFC3339) _, err := db.conn.Exec( `INSERT INTO model_run_results (id, run_id, model_id, model_name, status, created_at) VALUES (?, ?, ?, ?, 'pending', ?)`, id, runID, modelID, modelName, now, ) if err != nil { return "", fmt.Errorf("create model run result: %w", err) } return id, nil } // UpdateModelRunResult updates the result for a specific model in a run. func (db *DB) UpdateModelRunResult(runID, modelID, status, resolvedSQL string, elapsedMs int64, errMsg string) error { now := time.Now().UTC().Format(time.RFC3339) var sqlVal, errVal interface{} if resolvedSQL != "" { sqlVal = resolvedSQL } if errMsg != "" { errVal = errMsg } _, err := db.conn.Exec( `UPDATE model_run_results SET status = ?, resolved_sql = ?, elapsed_ms = ?, error = ?, started_at = COALESCE(started_at, ?), finished_at = ? WHERE run_id = ? AND model_id = ?`, status, sqlVal, elapsedMs, errVal, now, now, runID, modelID, ) if err != nil { return fmt.Errorf("update model run result: %w", err) } return nil } // GetModelRunResults returns all results for a run. 
// GetModelRunResults returns all results for a run, ordered by creation time.
func (db *DB) GetModelRunResults(runID string) ([]ModelRunResult, error) {
	rows, err := db.conn.Query(
		`SELECT id, run_id, model_id, model_name, status, resolved_sql, elapsed_ms, error, started_at, finished_at, created_at FROM model_run_results WHERE run_id = ? ORDER BY created_at ASC`,
		runID,
	)
	if err != nil {
		return nil, fmt.Errorf("get model run results: %w", err)
	}
	defer rows.Close()
	var results []ModelRunResult
	for rows.Next() {
		var r ModelRunResult
		// Nullable columns scan into NullString and convert to *string below.
		var resolvedSQL, errStr, started, finished sql.NullString
		if err := rows.Scan(&r.ID, &r.RunID, &r.ModelID, &r.ModelName, &r.Status, &resolvedSQL, &r.ElapsedMs, &errStr, &started, &finished, &r.CreatedAt); err != nil {
			return nil, fmt.Errorf("scan model run result: %w", err)
		}
		r.ResolvedSQL = nullStringToPtr(resolvedSQL)
		r.Error = nullStringToPtr(errStr)
		r.StartedAt = nullStringToPtr(started)
		r.FinishedAt = nullStringToPtr(finished)
		results = append(results, r)
	}
	return results, rows.Err()
}

// HasRunningModelRun checks if there's an active run for a connection.
// Used to prevent overlapping batch executions.
func (db *DB) HasRunningModelRun(connectionID string) (bool, error) {
	var count int
	err := db.conn.QueryRow(
		"SELECT COUNT(*) FROM model_runs WHERE connection_id = ? AND status = 'running'", connectionID,
	).Scan(&count)
	if err != nil {
		return false, fmt.Errorf("check running model run: %w", err)
	}
	return count > 0, nil
}

// ── Helpers ─────────────────────────────────────────────────────────

// scanModel scans the current row of a models SELECT (column order must match
// the SELECT lists used throughout this file) into a Model value.
func scanModel(rows *sql.Rows) (Model, error) {
	var m Model
	var lastErr, lastRun, createdBy sql.NullString
	if err := rows.Scan(&m.ID, &m.Name, &m.Description, &m.ConnectionID, &m.TargetDatabase, &m.Materialization, &m.SQLBody, &m.TableEngine, &m.OrderBy, &m.Status, &lastErr, &lastRun, &createdBy, &m.CreatedAt, &m.UpdatedAt); err != nil {
		return m, fmt.Errorf("scan model: %w", err)
	}
	m.LastError = nullStringToPtr(lastErr)
	m.LastRunAt = nullStringToPtr(lastRun)
	m.CreatedBy = nullStringToPtr(createdBy)
	return m, nil
}

// scanModelRow is the single-row variant of scanModel. It returns the raw
// Scan error unwrapped so callers can compare against sql.ErrNoRows.
func scanModelRow(row *sql.Row) (*Model, error) {
	var m Model
	var lastErr, lastRun, createdBy sql.NullString
	err := row.Scan(&m.ID, &m.Name, &m.Description, &m.ConnectionID, &m.TargetDatabase, &m.Materialization, &m.SQLBody, &m.TableEngine, &m.OrderBy, &m.Status, &lastErr, &lastRun, &createdBy, &m.CreatedAt, &m.UpdatedAt)
	if err != nil {
		return nil, err
	}
	m.LastError = nullStringToPtr(lastErr)
	m.LastRunAt = nullStringToPtr(lastRun)
	m.CreatedBy = nullStringToPtr(createdBy)
	return &m, nil
}

// ── Model Schedules ─────────────────────────────────────────────────

// ModelSchedule represents a cron schedule for running a model pipeline.
// Pointer fields map to nullable columns; Enabled maps to an INTEGER 0/1.
type ModelSchedule struct {
	ID           string  `json:"id"`
	ConnectionID string  `json:"connection_id"`
	AnchorModelID *string `json:"anchor_model_id"`
	Cron         string  `json:"cron"`
	Enabled      bool    `json:"enabled"`
	LastRunAt    *string `json:"last_run_at"`
	NextRunAt    *string `json:"next_run_at"`
	LastStatus   *string `json:"last_status"`
	LastError    *string `json:"last_error"`
	CreatedBy    *string `json:"created_by"`
	CreatedAt    string  `json:"created_at"`
	UpdatedAt    string  `json:"updated_at"`
}

// GetModelSchedulesByConnection returns all schedules for a connection.
func (db *DB) GetModelSchedulesByConnection(connectionID string) ([]ModelSchedule, error) { rows, err := db.conn.Query( `SELECT id, connection_id, anchor_model_id, cron, enabled, last_run_at, next_run_at, last_status, last_error, created_by, created_at, updated_at FROM model_schedules WHERE connection_id = ?`, connectionID, ) if err != nil { return nil, fmt.Errorf("get model schedules: %w", err) } defer rows.Close() var schedules []ModelSchedule for rows.Next() { s, err := scanModelSchedule(rows) if err != nil { return nil, err } schedules = append(schedules, s) } return schedules, rows.Err() } // GetModelScheduleByAnchor returns the schedule for a specific pipeline anchor, or nil. func (db *DB) GetModelScheduleByAnchor(connectionID, anchorModelID string) (*ModelSchedule, error) { row := db.conn.QueryRow( `SELECT id, connection_id, anchor_model_id, cron, enabled, last_run_at, next_run_at, last_status, last_error, created_by, created_at, updated_at FROM model_schedules WHERE connection_id = ? AND anchor_model_id = ?`, connectionID, anchorModelID, ) var s ModelSchedule var enabled int var anchor, lastRun, nextRun, lastStatus, lastErr, createdBy sql.NullString err := row.Scan(&s.ID, &s.ConnectionID, &anchor, &s.Cron, &enabled, &lastRun, &nextRun, &lastStatus, &lastErr, &createdBy, &s.CreatedAt, &s.UpdatedAt) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get model schedule by anchor: %w", err) } s.Enabled = enabled == 1 s.AnchorModelID = nullStringToPtr(anchor) s.LastRunAt = nullStringToPtr(lastRun) s.NextRunAt = nullStringToPtr(nextRun) s.LastStatus = nullStringToPtr(lastStatus) s.LastError = nullStringToPtr(lastErr) s.CreatedBy = nullStringToPtr(createdBy) return &s, nil } // UpsertModelSchedule creates or replaces a model schedule for a pipeline anchor. 
func (db *DB) UpsertModelSchedule(connectionID, anchorModelID, cron, nextRunAt, createdBy string) (string, error) { id := uuid.NewString() now := time.Now().UTC().Format(time.RFC3339) var creator interface{} if createdBy != "" { creator = createdBy } _, err := db.conn.Exec( `INSERT INTO model_schedules (id, connection_id, anchor_model_id, cron, enabled, next_run_at, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, 1, ?, ?, ?, ?) ON CONFLICT(connection_id, anchor_model_id) DO UPDATE SET cron = excluded.cron, enabled = 1, next_run_at = excluded.next_run_at, created_by = excluded.created_by, updated_at = excluded.updated_at`, id, connectionID, anchorModelID, cron, nextRunAt, creator, now, now, ) if err != nil { return "", fmt.Errorf("upsert model schedule: %w", err) } return id, nil } // UpdateModelScheduleStatusByID updates a schedule after a run attempt, by schedule ID. func (db *DB) UpdateModelScheduleStatusByID(scheduleID, status, lastError string, nextRunAt *string) error { now := time.Now().UTC().Format(time.RFC3339) var errVal interface{} if lastError != "" { errVal = lastError } _, err := db.conn.Exec( `UPDATE model_schedules SET last_run_at = ?, last_status = ?, last_error = ?, next_run_at = ?, updated_at = ? WHERE id = ?`, now, status, errVal, nextRunAt, now, scheduleID, ) if err != nil { return fmt.Errorf("update model schedule status: %w", err) } return nil } // DeleteModelScheduleByAnchor removes the schedule for a specific pipeline anchor. func (db *DB) DeleteModelScheduleByAnchor(connectionID, anchorModelID string) error { _, err := db.conn.Exec( "DELETE FROM model_schedules WHERE connection_id = ? AND anchor_model_id = ?", connectionID, anchorModelID, ) if err != nil { return fmt.Errorf("delete model schedule: %w", err) } return nil } // GetEnabledModelSchedules returns all enabled model schedules. 
// GetEnabledModelSchedules returns all enabled model schedules across every
// connection (used by the scheduler to find work).
func (db *DB) GetEnabledModelSchedules() ([]ModelSchedule, error) {
	rows, err := db.conn.Query(
		`SELECT id, connection_id, anchor_model_id, cron, enabled, last_run_at, next_run_at, last_status, last_error, created_by, created_at, updated_at FROM model_schedules WHERE enabled = 1`,
	)
	if err != nil {
		return nil, fmt.Errorf("get enabled model schedules: %w", err)
	}
	defer rows.Close()
	var schedules []ModelSchedule
	for rows.Next() {
		s, err := scanModelSchedule(rows)
		if err != nil {
			return nil, err
		}
		schedules = append(schedules, s)
	}
	return schedules, rows.Err()
}

// scanModelSchedule scans the current row of a model_schedules SELECT (column
// order must match the SELECT lists above) into a ModelSchedule, converting
// the INTEGER enabled flag and nullable columns.
func scanModelSchedule(rows *sql.Rows) (ModelSchedule, error) {
	var s ModelSchedule
	var enabled int
	var anchor, lastRun, nextRun, lastStatus, lastErr, createdBy sql.NullString
	if err := rows.Scan(&s.ID, &s.ConnectionID, &anchor, &s.Cron, &enabled, &lastRun, &nextRun, &lastStatus, &lastErr, &createdBy, &s.CreatedAt, &s.UpdatedAt); err != nil {
		return s, fmt.Errorf("scan model schedule: %w", err)
	}
	s.Enabled = enabled == 1
	s.AnchorModelID = nullStringToPtr(anchor)
	s.LastRunAt = nullStringToPtr(lastRun)
	s.NextRunAt = nullStringToPtr(nextRun)
	s.LastStatus = nullStringToPtr(lastStatus)
	s.LastError = nullStringToPtr(lastErr)
	s.CreatedBy = nullStringToPtr(createdBy)
	return s, nil
}

================================================
FILE: internal/database/pipelines.go
================================================
package database

import (
	"database/sql"
	"fmt"
	"time"

	"github.com/google/uuid"
)

// Pipeline represents a data ingestion pipeline.
// Pointer fields map to nullable columns in the pipelines table.
type Pipeline struct {
	ID            string  `json:"id"`
	Name          string  `json:"name"`
	Description   *string `json:"description"`
	ConnectionID  string  `json:"connection_id"`
	Status        string  `json:"status"`
	Config        string  `json:"config"`
	CreatedBy     *string `json:"created_by"`
	LastStartedAt *string `json:"last_started_at"`
	LastStoppedAt *string `json:"last_stopped_at"`
	LastError     *string `json:"last_error"`
	CreatedAt     string  `json:"created_at"`
	UpdatedAt     string  `json:"updated_at"`
}

// PipelineNode represents a node in a pipeline graph.
// ConfigEncrypted holds the node's config; the column name suggests it is
// stored encrypted — confirm against the pipeline service before assuming.
type PipelineNode struct {
	ID              string  `json:"id"`
	PipelineID      string  `json:"pipeline_id"`
	NodeType        string  `json:"node_type"`
	Label           string  `json:"label"`
	PositionX       float64 `json:"position_x"`
	PositionY       float64 `json:"position_y"`
	ConfigEncrypted string  `json:"config_encrypted"`
	CreatedAt       string  `json:"created_at"`
	UpdatedAt       string  `json:"updated_at"`
}

// PipelineEdge represents a connection between two nodes.
type PipelineEdge struct {
	ID           string  `json:"id"`
	PipelineID   string  `json:"pipeline_id"`
	SourceNodeID string  `json:"source_node_id"`
	TargetNodeID string  `json:"target_node_id"`
	SourceHandle *string `json:"source_handle"`
	TargetHandle *string `json:"target_handle"`
	CreatedAt    string  `json:"created_at"`
}

// PipelineRun represents an execution run of a pipeline.
type PipelineRun struct {
	ID            string  `json:"id"`
	PipelineID    string  `json:"pipeline_id"`
	Status        string  `json:"status"`
	StartedAt     string  `json:"started_at"`
	FinishedAt    *string `json:"finished_at"`
	RowsIngested  int64   `json:"rows_ingested"`
	BytesIngested int64   `json:"bytes_ingested"`
	ErrorsCount   int64   `json:"errors_count"`
	LastError     *string `json:"last_error"`
	MetricsJSON   string  `json:"metrics_json"`
	CreatedAt     string  `json:"created_at"`
}

// PipelineRunLog represents a log entry for a pipeline run.
type PipelineRunLog struct { ID string `json:"id"` RunID string `json:"run_id"` Level string `json:"level"` Message string `json:"message"` CreatedAt string `json:"created_at"` } // ── Pipeline CRUD ────────────────────────────────────────────────── // GetPipelines retrieves all pipelines. func (db *DB) GetPipelines() ([]Pipeline, error) { rows, err := db.conn.Query( `SELECT id, name, description, connection_id, status, config, created_by, last_started_at, last_stopped_at, last_error, created_at, updated_at FROM pipelines ORDER BY updated_at DESC`, ) if err != nil { return nil, fmt.Errorf("get pipelines: %w", err) } defer rows.Close() var pipelines []Pipeline for rows.Next() { p, err := scanPipeline(rows) if err != nil { return nil, err } pipelines = append(pipelines, p) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate pipeline rows: %w", err) } return pipelines, nil } // GetPipelineByID retrieves a pipeline by ID. func (db *DB) GetPipelineByID(id string) (*Pipeline, error) { row := db.conn.QueryRow( `SELECT id, name, description, connection_id, status, config, created_by, last_started_at, last_stopped_at, last_error, created_at, updated_at FROM pipelines WHERE id = ?`, id, ) var p Pipeline var desc, createdBy, lastStarted, lastStopped, lastErr sql.NullString err := row.Scan(&p.ID, &p.Name, &desc, &p.ConnectionID, &p.Status, &p.Config, &createdBy, &lastStarted, &lastStopped, &lastErr, &p.CreatedAt, &p.UpdatedAt) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get pipeline by id: %w", err) } p.Description = nullStringToPtr(desc) p.CreatedBy = nullStringToPtr(createdBy) p.LastStartedAt = nullStringToPtr(lastStarted) p.LastStoppedAt = nullStringToPtr(lastStopped) p.LastError = nullStringToPtr(lastErr) return &p, nil } // GetPipelinesByStatus retrieves pipelines with a given status. 
func (db *DB) GetPipelinesByStatus(status string) ([]Pipeline, error) { rows, err := db.conn.Query( `SELECT id, name, description, connection_id, status, config, created_by, last_started_at, last_stopped_at, last_error, created_at, updated_at FROM pipelines WHERE status = ? ORDER BY updated_at DESC`, status, ) if err != nil { return nil, fmt.Errorf("get pipelines by status: %w", err) } defer rows.Close() var pipelines []Pipeline for rows.Next() { p, err := scanPipeline(rows) if err != nil { return nil, err } pipelines = append(pipelines, p) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate pipeline rows: %w", err) } return pipelines, nil } // CreatePipeline creates a new pipeline and returns its ID. func (db *DB) CreatePipeline(name, description, connectionID, createdBy string) (string, error) { id := uuid.NewString() now := time.Now().UTC().Format(time.RFC3339) var desc, creator interface{} if description != "" { desc = description } if createdBy != "" { creator = createdBy } _, err := db.conn.Exec( `INSERT INTO pipelines (id, name, description, connection_id, status, config, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, 'draft', '{}', ?, ?, ?)`, id, name, desc, connectionID, creator, now, now, ) if err != nil { return "", fmt.Errorf("create pipeline: %w", err) } return id, nil } // UpdatePipeline updates a pipeline's name and description. func (db *DB) UpdatePipeline(id, name, description string) error { now := time.Now().UTC().Format(time.RFC3339) var desc interface{} if description != "" { desc = description } _, err := db.conn.Exec( "UPDATE pipelines SET name = ?, description = ?, updated_at = ? WHERE id = ?", name, desc, now, id, ) if err != nil { return fmt.Errorf("update pipeline: %w", err) } return nil } // UpdatePipelineStatus updates a pipeline's status and optional error/timestamp fields. 
func (db *DB) UpdatePipelineStatus(id, status, lastError string) error { now := time.Now().UTC().Format(time.RFC3339) var errVal interface{} if lastError != "" { errVal = lastError } var startedAt, stoppedAt interface{} switch status { case "running", "starting": startedAt = now case "stopped", "error": stoppedAt = now } _, err := db.conn.Exec( `UPDATE pipelines SET status = ?, last_error = ?, last_started_at = COALESCE(?, last_started_at), last_stopped_at = COALESCE(?, last_stopped_at), updated_at = ? WHERE id = ?`, status, errVal, startedAt, stoppedAt, now, id, ) if err != nil { return fmt.Errorf("update pipeline status: %w", err) } return nil } // DeletePipeline deletes a pipeline and all related data (cascade). func (db *DB) DeletePipeline(id string) error { _, err := db.conn.Exec("DELETE FROM pipelines WHERE id = ?", id) if err != nil { return fmt.Errorf("delete pipeline: %w", err) } return nil } // ── Pipeline Graph ───────────────────────────────────────────────── // SavePipelineGraph atomically replaces all nodes and edges for a pipeline. 
func (db *DB) SavePipelineGraph(pipelineID string, nodes []PipelineNode, edges []PipelineEdge, viewportConfig string) error { tx, err := db.conn.Begin() if err != nil { return fmt.Errorf("begin graph transaction: %w", err) } defer tx.Rollback() now := time.Now().UTC().Format(time.RFC3339) // Delete existing nodes and edges (edges cascade from nodes) if _, err := tx.Exec("DELETE FROM pipeline_edges WHERE pipeline_id = ?", pipelineID); err != nil { return fmt.Errorf("delete old edges: %w", err) } if _, err := tx.Exec("DELETE FROM pipeline_nodes WHERE pipeline_id = ?", pipelineID); err != nil { return fmt.Errorf("delete old nodes: %w", err) } // Insert nodes for _, n := range nodes { nodeID := n.ID if nodeID == "" { nodeID = uuid.NewString() } _, err := tx.Exec( `INSERT INTO pipeline_nodes (id, pipeline_id, node_type, label, position_x, position_y, config_encrypted, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`, nodeID, pipelineID, n.NodeType, n.Label, n.PositionX, n.PositionY, n.ConfigEncrypted, now, now, ) if err != nil { return fmt.Errorf("insert node %s: %w", nodeID, err) } } // Insert edges for _, e := range edges { edgeID := e.ID if edgeID == "" { edgeID = uuid.NewString() } var srcHandle, tgtHandle interface{} if e.SourceHandle != nil { srcHandle = *e.SourceHandle } if e.TargetHandle != nil { tgtHandle = *e.TargetHandle } _, err := tx.Exec( `INSERT INTO pipeline_edges (id, pipeline_id, source_node_id, target_node_id, source_handle, target_handle, created_at) VALUES (?, ?, ?, ?, ?, ?, ?)`, edgeID, pipelineID, e.SourceNodeID, e.TargetNodeID, srcHandle, tgtHandle, now, ) if err != nil { return fmt.Errorf("insert edge %s: %w", edgeID, err) } } // Update pipeline config (viewport) and updated_at if viewportConfig != "" { if _, err := tx.Exec("UPDATE pipelines SET config = ?, updated_at = ? 
WHERE id = ?", viewportConfig, now, pipelineID); err != nil { return fmt.Errorf("update pipeline config: %w", err) } } else { if _, err := tx.Exec("UPDATE pipelines SET updated_at = ? WHERE id = ?", now, pipelineID); err != nil { return fmt.Errorf("update pipeline updated_at: %w", err) } } if err := tx.Commit(); err != nil { return fmt.Errorf("commit graph transaction: %w", err) } return nil } // GetPipelineGraph retrieves all nodes and edges for a pipeline. func (db *DB) GetPipelineGraph(pipelineID string) ([]PipelineNode, []PipelineEdge, error) { // Nodes nodeRows, err := db.conn.Query( `SELECT id, pipeline_id, node_type, label, position_x, position_y, config_encrypted, created_at, updated_at FROM pipeline_nodes WHERE pipeline_id = ? ORDER BY created_at ASC`, pipelineID, ) if err != nil { return nil, nil, fmt.Errorf("get pipeline nodes: %w", err) } defer nodeRows.Close() var nodes []PipelineNode for nodeRows.Next() { var n PipelineNode if err := nodeRows.Scan(&n.ID, &n.PipelineID, &n.NodeType, &n.Label, &n.PositionX, &n.PositionY, &n.ConfigEncrypted, &n.CreatedAt, &n.UpdatedAt); err != nil { return nil, nil, fmt.Errorf("scan pipeline node: %w", err) } nodes = append(nodes, n) } if err := nodeRows.Err(); err != nil { return nil, nil, fmt.Errorf("iterate node rows: %w", err) } // Edges edgeRows, err := db.conn.Query( `SELECT id, pipeline_id, source_node_id, target_node_id, source_handle, target_handle, created_at FROM pipeline_edges WHERE pipeline_id = ? 
ORDER BY created_at ASC`, pipelineID, ) if err != nil { return nil, nil, fmt.Errorf("get pipeline edges: %w", err) } defer edgeRows.Close() var edges []PipelineEdge for edgeRows.Next() { var e PipelineEdge var srcHandle, tgtHandle sql.NullString if err := edgeRows.Scan(&e.ID, &e.PipelineID, &e.SourceNodeID, &e.TargetNodeID, &srcHandle, &tgtHandle, &e.CreatedAt); err != nil { return nil, nil, fmt.Errorf("scan pipeline edge: %w", err) } e.SourceHandle = nullStringToPtr(srcHandle) e.TargetHandle = nullStringToPtr(tgtHandle) edges = append(edges, e) } if err := edgeRows.Err(); err != nil { return nil, nil, fmt.Errorf("iterate edge rows: %w", err) } return nodes, edges, nil } // ── Pipeline Runs ────────────────────────────────────────────────── // CreatePipelineRun creates a new run record and returns its ID. func (db *DB) CreatePipelineRun(pipelineID, status string) (string, error) { id := uuid.NewString() now := time.Now().UTC().Format(time.RFC3339) _, err := db.conn.Exec( `INSERT INTO pipeline_runs (id, pipeline_id, status, started_at, created_at) VALUES (?, ?, ?, ?, ?)`, id, pipelineID, status, now, now, ) if err != nil { return "", fmt.Errorf("create pipeline run: %w", err) } return id, nil } // UpdatePipelineRun updates a run's status and metrics. func (db *DB) UpdatePipelineRun(id, status string, rowsIngested, bytesIngested, errorsCount int64, lastError, metricsJSON string) error { now := time.Now().UTC().Format(time.RFC3339) var errVal interface{} if lastError != "" { errVal = lastError } var finished interface{} if status != "running" { finished = now } if metricsJSON == "" { metricsJSON = "{}" } _, err := db.conn.Exec( `UPDATE pipeline_runs SET status = ?, finished_at = ?, rows_ingested = ?, bytes_ingested = ?, errors_count = ?, last_error = ?, metrics_json = ? 
WHERE id = ?`, status, finished, rowsIngested, bytesIngested, errorsCount, errVal, metricsJSON, id, ) if err != nil { return fmt.Errorf("update pipeline run: %w", err) } return nil } // GetPipelineRuns retrieves runs for a pipeline with limit/offset. func (db *DB) GetPipelineRuns(pipelineID string, limit, offset int) ([]PipelineRun, error) { rows, err := db.conn.Query( `SELECT id, pipeline_id, status, started_at, finished_at, rows_ingested, bytes_ingested, errors_count, last_error, metrics_json, created_at FROM pipeline_runs WHERE pipeline_id = ? ORDER BY started_at DESC LIMIT ? OFFSET ?`, pipelineID, limit, offset, ) if err != nil { return nil, fmt.Errorf("get pipeline runs: %w", err) } defer rows.Close() var runs []PipelineRun for rows.Next() { var r PipelineRun var finished, lastErr sql.NullString if err := rows.Scan(&r.ID, &r.PipelineID, &r.Status, &r.StartedAt, &finished, &r.RowsIngested, &r.BytesIngested, &r.ErrorsCount, &lastErr, &r.MetricsJSON, &r.CreatedAt); err != nil { return nil, fmt.Errorf("scan pipeline run: %w", err) } r.FinishedAt = nullStringToPtr(finished) r.LastError = nullStringToPtr(lastErr) runs = append(runs, r) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate run rows: %w", err) } return runs, nil } // ── Pipeline Run Logs ────────────────────────────────────────────── // CreatePipelineRunLog creates a log entry for a pipeline run. func (db *DB) CreatePipelineRunLog(runID, level, message string) error { id := uuid.NewString() _, err := db.conn.Exec( `INSERT INTO pipeline_run_logs (id, run_id, level, message, created_at) VALUES (?, ?, ?, ?, CURRENT_TIMESTAMP)`, id, runID, level, message, ) if err != nil { return fmt.Errorf("create pipeline run log: %w", err) } return nil } // GetPipelineRunLogs retrieves logs for a pipeline run. 
func (db *DB) GetPipelineRunLogs(runID string, limit int) ([]PipelineRunLog, error) { if limit <= 0 { limit = 200 } rows, err := db.conn.Query( `SELECT id, run_id, level, message, created_at FROM pipeline_run_logs WHERE run_id = ? ORDER BY created_at DESC LIMIT ?`, runID, limit, ) if err != nil { return nil, fmt.Errorf("get pipeline run logs: %w", err) } defer rows.Close() var logs []PipelineRunLog for rows.Next() { var l PipelineRunLog if err := rows.Scan(&l.ID, &l.RunID, &l.Level, &l.Message, &l.CreatedAt); err != nil { return nil, fmt.Errorf("scan pipeline run log: %w", err) } logs = append(logs, l) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate log rows: %w", err) } return logs, nil } // ── Helpers ──────────────────────────────────────────────────────── // scanPipeline scans a pipeline row from a *sql.Rows. func scanPipeline(rows *sql.Rows) (Pipeline, error) { var p Pipeline var desc, createdBy, lastStarted, lastStopped, lastErr sql.NullString if err := rows.Scan(&p.ID, &p.Name, &desc, &p.ConnectionID, &p.Status, &p.Config, &createdBy, &lastStarted, &lastStopped, &lastErr, &p.CreatedAt, &p.UpdatedAt); err != nil { return p, fmt.Errorf("scan pipeline: %w", err) } p.Description = nullStringToPtr(desc) p.CreatedBy = nullStringToPtr(createdBy) p.LastStartedAt = nullStringToPtr(lastStarted) p.LastStoppedAt = nullStringToPtr(lastStopped) p.LastError = nullStringToPtr(lastErr) return p, nil } ================================================ FILE: internal/database/rate_limits.go ================================================ package database import ( "database/sql" "fmt" "time" ) // RateLimitEntry represents a rate limit record. 
type RateLimitEntry struct { Identifier string `json:"identifier"` Type string `json:"type"` Attempts int `json:"attempts"` FirstAttemptAt string `json:"first_attempt_at"` LockedUntil *string `json:"locked_until"` CreatedAt string `json:"created_at"` UpdatedAt string `json:"updated_at"` } // GetRateLimit retrieves a rate limit entry by identifier. func (db *DB) GetRateLimit(identifier string) (*RateLimitEntry, error) { row := db.conn.QueryRow( "SELECT identifier, type, attempts, first_attempt_at, locked_until, created_at, updated_at FROM rate_limits WHERE identifier = ?", identifier, ) var r RateLimitEntry var lockedUntil sql.NullString err := row.Scan( &r.Identifier, &r.Type, &r.Attempts, &r.FirstAttemptAt, &lockedUntil, &r.CreatedAt, &r.UpdatedAt, ) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get rate limit: %w", err) } r.LockedUntil = nullStringToPtr(lockedUntil) return &r, nil } // UpsertRateLimit inserts or updates a rate limit entry. func (db *DB) UpsertRateLimit(identifier, limitType string, attempts int, firstAttempt time.Time, lockedUntil *time.Time) error { now := time.Now().UTC().Format(time.RFC3339) firstAttemptStr := firstAttempt.UTC().Format(time.RFC3339) var lockedUntilVal interface{} if lockedUntil != nil { lockedUntilVal = lockedUntil.UTC().Format(time.RFC3339) } _, err := db.conn.Exec(` INSERT INTO rate_limits (identifier, type, attempts, first_attempt_at, locked_until, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?) ON CONFLICT(identifier) DO UPDATE SET type = excluded.type, attempts = excluded.attempts, first_attempt_at = excluded.first_attempt_at, locked_until = excluded.locked_until, updated_at = excluded.updated_at `, identifier, limitType, attempts, firstAttemptStr, lockedUntilVal, now, now) if err != nil { return fmt.Errorf("upsert rate limit: %w", err) } return nil } // DeleteRateLimit deletes a rate limit entry by identifier. 
func (db *DB) DeleteRateLimit(identifier string) error { _, err := db.conn.Exec("DELETE FROM rate_limits WHERE identifier = ?", identifier) if err != nil { return fmt.Errorf("delete rate limit: %w", err) } return nil } // CleanupExpiredRateLimits removes rate limit entries that have expired based on the window. // Returns the number of entries deleted. func (db *DB) CleanupExpiredRateLimits(windowMs int64) (int64, error) { cutoff := time.Now().UTC().Add(-time.Duration(windowMs) * time.Millisecond).Format(time.RFC3339) now := time.Now().UTC().Format(time.RFC3339) result, err := db.conn.Exec(` DELETE FROM rate_limits WHERE (first_attempt_at < ? AND (locked_until IS NULL OR locked_until < ?)) OR (locked_until IS NOT NULL AND locked_until < ?) `, cutoff, now, now) if err != nil { return 0, fmt.Errorf("cleanup expired rate limits: %w", err) } count, err := result.RowsAffected() if err != nil { return 0, fmt.Errorf("get rows affected: %w", err) } return count, nil } ================================================ FILE: internal/database/saved_queries.go ================================================ package database import ( "database/sql" "fmt" "time" "github.com/google/uuid" ) // SavedQuery represents a saved SQL query. type SavedQuery struct { ID string `json:"id"` Name string `json:"name"` Description *string `json:"description"` Query string `json:"query"` ConnectionID *string `json:"connection_id"` CreatedBy *string `json:"created_by"` CreatedAt string `json:"created_at"` UpdatedAt string `json:"updated_at"` } // CreateSavedQueryParams holds parameters for creating a saved query. type CreateSavedQueryParams struct { Name string Description string Query string ConnectionID string CreatedBy string } // GetSavedQueries retrieves all saved queries. 
func (db *DB) GetSavedQueries() ([]SavedQuery, error) { rows, err := db.conn.Query( `SELECT id, name, description, query, connection_id, created_by, created_at, updated_at FROM saved_queries ORDER BY updated_at DESC`, ) if err != nil { return nil, fmt.Errorf("get saved queries: %w", err) } defer rows.Close() var queries []SavedQuery for rows.Next() { var q SavedQuery var desc, connID, createdBy sql.NullString if err := rows.Scan(&q.ID, &q.Name, &desc, &q.Query, &connID, &createdBy, &q.CreatedAt, &q.UpdatedAt); err != nil { return nil, fmt.Errorf("scan saved query: %w", err) } q.Description = nullStringToPtr(desc) q.ConnectionID = nullStringToPtr(connID) q.CreatedBy = nullStringToPtr(createdBy) queries = append(queries, q) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate saved query rows: %w", err) } return queries, nil } // GetSavedQueryByID retrieves a saved query by ID. func (db *DB) GetSavedQueryByID(id string) (*SavedQuery, error) { row := db.conn.QueryRow( `SELECT id, name, description, query, connection_id, created_by, created_at, updated_at FROM saved_queries WHERE id = ?`, id, ) var q SavedQuery var desc, connID, createdBy sql.NullString err := row.Scan(&q.ID, &q.Name, &desc, &q.Query, &connID, &createdBy, &q.CreatedAt, &q.UpdatedAt) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get saved query by id: %w", err) } q.Description = nullStringToPtr(desc) q.ConnectionID = nullStringToPtr(connID) q.CreatedBy = nullStringToPtr(createdBy) return &q, nil } // CreateSavedQuery creates a new saved query and returns its ID. 
func (db *DB) CreateSavedQuery(params CreateSavedQueryParams) (string, error) { id := uuid.NewString() now := time.Now().UTC().Format(time.RFC3339) var desc, connID, createdBy interface{} if params.Description != "" { desc = params.Description } if params.ConnectionID != "" { connID = params.ConnectionID } if params.CreatedBy != "" { createdBy = params.CreatedBy } _, err := db.conn.Exec( `INSERT INTO saved_queries (id, name, description, query, connection_id, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, id, params.Name, desc, params.Query, connID, createdBy, now, now, ) if err != nil { return "", fmt.Errorf("create saved query: %w", err) } return id, nil } // UpdateSavedQuery updates an existing saved query. func (db *DB) UpdateSavedQuery(id, name, description, query, connectionID string) error { now := time.Now().UTC().Format(time.RFC3339) var desc, connID interface{} if description != "" { desc = description } if connectionID != "" { connID = connectionID } _, err := db.conn.Exec( `UPDATE saved_queries SET name = ?, description = ?, query = ?, connection_id = ?, updated_at = ? WHERE id = ?`, name, desc, query, connID, now, id, ) if err != nil { return fmt.Errorf("update saved query: %w", err) } return nil } // DeleteSavedQuery deletes a saved query by ID. func (db *DB) DeleteSavedQuery(id string) error { _, err := db.conn.Exec("DELETE FROM saved_queries WHERE id = ?", id) if err != nil { return fmt.Errorf("delete saved query: %w", err) } return nil } ================================================ FILE: internal/database/schedules.go ================================================ package database import ( "database/sql" "fmt" "time" "github.com/google/uuid" ) // Schedule represents a scheduled query. 
type Schedule struct { ID string `json:"id"` Name string `json:"name"` SavedQueryID string `json:"saved_query_id"` ConnectionID *string `json:"connection_id"` Cron string `json:"cron"` Timezone string `json:"timezone"` Enabled bool `json:"enabled"` TimeoutMs int `json:"timeout_ms"` LastRunAt *string `json:"last_run_at"` NextRunAt *string `json:"next_run_at"` LastStatus *string `json:"last_status"` LastError *string `json:"last_error"` CreatedBy *string `json:"created_by"` CreatedAt string `json:"created_at"` UpdatedAt string `json:"updated_at"` } // ScheduleRun represents a single execution of a scheduled query. type ScheduleRun struct { ID string `json:"id"` ScheduleID string `json:"schedule_id"` StartedAt string `json:"started_at"` FinishedAt *string `json:"finished_at"` Status string `json:"status"` RowsAffected int `json:"rows_affected"` ElapsedMs int `json:"elapsed_ms"` Error *string `json:"error"` CreatedAt string `json:"created_at"` } // GetSchedules retrieves all schedules. func (db *DB) GetSchedules() ([]Schedule, error) { rows, err := db.conn.Query( `SELECT id, name, saved_query_id, connection_id, cron, timezone, enabled, timeout_ms, last_run_at, next_run_at, last_status, last_error, created_by, created_at, updated_at FROM schedules ORDER BY created_at DESC`, ) if err != nil { return nil, fmt.Errorf("get schedules: %w", err) } defer rows.Close() var schedules []Schedule for rows.Next() { s, err := scanSchedule(rows) if err != nil { return nil, err } schedules = append(schedules, s) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate schedule rows: %w", err) } return schedules, nil } // GetEnabledSchedules retrieves all enabled schedules. 
func (db *DB) GetEnabledSchedules() ([]Schedule, error) { rows, err := db.conn.Query( `SELECT id, name, saved_query_id, connection_id, cron, timezone, enabled, timeout_ms, last_run_at, next_run_at, last_status, last_error, created_by, created_at, updated_at FROM schedules WHERE enabled = 1 ORDER BY created_at DESC`, ) if err != nil { return nil, fmt.Errorf("get enabled schedules: %w", err) } defer rows.Close() var schedules []Schedule for rows.Next() { s, err := scanSchedule(rows) if err != nil { return nil, err } schedules = append(schedules, s) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate enabled schedule rows: %w", err) } return schedules, nil } // GetScheduleByID retrieves a schedule by ID. func (db *DB) GetScheduleByID(id string) (*Schedule, error) { row := db.conn.QueryRow( `SELECT id, name, saved_query_id, connection_id, cron, timezone, enabled, timeout_ms, last_run_at, next_run_at, last_status, last_error, created_by, created_at, updated_at FROM schedules WHERE id = ?`, id, ) var s Schedule var connID, lastRun, nextRun, lastStatus, lastError, createdBy sql.NullString var enabled int err := row.Scan(&s.ID, &s.Name, &s.SavedQueryID, &connID, &s.Cron, &s.Timezone, &enabled, &s.TimeoutMs, &lastRun, &nextRun, &lastStatus, &lastError, &createdBy, &s.CreatedAt, &s.UpdatedAt) if err == sql.ErrNoRows { return nil, nil } if err != nil { return nil, fmt.Errorf("get schedule by id: %w", err) } s.Enabled = enabled == 1 s.ConnectionID = nullStringToPtr(connID) s.LastRunAt = nullStringToPtr(lastRun) s.NextRunAt = nullStringToPtr(nextRun) s.LastStatus = nullStringToPtr(lastStatus) s.LastError = nullStringToPtr(lastError) s.CreatedBy = nullStringToPtr(createdBy) return &s, nil } // CreateSchedule creates a new schedule and returns its ID. 
func (db *DB) CreateSchedule(name, savedQueryID, connectionID, cron, timezone, createdBy string, timeoutMs int) (string, error) { id := uuid.NewString() now := time.Now().UTC().Format(time.RFC3339) var connID, creator interface{} if connectionID != "" { connID = connectionID } if createdBy != "" { creator = createdBy } if timezone == "" { timezone = "UTC" } if timeoutMs <= 0 { timeoutMs = 60000 } _, err := db.conn.Exec( `INSERT INTO schedules (id, name, saved_query_id, connection_id, cron, timezone, enabled, timeout_ms, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, 1, ?, ?, ?, ?)`, id, name, savedQueryID, connID, cron, timezone, timeoutMs, creator, now, now, ) if err != nil { return "", fmt.Errorf("create schedule: %w", err) } return id, nil } // UpdateSchedule updates a schedule. func (db *DB) UpdateSchedule(id, name, cron, timezone string, enabled bool, timeoutMs int) error { now := time.Now().UTC().Format(time.RFC3339) enabledInt := 0 if enabled { enabledInt = 1 } _, err := db.conn.Exec( `UPDATE schedules SET name = ?, cron = ?, timezone = ?, enabled = ?, timeout_ms = ?, updated_at = ? WHERE id = ?`, name, cron, timezone, enabledInt, timeoutMs, now, id, ) if err != nil { return fmt.Errorf("update schedule: %w", err) } return nil } // UpdateScheduleStatus updates the last run info for a schedule. func (db *DB) UpdateScheduleStatus(id, status, lastError string, nextRunAt *time.Time) error { now := time.Now().UTC().Format(time.RFC3339) var errVal, nextVal interface{} if lastError != "" { errVal = lastError } if nextRunAt != nil { nextVal = nextRunAt.UTC().Format(time.RFC3339) } _, err := db.conn.Exec( `UPDATE schedules SET last_run_at = ?, last_status = ?, last_error = ?, next_run_at = ?, updated_at = ? WHERE id = ?`, now, status, errVal, nextVal, now, id, ) if err != nil { return fmt.Errorf("update schedule status: %w", err) } return nil } // DeleteSchedule deletes a schedule and all its runs (cascade). 
// DeleteSchedule deletes a schedule and all its runs (cascade).
func (db *DB) DeleteSchedule(id string) error {
	_, err := db.conn.Exec("DELETE FROM schedules WHERE id = ?", id)
	if err != nil {
		return fmt.Errorf("delete schedule: %w", err)
	}
	return nil
}

// CreateScheduleRun creates a new schedule run record and returns its ID.
// started_at and created_at are both stamped with the current RFC3339 UTC time.
func (db *DB) CreateScheduleRun(scheduleID, status string) (string, error) {
	id := uuid.NewString()
	now := time.Now().UTC().Format(time.RFC3339)
	_, err := db.conn.Exec(
		`INSERT INTO schedule_runs (id, schedule_id, started_at, status, created_at)
		VALUES (?, ?, ?, ?, ?)`,
		id, scheduleID, now, status, now,
	)
	if err != nil {
		return "", fmt.Errorf("create schedule run: %w", err)
	}
	return id, nil
}

// UpdateScheduleRun updates a schedule run with results.
// finished_at is stamped with the current time; an empty runError is stored as NULL.
func (db *DB) UpdateScheduleRun(id, status string, rowsAffected, elapsedMs int, runError string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	var errVal interface{}
	if runError != "" {
		errVal = runError
	}
	_, err := db.conn.Exec(
		`UPDATE schedule_runs SET finished_at = ?, status = ?, rows_affected = ?, elapsed_ms = ?, error = ?
		WHERE id = ?`,
		now, status, rowsAffected, elapsedMs, errVal, id,
	)
	if err != nil {
		return fmt.Errorf("update schedule run: %w", err)
	}
	return nil
}

// GetScheduleRuns retrieves runs for a schedule, most recent first.
// Non-positive limits default to 50; negative offsets are clamped to 0.
func (db *DB) GetScheduleRuns(scheduleID string, limit, offset int) ([]ScheduleRun, error) {
	if limit <= 0 {
		limit = 50
	}
	if offset < 0 {
		offset = 0
	}
	rows, err := db.conn.Query(
		`SELECT id, schedule_id, started_at, finished_at, status, rows_affected, elapsed_ms, error, created_at
		FROM schedule_runs WHERE schedule_id = ?
		ORDER BY started_at DESC LIMIT ? OFFSET ?`,
		scheduleID, limit, offset,
	)
	if err != nil {
		return nil, fmt.Errorf("get schedule runs: %w", err)
	}
	defer rows.Close()

	var runs []ScheduleRun
	for rows.Next() {
		var r ScheduleRun
		var finishedAt, runError sql.NullString
		if err := rows.Scan(&r.ID, &r.ScheduleID, &r.StartedAt, &finishedAt, &r.Status, &r.RowsAffected, &r.ElapsedMs, &runError, &r.CreatedAt); err != nil {
			return nil, fmt.Errorf("scan schedule run: %w", err)
		}
		r.FinishedAt = nullStringToPtr(finishedAt)
		r.Error = nullStringToPtr(runError)
		runs = append(runs, r)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate schedule run rows: %w", err)
	}
	return runs, nil
}

// scanSchedule is a helper for scanning schedule rows.
// Column order must match the SELECT lists in GetSchedules/GetEnabledSchedules.
func scanSchedule(rows *sql.Rows) (Schedule, error) {
	var s Schedule
	var connID, lastRun, nextRun, lastStatus, lastError, createdBy sql.NullString
	var enabled int
	err := rows.Scan(&s.ID, &s.Name, &s.SavedQueryID, &connID, &s.Cron, &s.Timezone, &enabled, &s.TimeoutMs, &lastRun, &nextRun, &lastStatus, &lastError, &createdBy, &s.CreatedAt, &s.UpdatedAt)
	if err != nil {
		return s, fmt.Errorf("scan schedule: %w", err)
	}
	// Convert the integer flag and nullable columns to their Go representations.
	s.Enabled = enabled == 1
	s.ConnectionID = nullStringToPtr(connID)
	s.LastRunAt = nullStringToPtr(lastRun)
	s.NextRunAt = nullStringToPtr(nextRun)
	s.LastStatus = nullStringToPtr(lastStatus)
	s.LastError = nullStringToPtr(lastError)
	s.CreatedBy = nullStringToPtr(createdBy)
	return s, nil
}

================================================
FILE: internal/database/sessions.go
================================================
package database

import (
	"database/sql"
	"fmt"
	"time"

	"github.com/google/uuid"
)

// Session represents an authenticated session.
type Session struct {
	ID                string  `json:"id"`
	ConnectionID      string  `json:"connection_id"`
	ClickhouseUser    string  `json:"clickhouse_user"`
	EncryptedPassword string  `json:"encrypted_password"`
	Token             string  `json:"token"`
	ExpiresAt         string  `json:"expires_at"` // RFC3339 (legacy rows may use a JS-style format; see GetSession)
	UserRole          *string `json:"user_role"`  // nil when the column is NULL
	CreatedAt         string  `json:"created_at"`
}

// CreateSessionParams holds parameters for creating a session.
type CreateSessionParams struct {
	ConnectionID      string
	ClickhouseUser    string
	EncryptedPassword string
	Token             string
	ExpiresAt         string
	UserRole          string // defaults to "viewer" if empty
}

// SessionUser represents an aggregated user from sessions.
type SessionUser struct {
	Username     string `json:"username"`
	UserRole     string `json:"user_role"`
	LastLogin    string `json:"last_login"`
	SessionCount int    `json:"session_count"`
}

// GetSession retrieves a session by token. Deletes and returns nil if expired.
// Also returns (nil, nil) when no session matches the token, or when the stored
// expires_at cannot be parsed (such a row is treated as expired and removed).
func (db *DB) GetSession(token string) (*Session, error) {
	row := db.conn.QueryRow(
		"SELECT id, connection_id, clickhouse_user, encrypted_password, token, expires_at, user_role, created_at FROM sessions WHERE token = ?",
		token,
	)
	var s Session
	var userRole sql.NullString
	err := row.Scan(
		&s.ID, &s.ConnectionID, &s.ClickhouseUser, &s.EncryptedPassword,
		&s.Token, &s.ExpiresAt, &userRole, &s.CreatedAt,
	)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("get session: %w", err)
	}
	s.UserRole = nullStringToPtr(userRole)

	// Check if session has expired
	expiresAt, err := time.Parse(time.RFC3339, s.ExpiresAt)
	if err != nil {
		// Fallback for millisecond-Zulu timestamps — presumably written by an
		// earlier implementation; NOTE(review): confirm this legacy format is
		// still present before removing the fallback.
		expiresAt, err = time.Parse("2006-01-02T15:04:05.000Z", s.ExpiresAt)
		if err != nil {
			// Unparsable expiry: treat the row as corrupt/expired.
			// Best-effort delete — the Exec error is intentionally ignored.
			db.conn.Exec("DELETE FROM sessions WHERE id = ?", s.ID)
			return nil, nil
		}
	}
	if time.Now().UTC().After(expiresAt) {
		// Expired: lazily remove the row (best-effort) and report "no session".
		db.conn.Exec("DELETE FROM sessions WHERE id = ?", s.ID)
		return nil, nil
	}

	return &s, nil
}

// CreateSession creates a new session and returns its ID.
func (db *DB) CreateSession(params CreateSessionParams) (string, error) { id := uuid.NewString() userRole := params.UserRole if userRole == "" { userRole = "viewer" } _, err := db.conn.Exec( `INSERT INTO sessions (id, connection_id, clickhouse_user, encrypted_password, token, expires_at, user_role) VALUES (?, ?, ?, ?, ?, ?, ?)`, id, params.ConnectionID, params.ClickhouseUser, params.EncryptedPassword, params.Token, params.ExpiresAt, userRole, ) if err != nil { return "", fmt.Errorf("create session: %w", err) } return id, nil } // DeleteSession deletes a session by its token. func (db *DB) DeleteSession(token string) error { _, err := db.conn.Exec("DELETE FROM sessions WHERE token = ?", token) if err != nil { return fmt.Errorf("delete session: %w", err) } return nil } // SetSessionsUserRole updates the cached app role for all active/inactive sessions of a user. func (db *DB) SetSessionsUserRole(username, role string) error { if role == "" { role = "viewer" } _, err := db.conn.Exec("UPDATE sessions SET user_role = ? WHERE clickhouse_user = ?", role, username) if err != nil { return fmt.Errorf("set sessions user role: %w", err) } return nil } // GetUsers returns aggregated user data from sessions. 
// GetUsers aggregates the sessions table into one row per ClickHouse user,
// with their most recent login time and total session count.
//
// NOTE(review): user_role is selected without an aggregate while grouping by
// clickhouse_user — SQLite returns the role from an arbitrary row of the
// group; confirm this is acceptable (roles are kept uniform per user via
// SetSessionsUserRole, presumably).
func (db *DB) GetUsers() ([]SessionUser, error) {
	rows, err := db.conn.Query(`
		SELECT clickhouse_user, user_role, MAX(created_at) as last_login, COUNT(*) as session_count
		FROM sessions
		GROUP BY clickhouse_user
		ORDER BY last_login DESC
	`)
	if err != nil {
		return nil, fmt.Errorf("get users: %w", err)
	}
	defer rows.Close()
	var users []SessionUser
	for rows.Next() {
		var u SessionUser
		var userRole sql.NullString
		if err := rows.Scan(&u.Username, &userRole, &u.LastLogin, &u.SessionCount); err != nil {
			return nil, fmt.Errorf("scan user: %w", err)
		}
		if userRole.Valid {
			u.UserRole = userRole.String
		} else {
			// Sessions predating role caching fall back to the default role.
			u.UserRole = "viewer"
		}
		users = append(users, u)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate user rows: %w", err)
	}
	return users, nil
}

// GetActiveSessionsByConnection returns up to limit active sessions for a connection,
// ordered by most recently created first.
//
// "Active" means expires_at is lexicographically greater than the current
// RFC3339 UTC timestamp (valid because RFC3339 strings sort chronologically).
// A non-positive limit defaults to 5.
func (db *DB) GetActiveSessionsByConnection(connectionID string, limit int) ([]Session, error) {
	if limit <= 0 {
		limit = 5
	}
	now := time.Now().UTC().Format(time.RFC3339)
	rows, err := db.conn.Query(
		`SELECT id, connection_id, clickhouse_user, encrypted_password, token, expires_at, user_role, created_at
		FROM sessions
		WHERE connection_id = ? AND expires_at > ?
		ORDER BY created_at DESC
		LIMIT ?`,
		connectionID, now, limit,
	)
	if err != nil {
		return nil, fmt.Errorf("get active sessions by connection: %w", err)
	}
	defer rows.Close()
	sessions := make([]Session, 0, limit)
	for rows.Next() {
		var s Session
		var role sql.NullString
		if err := rows.Scan(
			&s.ID, &s.ConnectionID, &s.ClickhouseUser, &s.EncryptedPassword,
			&s.Token, &s.ExpiresAt, &role, &s.CreatedAt,
		); err != nil {
			return nil, fmt.Errorf("scan active session: %w", err)
		}
		s.UserRole = nullStringToPtr(role)
		sessions = append(sessions, s)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate active sessions: %w", err)
	}
	return sessions, nil
}

================================================
FILE: internal/database/settings.go
================================================
package database

import (
	"database/sql"
	"fmt"
	"strings"
	"time"
)

// Setting keys for governance features.
const (
	SettingGovernanceSyncEnabled   = "governance.sync_enabled"
	SettingGovernanceUpgradeBanner = "governance.upgrade_banner_dismissed"
	SettingGovernanceSyncUpdatedBy = "governance.sync_updated_by"
	SettingGovernanceSyncUpdatedAt = "governance.sync_updated_at"
)

// GovernanceSyncEnabled reports whether admins have opted in to the governance
// background sync. Unset keys default to false (opt-in semantics).
func (db *DB) GovernanceSyncEnabled() bool {
	// GetSetting's error is intentionally discarded: any failure reads as
	// "not enabled", which is the safe default for an opt-in feature.
	v, _ := db.GetSetting(SettingGovernanceSyncEnabled)
	return strings.EqualFold(strings.TrimSpace(v), "true")
}

// SetGovernanceSyncEnabled stores the opt-in flag plus who/when toggled it.
// The three writes are separate statements, not a transaction; a failure
// mid-way leaves earlier keys updated.
func (db *DB) SetGovernanceSyncEnabled(enabled bool, actor string) error {
	val := "false"
	if enabled {
		val = "true"
	}
	if err := db.SetSetting(SettingGovernanceSyncEnabled, val); err != nil {
		return err
	}
	if err := db.SetSetting(SettingGovernanceSyncUpdatedBy, actor); err != nil {
		return err
	}
	return db.SetSetting(SettingGovernanceSyncUpdatedAt, time.Now().UTC().Format(time.RFC3339))
}

// GetSetting retrieves a setting value by key.
// Returns empty string if not found.
func (db *DB) GetSetting(key string) (string, error) {
	var value string
	err := db.conn.QueryRow("SELECT value FROM settings WHERE key = ?", key).Scan(&value)
	if err == sql.ErrNoRows {
		// Missing key is not an error: callers treat "" as "unset".
		return "", nil
	}
	if err != nil {
		return "", fmt.Errorf("get setting: %w", err)
	}
	return value, nil
}

// SetSetting sets or updates a setting value (upsert).
// updated_at is refreshed to the current UTC time on both insert and update.
func (db *DB) SetSetting(key, value string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	_, err := db.conn.Exec(
		`INSERT INTO settings (key, value, updated_at) VALUES (?, ?, ?)
		ON CONFLICT(key) DO UPDATE SET value = excluded.value, updated_at = excluded.updated_at`,
		key, value, now,
	)
	if err != nil {
		return fmt.Errorf("set setting: %w", err)
	}
	return nil
}

// GetAllSettings retrieves all settings as a map.
func (db *DB) GetAllSettings() (map[string]string, error) {
	rows, err := db.conn.Query("SELECT key, value FROM settings")
	if err != nil {
		return nil, fmt.Errorf("get all settings: %w", err)
	}
	defer rows.Close()
	settings := make(map[string]string)
	for rows.Next() {
		var key, value string
		if err := rows.Scan(&key, &value); err != nil {
			return nil, fmt.Errorf("scan setting: %w", err)
		}
		settings[key] = value
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate setting rows: %w", err)
	}
	return settings, nil
}

// DeleteSetting removes a setting by key.
// Deleting a non-existent key is a no-op, not an error.
func (db *DB) DeleteSetting(key string) error {
	_, err := db.conn.Exec("DELETE FROM settings WHERE key = ?", key)
	if err != nil {
		return fmt.Errorf("delete setting: %w", err)
	}
	return nil
}

================================================
FILE: internal/database/user_roles.go
================================================
package database

import (
	"database/sql"
	"fmt"
)

// UserRole represents a CH-UI role assignment for a ClickHouse user.
type UserRole struct {
	Username  string `json:"username"`
	Role      string `json:"role"`
	CreatedAt string `json:"created_at"`
}

// GetUserRole retrieves the CH-UI role for a user.
// Returns empty string if not set (meaning auto-detect from ClickHouse). func (db *DB) GetUserRole(username string) (string, error) { var role string err := db.conn.QueryRow("SELECT role FROM user_roles WHERE username = ?", username).Scan(&role) if err != nil { if err.Error() == "sql: no rows in result set" { return "", nil } return "", fmt.Errorf("get user role: %w", err) } return role, nil } // SetUserRole sets or updates the CH-UI role for a user (upsert). func (db *DB) SetUserRole(username, role string) error { _, err := db.conn.Exec( `INSERT INTO user_roles (username, role) VALUES (?, ?) ON CONFLICT(username) DO UPDATE SET role = excluded.role`, username, role, ) if err != nil { return fmt.Errorf("set user role: %w", err) } return nil } // DeleteUserRole removes the CH-UI role assignment for a user (reverts to auto-detect). func (db *DB) DeleteUserRole(username string) error { _, err := db.conn.Exec("DELETE FROM user_roles WHERE username = ?", username) if err != nil { return fmt.Errorf("delete user role: %w", err) } return nil } // GetAllUserRoles retrieves all CH-UI role assignments. func (db *DB) GetAllUserRoles() ([]UserRole, error) { rows, err := db.conn.Query("SELECT username, role, created_at FROM user_roles ORDER BY username ASC") if err != nil { return nil, fmt.Errorf("get all user roles: %w", err) } defer rows.Close() var roles []UserRole for rows.Next() { var r UserRole if err := rows.Scan(&r.Username, &r.Role, &r.CreatedAt); err != nil { return nil, fmt.Errorf("scan user role: %w", err) } roles = append(roles, r) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate user role rows: %w", err) } return roles, nil } // CountUsersWithRole returns the number of users currently assigned a given CH-UI role. 
func (db *DB) CountUsersWithRole(role string) (int, error) {
	var count int
	err := db.conn.QueryRow("SELECT COUNT(*) FROM user_roles WHERE role = ?", role).Scan(&count)
	if err != nil {
		return 0, fmt.Errorf("count users with role: %w", err)
	}
	return count, nil
}

// IsUserRole returns true if username currently has the given explicit role in CH-UI.
// Users without an explicit assignment (auto-detect) report false for every role.
func (db *DB) IsUserRole(username, role string) (bool, error) {
	var exists int
	err := db.conn.QueryRow(
		"SELECT 1 FROM user_roles WHERE username = ? AND role = ? LIMIT 1",
		username, role,
	).Scan(&exists)
	if err != nil {
		if err == sql.ErrNoRows {
			return false, nil
		}
		return false, fmt.Errorf("is user role: %w", err)
	}
	return exists == 1, nil
}

================================================
FILE: internal/embedded/embedded.go
================================================
package embedded

import (
	"fmt"
	"log/slog"
	"strings"
	"time"

	"github.com/caioricciuti/ch-ui/connector"
	connconfig "github.com/caioricciuti/ch-ui/connector/config"
	"github.com/caioricciuti/ch-ui/connector/ui"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/license"
)

// EmbeddedAgent manages an in-process tunnel connector that connects
// to the local CH-UI server for the embedded ClickHouse connection.
type EmbeddedAgent struct {
	conn *connector.Connector
	db   *database.DB
}

// Start creates the embedded connection record if needed and launches
// the in-process connector. It should be called after the HTTP server is
// listening so the WebSocket endpoint is available.
// Start returns (nil, nil) when clickhouseURL is empty — the embedded agent is
// optional. The connector itself runs in a background goroutine; Start does
// not wait for it to connect.
func Start(db *database.DB, port int, clickhouseURL, connectionName string) (*EmbeddedAgent, error) {
	if clickhouseURL == "" {
		slog.Info("No CLICKHOUSE_URL configured, skipping embedded agent")
		return nil, nil
	}
	connectionName = strings.TrimSpace(connectionName)
	if connectionName == "" {
		connectionName = "Local ClickHouse"
	}
	// Ensure an embedded connection record exists
	dbConn, err := db.GetEmbeddedConnection()
	if err != nil {
		return nil, fmt.Errorf("check embedded connection: %w", err)
	}
	if dbConn == nil {
		// First run: create the record with a freshly generated tunnel token,
		// then re-fetch it so we have the full row (including the token).
		token := license.GenerateTunnelToken()
		id, err := db.CreateConnection(connectionName, token, true)
		if err != nil {
			return nil, fmt.Errorf("create embedded connection: %w", err)
		}
		slog.Info("Created embedded connection", "id", id)
		dbConn, err = db.GetConnectionByID(id)
		if err != nil || dbConn == nil {
			return nil, fmt.Errorf("fetch created embedded connection: %w", err)
		}
	} else if strings.TrimSpace(dbConn.Name) != connectionName {
		// Record exists but was renamed via configuration: sync the name.
		if err := db.UpdateConnectionName(dbConn.ID, connectionName); err != nil {
			return nil, fmt.Errorf("update embedded connection name: %w", err)
		}
		dbConn.Name = connectionName
		slog.Info("Updated embedded connection name", "id", dbConn.ID, "name", connectionName)
	}
	// The agent dials back into this same process over loopback.
	tunnelURL := fmt.Sprintf("ws://127.0.0.1:%d/connect", port)
	cfg := &connconfig.Config{
		TunnelURL:         tunnelURL,
		Token:             dbConn.TunnelToken,
		ClickHouseURL:     clickhouseURL,
		Takeover:          true, // Always takeover on startup
		HeartbeatInterval: 30 * time.Second,
		ReconnectDelay:    2 * time.Second,
		MaxReconnectDelay: 30 * time.Second,
	}
	// Use quiet mode for the embedded agent (suppresses terminal output)
	u := ui.New(true, true, false, false)
	conn := connector.New(cfg, u)
	ea := &EmbeddedAgent{
		conn: conn,
		db:   db,
	}
	go func() {
		// Small delay to let the HTTP server start accepting connections
		time.Sleep(500 * time.Millisecond)
		slog.Info("Starting embedded agent", "clickhouse_url", clickhouseURL, "tunnel_url", tunnelURL)
		if err := conn.Run(); err != nil {
			slog.Error("Embedded agent exited with error", "error", err)
		}
	}()
	return ea, nil
}

// Stop gracefully shuts down the embedded agent.
// Safe to call on a nil receiver (e.g. when Start returned (nil, nil)).
func (ea *EmbeddedAgent) Stop() {
	if ea != nil && ea.conn != nil {
		slog.Info("Stopping embedded connector")
		ea.conn.Shutdown()
	}
}

================================================
FILE: internal/governance/guardrails.go
================================================
package governance

import (
	"fmt"
	"log/slog"
	"regexp"
	"sort"
	"strings"
	"time"

	"github.com/caioricciuti/ch-ui/internal/alerts"
	"github.com/caioricciuti/ch-ui/internal/database"
)

// How old the access sync may be before guardrail evaluation is considered
// untrustworthy and queries are allowed (fail-open with an alert).
const defaultGuardrailStaleAfter = 10 * time.Minute

// Matches "SHOW TABLES FROM <db>" / "SHOW TABLES IN <db>" (case-insensitive).
var showTablesFromRe = regexp.MustCompile(`(?i)\bSHOW\s+TABLES\s+(?:FROM|IN)\s+` + tableRefPattern)

// guardrailStore is the subset of *Store the guardrail service needs;
// narrowed to an interface so tests can substitute it.
type guardrailStore interface {
	GetEnabledPolicies(connectionID string) ([]Policy, error)
	GetAccessMatrixForUser(connectionID, userName string) ([]AccessMatrixEntry, error)
	GetSyncState(connectionID string, syncType string) (*SyncState, error)
	CreateViolation(connectionID, policyID, queryLogID, user, detail, severity, detectionPhase, requestEndpoint string) (string, error)
	UpsertIncidentFromViolation(connectionID, sourceRef, policyName, user, severity, detail string) (string, bool, error)
}

// alertEventWriter abstracts alert persistence (implemented by *database.DB).
type alertEventWriter interface {
	CreateAlertEvent(connectionID *string, eventType, severity, title, message string, payload interface{}, fingerprint, sourceRef string) (string, error)
}

// GuardrailService evaluates queries against enabled governance policies
// before execution, blocking or allowing them.
type GuardrailService struct {
	store      guardrailStore
	alerts     alertEventWriter
	staleAfter time.Duration
	now        func() time.Time // injectable clock for tests
}

// GuardrailDecision is the outcome of a pre-execution policy evaluation.
// Block is non-nil only when Allowed is false.
type GuardrailDecision struct {
	Allowed bool
	Block   *GuardrailBlock
}

// GuardrailBlock describes why a query was blocked and which policy fired.
type GuardrailBlock struct {
	PolicyID        string
	PolicyName      string
	Severity        string
	EnforcementMode string
	ViolationID     string
	Detail          string
}

// NewGuardrailService wires a guardrail service with production defaults
// (real clock, default staleness window).
func NewGuardrailService(store *Store, db *database.DB) *GuardrailService {
	return &GuardrailService{
		store:      store,
		alerts:     db,
		staleAfter: defaultGuardrailStaleAfter,
		now:        time.Now,
	}
}

// EvaluateQuery evaluates a raw SQL query: tables are extracted from the text.
func (s *GuardrailService) EvaluateQuery(connectionID, user, queryText, requestEndpoint string) (GuardrailDecision, error) {
	tablesUsed := extractPolicyTablesFromQuery(queryText)
	return s.evaluate(connectionID, user, queryText, tablesUsed, requestEndpoint)
}

// EvaluateTable evaluates access to one explicit table by synthesizing an
// equivalent SELECT over it. Blank database or table names are allowed
// (nothing to evaluate).
func (s *GuardrailService) EvaluateTable(connectionID, user, databaseName, tableName, requestEndpoint string) (GuardrailDecision, error) {
	db := strings.TrimSpace(databaseName)
	tbl := strings.TrimSpace(tableName)
	if db == "" || tbl == "" {
		return GuardrailDecision{Allowed: true}, nil
	}
	queryText := fmt.Sprintf("SELECT * FROM `%s`.`%s`", db, tbl)
	tablesUsed := []string{db + "." + tbl}
	return s.evaluate(connectionID, user, queryText, tablesUsed, requestEndpoint)
}

// evaluate is the core decision routine. Fail-open design: when policies
// cannot be trusted (stale/missing access sync, matrix lookup failure), the
// query is ALLOWED and an "uncertain" alert is emitted instead of blocking.
func (s *GuardrailService) evaluate(connectionID, user, queryText string, tablesUsed []string, requestEndpoint string) (GuardrailDecision, error) {
	policies, err := s.store.GetEnabledPolicies(connectionID)
	if err != nil {
		return GuardrailDecision{}, fmt.Errorf("load enabled policies: %w", err)
	}
	if len(policies) == 0 {
		return GuardrailDecision{Allowed: true}, nil
	}
	uncertain, uncertainReason := s.isAccessStateUncertain(connectionID)
	if uncertain {
		s.emitUncertainGuardrailEvent(connectionID, user, queryText, requestEndpoint, uncertainReason)
		return GuardrailDecision{Allowed: true}, nil
	}
	matrixEntries, err := s.store.GetAccessMatrixForUser(connectionID, user)
	if err != nil {
		// Fail open: a lookup failure must not take down query execution.
		s.emitUncertainGuardrailEvent(connectionID, user, queryText, requestEndpoint, "access matrix lookup failed")
		return GuardrailDecision{Allowed: true}, nil
	}
	userRoles := collectUserRoles(matrixEntries)
	// A policy blocks only when: it is enabled, the query touches its object,
	// the user lacks the required role, AND its enforcement mode is "block".
	blockingPolicies := make([]Policy, 0)
	for _, policy := range policies {
		if !policy.Enabled {
			continue
		}
		if !queryTouchesObject(tablesUsed, queryText, policy) {
			continue
		}
		if hasRole(userRoles, policy.RequiredRole) {
			continue
		}
		if normalizePolicyEnforcementMode(policy.EnforcementMode) == "block" {
			blockingPolicies = append(blockingPolicies, policy)
		}
	}
	if len(blockingPolicies) == 0 {
		return GuardrailDecision{Allowed: true}, nil
	}
	// Deterministically pick one policy to report (highest severity, then name).
	selected := pickBlockingPolicy(blockingPolicies)
	detail := fmt.Sprintf(
		"Query blocked before execution: user %q touched %s without required role %q",
		user, describePolicyObject(selected), selected.RequiredRole,
	)
	severity := normalizeGuardrailSeverity(selected.Severity)
	violationID := ""
	createdViolationID, err := s.store.CreateViolation(
		connectionID, selected.ID, "", user, detail, severity, "pre_exec_block", requestEndpoint,
	)
	if err != nil {
		// Persistence failures are logged but do not change the block decision.
		slog.Warn("Failed to persist pre-exec guardrail violation", "connection", connectionID, "policy_id", selected.ID, "error", err)
	} else {
		violationID = createdViolationID
		if _, _, err := s.store.UpsertIncidentFromViolation(connectionID, violationID, selected.Name, user, severity, detail); err != nil {
			slog.Warn("Failed to upsert incident from pre-exec guardrail violation", "violation_id", violationID, "error", err)
		}
	}
	if s.alerts != nil {
		// Fingerprint dedupes alerts per (policy, user, normalized query).
		fingerprint := fmt.Sprintf("policy:%s:user:%s:hash:%s", selected.ID, user, hashNormalized(normalizeQuery(queryText)))
		payload := map[string]interface{}{
			"guardrail_status":   "blocked",
			"policy_id":          selected.ID,
			"policy_name":        selected.Name,
			"query_hash":         hashNormalized(normalizeQuery(queryText)),
			"request_endpoint":   requestEndpoint,
			"violation_id":       violationID,
			"violation_severity": severity,
			"detection_phase":    "pre_exec_block",
			"enforcement_mode":   "block",
			"blocked_user":       user,
		}
		sourceRef := violationID
		if _, err := s.alerts.CreateAlertEvent(
			&connectionID,
			alerts.EventTypePolicyViolation,
			severity,
			fmt.Sprintf("Policy blocked query: %s", strings.TrimSpace(selected.Name)),
			detail,
			payload,
			fingerprint,
			sourceRef,
		); err != nil {
			slog.Warn("Failed to create blocked guardrail alert event", "connection", connectionID, "policy_id", selected.ID, "error", err)
		}
	}
	return GuardrailDecision{
		Allowed: false,
		Block: &GuardrailBlock{
			PolicyID:        selected.ID,
			PolicyName:      selected.Name,
			Severity:        severity,
			EnforcementMode: "block",
			ViolationID:     violationID,
			Detail:          detail,
		},
	}, nil
}

func (s *GuardrailService) isAccessStateUncertain(connectionID
string) (bool, string) {
	// Returns (true, reason) when the access sync state cannot be trusted:
	// missing, errored, never synced, unparseable timestamp, or stale.
	state, err := s.store.GetSyncState(connectionID, string(SyncAccess))
	if err != nil {
		return true, "failed to read governance access sync state"
	}
	if state == nil {
		return true, "governance access sync state missing"
	}
	if strings.EqualFold(strings.TrimSpace(state.Status), "error") {
		return true, "governance access sync state is error"
	}
	if state.LastSyncedAt == nil || strings.TrimSpace(*state.LastSyncedAt) == "" {
		return true, "governance access sync has no successful sync timestamp"
	}
	lastSyncedAt, err := time.Parse(time.RFC3339, strings.TrimSpace(*state.LastSyncedAt))
	if err != nil {
		return true, "governance access sync timestamp is invalid"
	}
	if s.now().UTC().Sub(lastSyncedAt.UTC()) > s.staleAfter {
		return true, "governance access sync state is stale"
	}
	return false, ""
}

// emitUncertainGuardrailEvent records a warn-level alert saying a query was
// allowed because guardrail evaluation could not be trusted. Best-effort:
// failures are logged, never propagated.
func (s *GuardrailService) emitUncertainGuardrailEvent(connectionID, user, queryText, requestEndpoint, reason string) {
	if s.alerts == nil {
		return
	}
	queryHash := hashNormalized(normalizeQuery(queryText))
	fingerprint := fmt.Sprintf("guardrail:uncertain:user:%s:hash:%s:endpoint:%s", user, queryHash, requestEndpoint)
	payload := map[string]interface{}{
		"guardrail_status": "uncertain",
		"reason":           reason,
		"request_endpoint": requestEndpoint,
		"query_hash":       queryHash,
		"ch_user":          user,
	}
	if _, err := s.alerts.CreateAlertEvent(
		&connectionID,
		alerts.EventTypePolicyViolation,
		alerts.SeverityWarn,
		"Guardrail evaluation uncertain",
		"Guardrail pre-execution evaluation could not be trusted; query was allowed",
		payload,
		fingerprint,
		"",
	); err != nil {
		slog.Warn("Failed to create uncertain guardrail alert event", "connection", connectionID, "error", err)
	}
}

// pickBlockingPolicy deterministically selects one policy out of several that
// would block: highest severity first, then case-insensitive name, then ID.
// Operates on a copy; the caller's slice is not reordered.
func pickBlockingPolicy(policies []Policy) Policy {
	ordered := make([]Policy, len(policies))
	copy(ordered, policies)
	sort.SliceStable(ordered, func(i, j int) bool {
		left := ordered[i]
		right := ordered[j]
		lp := guardrailSeverityPriority(left.Severity)
		rp := guardrailSeverityPriority(right.Severity)
		if lp != rp {
			return lp > rp
		}
		ln := strings.ToLower(strings.TrimSpace(left.Name))
		rn := strings.ToLower(strings.TrimSpace(right.Name))
		if ln != rn {
			return ln < rn
		}
		return strings.ToLower(strings.TrimSpace(left.ID)) < strings.ToLower(strings.TrimSpace(right.ID))
	})
	return ordered[0]
}

// guardrailSeverityPriority ranks normalized severities for sorting
// (critical=4 ... info=1).
func guardrailSeverityPriority(v string) int {
	switch normalizeGuardrailSeverity(v) {
	case "critical":
		return 4
	case "error":
		return 3
	case "warn":
		return 2
	case "info":
		return 1
	default:
		return 0
	}
}

// normalizeGuardrailSeverity canonicalizes a severity string; anything
// unrecognized defaults to "warn".
func normalizeGuardrailSeverity(v string) string {
	switch strings.ToLower(strings.TrimSpace(v)) {
	case "critical":
		return "critical"
	case "error":
		return "error"
	case "info":
		return "info"
	default:
		return "warn"
	}
}

// extractPolicyTablesFromQuery returns the deduplicated list of table
// references a query touches, as "db.table" or bare "table". SHOW TABLES
// FROM/IN db yields the sentinel "db.__all_tables__" instead of source tables.
func extractPolicyTablesFromQuery(queryText string) []string {
	query := normaliseWhitespace(queryText)
	seen := make(map[string]bool, 16)
	out := make([]string, 0, 8)
	isShowTablesQuery := showTablesFromRe.MatchString(query)
	// addTable dedupes case-insensitively but preserves original casing in out.
	addTable := func(dbName, tableName string) {
		dbName = strings.TrimSpace(dbName)
		tableName = strings.TrimSpace(tableName)
		if tableName == "" {
			return
		}
		key := tableName
		val := tableName
		if dbName != "" {
			key = strings.ToLower(dbName + "." + tableName)
			val = dbName + "." + tableName
		} else {
			key = strings.ToLower(tableName)
		}
		if seen[key] {
			return
		}
		seen[key] = true
		out = append(out, val)
	}
	// addDatabase records a whole-database sentinel for SHOW TABLES queries.
	addDatabase := func(dbName string) {
		dbName = strings.TrimSpace(dbName)
		if dbName == "" {
			return
		}
		key := strings.ToLower(dbName + ".__all_tables__")
		if seen[key] {
			return
		}
		seen[key] = true
		out = append(out, dbName+".__all_tables__")
	}
	if !isShowTablesQuery {
		for _, src := range extractSourceTables(query) {
			addTable(src.Database, src.Table)
		}
	}
	if target := extractTarget(query); target != nil {
		addTable(target.Database, target.Table)
	}
	for _, match := range showTablesFromRe.FindAllStringSubmatch(query, -1) {
		if len(match) < 2 {
			continue
		}
		raw := stripBackticks(strings.TrimSpace(match[1]))
		if raw == "" {
			continue
		}
		parts := strings.SplitN(raw, ".", 2)
		if len(parts) == 2 {
			// Qualified form like `db`.`something` — only the database matters.
			addDatabase(stripBackticks(parts[0]))
			continue
		}
		addDatabase(stripBackticks(raw))
	}
	return out
}

================================================
FILE: internal/governance/guardrails_test.go
================================================
package governance

import (
	"path/filepath"
	"testing"

	"github.com/caioricciuti/ch-ui/internal/database"
)

// guardrailTestContext bundles a temp SQLite DB, store, and guardrail service
// pre-seeded with one connection ("conn-1").
type guardrailTestContext struct {
	db      *database.DB
	store   *Store
	service *GuardrailService
	connID  string
}

func newGuardrailTestContext(t *testing.T) *guardrailTestContext {
	t.Helper()
	dbPath := filepath.Join(t.TempDir(), "guardrails.db")
	db, err := database.Open(dbPath)
	if err != nil {
		t.Fatalf("open db: %v", err)
	}
	t.Cleanup(func() { _ = db.Close() })
	ctx := &guardrailTestContext{
		db:     db,
		store:  NewStore(db),
		connID: "conn-1",
	}
	ctx.service = NewGuardrailService(ctx.store, db)
	if _, err := db.Conn().Exec(
		`INSERT INTO connections (id, name, tunnel_token, status) VALUES (?, ?, ?, ?)`,
		ctx.connID, "Local", "token-1", "connected",
	); err != nil {
		t.Fatalf("insert connection: %v", err)
	}
	return ctx
}

// setAccessSyncFresh marks the access sync as freshly idle so guardrail
// evaluation treats the access matrix as trustworthy.
func (c *guardrailTestContext) setAccessSyncFresh(t *testing.T) {
	t.Helper()
	if err := c.store.UpsertSyncState(c.connID, string(SyncAccess),
		"idle", nil, nil, 0); err != nil {
		t.Fatalf("upsert access sync state: %v", err)
	}
}

// createPolicy inserts a table-level policy on db.tbl requiring the
// "analyst" role, with the given severity and enforcement mode.
func (c *guardrailTestContext) createPolicy(t *testing.T, name, severity, mode string) string {
	t.Helper()
	id, err := c.store.CreatePolicy(
		c.connID, name, "", "table", "db", "tbl", "", "analyst", severity, mode, "admin",
	)
	if err != nil {
		t.Fatalf("create policy: %v", err)
	}
	return id
}

func TestGuardrailsWarnPolicyAllowsExecution(t *testing.T) {
	ctx := newGuardrailTestContext(t)
	ctx.setAccessSyncFresh(t)
	ctx.createPolicy(t, "warn-policy", "warn", "warn")
	decision, err := ctx.service.EvaluateQuery(ctx.connID, "alice", "SELECT * FROM db.tbl", "/api/query/run")
	if err != nil {
		t.Fatalf("evaluate query: %v", err)
	}
	if !decision.Allowed {
		t.Fatalf("expected query to be allowed, got blocked: %+v", decision.Block)
	}
}

func TestGuardrailsBlockPolicyBlocksAndPersistsViolation(t *testing.T) {
	ctx := newGuardrailTestContext(t)
	ctx.setAccessSyncFresh(t)
	policyID := ctx.createPolicy(t, "block-policy", "critical", "block")
	decision, err := ctx.service.EvaluateQuery(ctx.connID, "alice", "SELECT * FROM db.tbl", "/api/query/run")
	if err != nil {
		t.Fatalf("evaluate query: %v", err)
	}
	if decision.Allowed || decision.Block == nil {
		t.Fatalf("expected blocked decision, got %+v", decision)
	}
	if decision.Block.PolicyID != policyID {
		t.Fatalf("unexpected blocked policy: got %s want %s", decision.Block.PolicyID, policyID)
	}
	if decision.Block.EnforcementMode != "block" {
		t.Fatalf("unexpected enforcement mode: %s", decision.Block.EnforcementMode)
	}
	// The violation must be persisted with the pre-exec phase and endpoint.
	var detectionPhase, requestEndpoint string
	if err := ctx.db.Conn().QueryRow(
		`SELECT detection_phase, COALESCE(request_endpoint, '') FROM gov_policy_violations WHERE id = ?`,
		decision.Block.ViolationID,
	).Scan(&detectionPhase, &requestEndpoint); err != nil {
		t.Fatalf("load persisted violation: %v", err)
	}
	if detectionPhase != "pre_exec_block" {
		t.Fatalf("unexpected detection phase: %s", detectionPhase)
	}
	if requestEndpoint != "/api/query/run" {
		t.Fatalf("unexpected request endpoint: %s", requestEndpoint)
	}
}

// Verifies the severity-then-name tiebreak in pickBlockingPolicy: among two
// "block" policies of equal severity, the lexically-first name ("aaa") wins;
// the critical policy is mode "warn" and must not be selected.
func TestGuardrailsPickDeterministicBlockingPolicy(t *testing.T) {
	ctx := newGuardrailTestContext(t)
	ctx.setAccessSyncFresh(t)
	ctx.createPolicy(t, "zzz", "warn", "block")
	expected := ctx.createPolicy(t, "aaa", "warn", "block")
	ctx.createPolicy(t, "high", "critical", "warn")
	decision, err := ctx.service.EvaluateQuery(ctx.connID, "alice", "SELECT * FROM db.tbl", "/api/query/run")
	if err != nil {
		t.Fatalf("evaluate query: %v", err)
	}
	if decision.Allowed || decision.Block == nil {
		t.Fatalf("expected blocked decision, got %+v", decision)
	}
	if decision.Block.PolicyID != expected {
		t.Fatalf("expected lexical tiebreak policy %s, got %s", expected, decision.Block.PolicyID)
	}
}

// No setAccessSyncFresh here: the missing sync state makes evaluation
// uncertain, so the query must be allowed and a warn alert emitted.
func TestGuardrailsUncertainAccessStateAllowsAndEmitsAlert(t *testing.T) {
	ctx := newGuardrailTestContext(t)
	ctx.createPolicy(t, "block-policy", "warn", "block")
	decision, err := ctx.service.EvaluateQuery(ctx.connID, "alice", "SELECT * FROM db.tbl", "/api/query/run")
	if err != nil {
		t.Fatalf("evaluate query: %v", err)
	}
	if !decision.Allowed {
		t.Fatalf("expected allowed decision when access sync state is uncertain")
	}
	var count int
	if err := ctx.db.Conn().QueryRow(
		`SELECT COUNT(*) FROM alert_events WHERE event_type = ? AND title = ?`,
		"policy.violation", "Guardrail evaluation uncertain",
	).Scan(&count); err != nil {
		t.Fatalf("count uncertain guardrail alerts: %v", err)
	}
	if count != 1 {
		t.Fatalf("expected 1 uncertain guardrail alert event, got %d", count)
	}
}

func TestExtractPolicyTablesFromQuery(t *testing.T) {
	tests := []struct {
		name  string
		query string
		want  []string
	}{
		{name: "select join", query: "SELECT * FROM db.tbl a JOIN db2.tbl2 b ON a.id=b.id", want: []string{"db.tbl", "db2.tbl2"}},
		{name: "insert select", query: "INSERT INTO db.target SELECT * FROM db.source", want: []string{"db.source", "db.target"}},
		{name: "show tables from", query: "SHOW TABLES FROM db", want: []string{"db.__all_tables__"}},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			got := extractPolicyTablesFromQuery(tc.query)
			if len(got) != len(tc.want) {
				t.Fatalf("unexpected result size: got=%v want=%v", got, tc.want)
			}
			for i := range tc.want {
				if got[i] != tc.want[i] {
					t.Fatalf("unexpected table at %d: got=%s want=%s", i, got[i], tc.want[i])
				}
			}
		})
	}
}

================================================
FILE: internal/governance/harvester_access.go
================================================
package governance

import (
	"context"
	"fmt"
	"log/slog"
	"time"

	"github.com/google/uuid"
)

// Default number of inactive days to consider a permission as "over-granted".
const overPermissionInactiveDays = 30

// syncAccess harvests user, role, grant, and role_grant data from ClickHouse
// system tables, upserts them into SQLite, and rebuilds the access matrix.
func (s *Syncer) syncAccess(ctx context.Context, creds CHCredentials) (*AccessSyncResult, error) { connID := creds.ConnectionID now := time.Now().UTC().Format(time.RFC3339) // Update sync state to running if err := s.store.UpsertSyncState(connID, string(SyncAccess), "running", nil, nil, 0); err != nil { slog.Error("Failed to update sync state", "error", err) } result := &AccessSyncResult{} var syncErr error defer func() { status := "idle" var errMsg *string if syncErr != nil { status = "error" e := syncErr.Error() errMsg = &e } totalRows := result.UsersSynced + result.RolesSynced + result.GrantsSynced if err := s.store.UpsertSyncState(connID, string(SyncAccess), status, &now, errMsg, totalRows); err != nil { slog.Error("Failed to update sync state after access sync", "error", err) } }() // ── Phase 1: Users ────────────────────────────────────────────────────── userRows, err := s.executeQuery(creds, `SELECT name, toString(auth_type) AS auth_type, toString(host_ip) AS host_ip, default_roles_all, toString(default_roles_list) AS default_roles_list FROM system.users ORDER BY name`) if err != nil { slog.Warn("Access sync: failed to query system.users with role fields, trying fallback", "connection", connID, "error", err) userRows, err = s.executeQuery(creds, `SELECT name, toString(auth_type) AS auth_type, toString(host_ip) AS host_ip, 0 AS default_roles_all, '' AS default_roles_list FROM system.users ORDER BY name`) if err != nil { slog.Warn("Access sync: fallback query for system.users failed", "connection", connID, "error", err) userRows = nil } } else { // no-op; rows handled below } usersFetched := err == nil if usersFetched { if err := s.store.DeleteChUsersForConnection(connID); err != nil { syncErr = err return nil, syncErr } for _, row := range userRows { name := fmt.Sprintf("%v", row["name"]) var defaultRoles *string if allRoles, ok := row["default_roles_all"]; ok && fmt.Sprintf("%v", allRoles) == "1" { dr := "ALL" defaultRoles = &dr } else if roleList, ok := 
row["default_roles_list"]; ok { rl := fmt.Sprintf("%v", roleList) if rl != "" && rl != "" && rl != "[]" { defaultRoles = &rl } } authType := toStringPtr(row["auth_type"]) hostIP := toStringPtr(row["host_ip"]) if err := s.store.UpsertChUser(ChUser{ ID: uuid.NewString(), ConnectionID: connID, Name: name, AuthType: authType, HostIP: hostIP, DefaultRoles: defaultRoles, FirstSeen: now, LastUpdated: now, }); err != nil { slog.Error("Failed to upsert CH user", "name", name, "error", err) continue } result.UsersSynced++ } } if !usersFetched { slog.Warn("Access sync: keeping previous cached users because source query failed", "connection", connID) } // ── Phase 2: Roles ────────────────────────────────────────────────────── roleRows, err := s.executeQuery(creds, `SELECT name FROM system.roles ORDER BY name`) if err != nil { slog.Warn("Access sync: failed to query system.roles", "connection", connID, "error", err) } else { if err := s.store.DeleteChRolesForConnection(connID); err != nil { syncErr = err return nil, syncErr } for _, row := range roleRows { name := fmt.Sprintf("%v", row["name"]) if err := s.store.UpsertChRole(ChRole{ ID: uuid.NewString(), ConnectionID: connID, Name: name, FirstSeen: now, LastUpdated: now, }); err != nil { slog.Error("Failed to upsert CH role", "name", name, "error", err) continue } result.RolesSynced++ } } // ── Phase 3: Role grants ──────────────────────────────────────────────── roleGrantRows, err := s.executeQuery(creds, `SELECT user_name, granted_role_name, granted_role_is_default, with_admin_option FROM system.role_grants ORDER BY user_name, granted_role_name`) if err != nil { slog.Warn("Access sync: failed to query system.role_grants", "connection", connID, "error", err) } else { if err := s.store.DeleteRoleGrantsForConnection(connID); err != nil { syncErr = err return nil, syncErr } for _, row := range roleGrantRows { userName := fmt.Sprintf("%v", row["user_name"]) roleName := fmt.Sprintf("%v", row["granted_role_name"]) isDefault := 
toBool(row["granted_role_is_default"]) withAdmin := toBool(row["with_admin_option"]) if err := s.store.UpsertRoleGrant(RoleGrant{ ID: uuid.NewString(), ConnectionID: connID, UserName: userName, GrantedRoleName: roleName, IsDefault: isDefault, WithAdminOption: withAdmin, FirstSeen: now, LastUpdated: now, }); err != nil { slog.Error("Failed to upsert role grant", "user", userName, "role", roleName, "error", err) continue } } } // ── Phase 4: Grants (privileges) ──────────────────────────────────────── grantRows, err := s.executeQuery(creds, `SELECT user_name, role_name, access_type, database AS grant_database, table AS grant_table, column AS grant_column, is_partial_revoke, grant_option FROM system.grants ORDER BY user_name, role_name, access_type`) if err != nil { slog.Warn("Access sync: failed to query system.grants", "connection", connID, "error", err) } else { if err := s.store.DeleteGrantsForConnection(connID); err != nil { syncErr = err return nil, syncErr } for _, row := range grantRows { grant := Grant{ ID: uuid.NewString(), ConnectionID: connID, UserName: toStringPtr(row["user_name"]), RoleName: toStringPtr(row["role_name"]), AccessType: fmt.Sprintf("%v", row["access_type"]), GrantDatabase: toStringPtr(row["grant_database"]), GrantTable: toStringPtr(row["grant_table"]), GrantColumn: toStringPtr(row["grant_column"]), IsPartialRevoke: toBool(row["is_partial_revoke"]), GrantOption: toBool(row["grant_option"]), FirstSeen: now, LastUpdated: now, } if err := s.store.UpsertGrant(grant); err != nil { slog.Error("Failed to upsert grant", "user", grant.UserName, "role", grant.RoleName, "access_type", grant.AccessType, "error", err, ) continue } result.GrantsSynced++ } } // ── Phase 5: Rebuild access matrix ────────────────────────────────────── matrixCount, err := s.store.RebuildAccessMatrix(connID) if err != nil { slog.Error("Failed to rebuild access matrix", "connection", connID, "error", err) } else { result.MatrixEntries = matrixCount } // ── Phase 6: Count 
over-permissions ───────────────────────────────────── overPerms, err := s.store.GetOverPermissionsWithDays(connID, overPermissionInactiveDays) if err != nil { slog.Warn("Failed to count over-permissions", "connection", connID, "error", err) } else { result.OverPermissions = len(overPerms) } slog.Info("Access sync completed", "connection", connID, "users", result.UsersSynced, "roles", result.RolesSynced, "grants", result.GrantsSynced, "matrix_entries", result.MatrixEntries, "over_permissions", result.OverPermissions, ) return result, nil } // toBool converts an interface{} to bool. Handles ClickHouse-style values: // 0/1 (as float64 or string), true/false, etc. func toBool(v interface{}) bool { if v == nil { return false } switch val := v.(type) { case bool: return val case float64: return val != 0 case int64: return val != 0 case int: return val != 0 case string: return val == "1" || val == "true" || val == "True" default: return fmt.Sprintf("%v", v) == "1" } } ================================================ FILE: internal/governance/harvester_metadata.go ================================================ package governance import ( "context" "encoding/json" "fmt" "log/slog" "strconv" "time" "github.com/google/uuid" ) // syncMetadata harvests database/table/column metadata from ClickHouse system tables, // diffs against existing SQLite state, and records schema changes. 
func (s *Syncer) syncMetadata(ctx context.Context, creds CHCredentials) (*MetadataSyncResult, error) {
	connID := creds.ConnectionID
	now := time.Now().UTC().Format(time.RFC3339)

	// Update sync state to running
	if err := s.store.UpsertSyncState(connID, string(SyncMetadata), "running", nil, nil, 0); err != nil {
		slog.Error("Failed to update sync state", "error", err)
	}

	result := &MetadataSyncResult{}
	var syncErr error
	// On exit, persist the final sync state ("idle" or "error") together with
	// the total number of rows touched across all three phases.
	defer func() {
		status := "idle"
		var errMsg *string
		if syncErr != nil {
			status = "error"
			e := syncErr.Error()
			errMsg = &e
		}
		rowCount := result.DatabasesSynced + result.TablesSynced + result.ColumnsSynced
		if err := s.store.UpsertSyncState(connID, string(SyncMetadata), status, &now, errMsg, rowCount); err != nil {
			slog.Error("Failed to update sync state after metadata sync", "error", err)
		}
	}()

	// ── Phase 1: Databases ──────────────────────────────────────────────────
	dbRows, err := s.executeQuery(creds, `SELECT name, engine FROM system.databases WHERE name NOT IN ('system', 'INFORMATION_SCHEMA', 'information_schema') ORDER BY name`)
	if err != nil {
		// Phase 1 failure is fatal: without a fresh database list, the
		// diffing below would wrongly mark everything as removed.
		slog.Warn("Metadata sync: failed to query databases", "connection", connID, "error", err)
		syncErr = fmt.Errorf("databases query failed: %w", err)
		return result, syncErr
	}
	existingDBs, err := s.store.GetDatabases(connID)
	if err != nil {
		syncErr = fmt.Errorf("failed to load existing databases: %w", err)
		return result, syncErr
	}
	existingDBMap := make(map[string]*GovDatabase, len(existingDBs))
	for i := range existingDBs {
		existingDBMap[existingDBs[i].Name] = &existingDBs[i]
	}
	seenDBs := make(map[string]bool)
	for _, row := range dbRows {
		name := fmt.Sprintf("%v", row["name"])
		engine := fmt.Sprintf("%v", row["engine"])
		seenDBs[name] = true
		_, found := existingDBMap[name]
		if err := s.store.UpsertDatabase(GovDatabase{
			ID:           uuid.NewString(),
			ConnectionID: connID,
			Name:         name,
			Engine:       engine,
			FirstSeen:    now,
			LastUpdated:  now,
		}); err != nil {
			slog.Error("Failed to upsert database", "name", name, "error", err)
			continue
		}
		// A database we had not seen before is a schema change.
		if !found {
			s.store.CreateSchemaChange(connID, ChangeDatabaseAdded, name, "", "", "", name)
			result.SchemaChanges++
		}
		result.DatabasesSynced++
	}
	// Mark removed databases
	for name, existing := range existingDBMap {
		if !seenDBs[name] && !existing.IsDeleted {
			if err := s.store.MarkDatabaseDeleted(connID, name); err != nil {
				slog.Error("Failed to mark database deleted", "name", name, "error", err)
			}
			s.store.CreateSchemaChange(connID, ChangeDatabaseRemoved, name, "", "", name, "")
			result.SchemaChanges++
		}
	}

	// ── Phase 2: Tables with stats ──────────────────────────────────────────
	// Joins system.parts (active parts only) to aggregate row/byte/partition
	// counts per table.
	tableRows, err := s.executeQuery(creds, `SELECT t.database AS database_name, t.name AS table_name, t.engine AS engine, t.uuid AS table_uuid, COALESCE(sum(p.rows), 0) AS total_rows, COALESCE(sum(p.bytes_on_disk), 0) AS total_bytes, COALESCE(count(DISTINCT p.partition), 0) AS partition_count FROM system.tables t LEFT JOIN system.parts p ON p.database = t.database AND p.table = t.name AND p.active = 1 WHERE t.database NOT IN ('system', 'INFORMATION_SCHEMA', 'information_schema') GROUP BY t.database, t.name, t.engine, t.uuid ORDER BY t.database, t.name`)
	if err != nil {
		slog.Warn("Metadata sync: failed to query tables", "connection", connID, "error", err)
		// Continue — tables query failure is non-fatal
	} else {
		existingTables, err := s.store.GetTables(connID)
		if err != nil {
			slog.Error("Failed to load existing tables", "error", err)
		}
		// Keyed by "db.table" for the add/remove diff below.
		existingTableMap := make(map[string]*GovTable)
		for i := range existingTables {
			key := existingTables[i].DatabaseName + "." + existingTables[i].TableName
			existingTableMap[key] = &existingTables[i]
		}
		seenTables := make(map[string]bool)
		for _, row := range tableRows {
			dbName := fmt.Sprintf("%v", row["database_name"])
			tableName := fmt.Sprintf("%v", row["table_name"])
			engine := fmt.Sprintf("%v", row["engine"])
			tableUUID := fmt.Sprintf("%v", row["table_uuid"])
			totalRows := toInt64(row["total_rows"])
			totalBytes := toInt64(row["total_bytes"])
			partCount := int(toInt64(row["partition_count"]))
			key := dbName + "." + tableName
			seenTables[key] = true
			_, found := existingTableMap[key]
			if err := s.store.UpsertTable(GovTable{
				ID:             uuid.NewString(),
				ConnectionID:   connID,
				DatabaseName:   dbName,
				TableName:      tableName,
				Engine:         engine,
				TableUUID:      tableUUID,
				TotalRows:      totalRows,
				TotalBytes:     totalBytes,
				PartitionCount: partCount,
				FirstSeen:      now,
				LastUpdated:    now,
			}); err != nil {
				slog.Error("Failed to upsert table", "table", key, "error", err)
				continue
			}
			if !found {
				s.store.CreateSchemaChange(connID, ChangeTableAdded, dbName, tableName, "", "", tableName)
				result.SchemaChanges++
			}
			result.TablesSynced++
		}
		// Mark removed tables
		for key, existing := range existingTableMap {
			if !seenTables[key] && !existing.IsDeleted {
				if err := s.store.MarkTableDeleted(connID, existing.DatabaseName, existing.TableName); err != nil {
					slog.Error("Failed to mark table deleted", "table", key, "error", err)
				}
				s.store.CreateSchemaChange(connID, ChangeTableRemoved, existing.DatabaseName, existing.TableName, "", existing.TableName, "")
				result.SchemaChanges++
			}
		}
	}

	// ── Phase 3: Columns ────────────────────────────────────────────────────
	colRows, err := s.executeQuery(creds, `SELECT database AS database_name, table AS table_name, name AS column_name, type AS column_type, position AS column_position, default_kind, default_expression, comment FROM system.columns WHERE database NOT IN ('system', 'INFORMATION_SCHEMA', 'information_schema') ORDER BY database, table, position`)
	if err != nil {
		slog.Warn("Metadata sync: failed to query columns", "connection", connID, "error", err)
	} else {
		// Build the existing-column map table by table (the store has no
		// single "all columns for connection" accessor here); keyed by
		// "db.table.column".
		existingColMap := make(map[string]*GovColumn)
		tables, tblErr := s.store.GetTables(connID)
		if tblErr == nil {
			for _, tbl := range tables {
				cols, colErr := s.store.GetColumns(connID, tbl.DatabaseName, tbl.TableName)
				if colErr != nil {
					continue
				}
				for i := range cols {
					key := cols[i].DatabaseName + "." + cols[i].TableName + "." + cols[i].ColumnName
					existingColMap[key] = &cols[i]
				}
			}
		}
		seenCols := make(map[string]bool)
		for _, row := range colRows {
			dbName := fmt.Sprintf("%v", row["database_name"])
			tableName := fmt.Sprintf("%v", row["table_name"])
			colName := fmt.Sprintf("%v", row["column_name"])
			colType := fmt.Sprintf("%v", row["column_type"])
			position := int(toInt64(row["column_position"]))
			key := dbName + "." + tableName + "." + colName
			seenCols[key] = true
			defaultKind := toStringPtr(row["default_kind"])
			defaultExpr := toStringPtr(row["default_expression"])
			comment := toStringPtr(row["comment"])
			existing, found := existingColMap[key]
			if err := s.store.UpsertColumn(GovColumn{
				ID:                uuid.NewString(),
				ConnectionID:      connID,
				DatabaseName:      dbName,
				TableName:         tableName,
				ColumnName:        colName,
				ColumnType:        colType,
				ColumnPosition:    position,
				DefaultKind:       defaultKind,
				DefaultExpression: defaultExpr,
				Comment:           comment,
				FirstSeen:         now,
				LastUpdated:       now,
			}); err != nil {
				slog.Error("Failed to upsert column", "column", key, "error", err)
				continue
			}
			// New column → "added" change; same column with a different type
			// → "type changed" change (old/new types recorded).
			if !found {
				s.store.CreateSchemaChange(connID, ChangeColumnAdded, dbName, tableName, colName, "", colName)
				result.SchemaChanges++
			} else if existing.ColumnType != colType {
				s.store.CreateSchemaChange(connID, ChangeColumnTypeChanged, dbName, tableName, colName, existing.ColumnType, colType)
				result.SchemaChanges++
			}
			result.ColumnsSynced++
		}
		// Mark removed columns
		for key, existing := range existingColMap {
			if !seenCols[key] && !existing.IsDeleted {
				if err := s.store.MarkColumnDeleted(connID, existing.DatabaseName, existing.TableName, existing.ColumnName); err != nil {
					slog.Error("Failed to
mark column deleted", "column", key, "error", err) } s.store.CreateSchemaChange(connID, ChangeColumnRemoved, existing.DatabaseName, existing.TableName, existing.ColumnName, existing.ColumnName, "") result.SchemaChanges++ } } } slog.Info("Metadata sync completed", "connection", connID, "databases", result.DatabasesSynced, "tables", result.TablesSynced, "columns", result.ColumnsSynced, "changes", result.SchemaChanges, ) return result, nil } // toInt64 converts interface{} values (float64, string, json.Number) to int64. func toInt64(v interface{}) int64 { if v == nil { return 0 } switch val := v.(type) { case float64: return int64(val) case int64: return val case int: return int64(val) case json.Number: n, _ := val.Int64() return n case string: n, _ := strconv.ParseInt(val, 10, 64) return n default: s := fmt.Sprintf("%v", v) n, _ := strconv.ParseInt(s, 10, 64) return n } } // toStringPtr converts interface{} to *string. Returns nil for nil or empty strings. func toStringPtr(v interface{}) *string { if v == nil { return nil } s := fmt.Sprintf("%v", v) if s == "" || s == "" { return nil } return &s } ================================================ FILE: internal/governance/harvester_querylog.go ================================================ package governance import ( "context" "crypto/sha256" "encoding/json" "fmt" "log/slog" "regexp" "strings" "time" "github.com/caioricciuti/ch-ui/internal/alerts" "github.com/google/uuid" ) const queryLogBatchLimit = 5000 const defaultQueryLogWatermark = "2000-01-01 00:00:00" // syncQueryLog harvests recent queries from system.query_log, classifies them, // extracts lineage, and evaluates access policies. 
func (s *Syncer) syncQueryLog(ctx context.Context, creds CHCredentials) (*QueryLogSyncResult, error) {
	connID := creds.ConnectionID
	now := time.Now().UTC().Format(time.RFC3339)

	// Update sync state to running
	if err := s.store.UpsertSyncState(connID, string(SyncQueryLog), "running", nil, nil, 0); err != nil {
		slog.Error("Failed to update sync state", "error", err)
	}

	result := &QueryLogSyncResult{}
	var syncErr error
	// On exit, persist final status plus the watermark: the newest ingested
	// event_time, falling back to "now" when nothing was ingested.
	defer func() {
		status := "idle"
		var errMsg *string
		if syncErr != nil {
			status = "error"
			e := syncErr.Error()
			errMsg = &e
		}
		watermark := result.NewWatermark
		if watermark == "" {
			watermark = now
		}
		if err := s.store.UpsertSyncState(connID, string(SyncQueryLog), status, &watermark, errMsg, result.QueriesIngested); err != nil {
			slog.Error("Failed to update sync state after query log sync", "error", err)
		}
	}()

	// Get current watermark from sync state
	watermark := defaultQueryLogWatermark
	state, err := s.store.GetSyncState(connID, string(SyncQueryLog))
	if err == nil && state != nil && state.Watermark != nil && *state.Watermark != "" {
		// The stored value is interpolated into the SQL text below, so it is
		// normalized/stripped first (see sanitizeQueryLogWatermark).
		watermark = sanitizeQueryLogWatermark(*state.Watermark)
	}

	// Query system.query_log for finished queries since watermark.
	// Filters: initial queries only, >= 10ms duration, and excludes the
	// harvester's own system-table queries to avoid self-ingestion.
	sql := fmt.Sprintf(`SELECT query_id, user AS ch_user, query, event_time, query_duration_ms, read_rows, read_bytes, result_rows, written_rows, written_bytes, memory_usage, tables, exception_code, exception FROM system.query_log WHERE type = 'QueryFinish' AND is_initial_query = 1 AND event_time > parseDateTimeBestEffort('%s') AND query_duration_ms >= 10 AND query NOT LIKE '%%system.query_log%%' AND query NOT LIKE '%%system.tables%%' AND query NOT LIKE '%%system.columns%%' AND query NOT LIKE '%%system.grants%%' ORDER BY event_time ASC LIMIT %d`, watermark, queryLogBatchLimit)
	rows, err := s.executeQuery(creds, sql)
	if err != nil {
		// Fallback for older CH setups where the "tables" column may be unavailable.
		fallbackSQL := fmt.Sprintf(`SELECT query_id, user AS ch_user, query, event_time, query_duration_ms, read_rows, read_bytes, result_rows, written_rows, written_bytes, memory_usage, CAST([], 'Array(String)') AS tables, exception_code, exception FROM system.query_log WHERE type = 'QueryFinish' AND is_initial_query = 1 AND event_time > parseDateTimeBestEffort('%s') AND query_duration_ms >= 10 AND query NOT LIKE '%%system.query_log%%' AND query NOT LIKE '%%system.tables%%' AND query NOT LIKE '%%system.columns%%' AND query NOT LIKE '%%system.grants%%' ORDER BY event_time ASC LIMIT %d`, watermark, queryLogBatchLimit)
		rows, err = s.executeQuery(creds, fallbackSQL)
		if err != nil {
			syncErr = fmt.Errorf("query_log query failed: %w", err)
			return result, syncErr
		}
	}

	if len(rows) == 0 {
		// Nothing new: keep the previous watermark.
		result.NewWatermark = watermark
		return result, nil
	}

	// Build QueryLogEntry batch
	var entries []QueryLogEntry
	var latestEventTime string
	for _, row := range rows {
		queryText := fmt.Sprintf("%v", row["query"])
		normalized := normalizeQuery(queryText)
		hash := hashNormalized(normalized)
		kind := classifyQuery(queryText)
		eventTime := fmt.Sprintf("%v", row["event_time"])
		isError := false
		var errorMsg *string
		if exCode := toInt64(row["exception_code"]); exCode != 0 {
			isError = true
			ex := fmt.Sprintf("%v", row["exception"])
			errorMsg = &ex
		}
		entry := QueryLogEntry{
			ID:             uuid.NewString(),
			ConnectionID:   connID,
			QueryID:        fmt.Sprintf("%v", row["query_id"]),
			User:           fmt.Sprintf("%v", row["ch_user"]),
			QueryText:      queryText,
			NormalizedHash: hash,
			QueryKind:      kind,
			EventTime:      eventTime,
			DurationMs:     toInt64(row["query_duration_ms"]),
			ReadRows:       toInt64(row["read_rows"]),
			ReadBytes:      toInt64(row["read_bytes"]),
			ResultRows:     toInt64(row["result_rows"]),
			WrittenRows:    toInt64(row["written_rows"]),
			WrittenBytes:   toInt64(row["written_bytes"]),
			MemoryUsage:    toInt64(row["memory_usage"]),
			TablesUsed:     extractTablesJSON(row["tables"]),
			IsError:        isError,
			ErrorMessage:   errorMsg,
			CreatedAt:      now,
		}
		entries = append(entries, entry)
		// Rows are ordered by event_time ASC, so the last one seen is newest.
		latestEventTime = eventTime
	}

	// Batch insert into SQLite
	inserted, err := s.store.InsertQueryLogBatch(entries)
	if err != nil {
		syncErr = fmt.Errorf("failed to batch insert query log: %w", err)
		return result, syncErr
	}
	result.QueriesIngested = inserted

	// Update watermark to the latest event time
	if latestEventTime != "" {
		result.NewWatermark = latestEventTime
	} else {
		result.NewWatermark = watermark
	}

	// Extract lineage (table + column level) from each entry
	lineageCount := 0
	for _, entry := range entries {
		results := ExtractLineageWithColumns(connID, entry)
		for _, lr := range results {
			if err := s.store.InsertLineageEdge(lr.Edge); err != nil {
				slog.Error("Failed to insert lineage edge", "error", err)
				continue
			}
			lineageCount++
			// Insert column-level mappings
			for _, cm := range lr.ColumnMappings {
				colEdge := ColumnLineageEdge{
					ID:            uuid.New().String(),
					LineageEdgeID: lr.Edge.ID,
					ConnectionID:  connID,
					SourceColumn:  cm.SourceColumn,
					TargetColumn:  cm.TargetColumn,
				}
				if err := s.store.InsertColumnLineageEdge(colEdge); err != nil {
					slog.Error("Failed to insert column lineage edge", "error", err)
				}
			}
		}
	}
	result.LineageEdgesFound = lineageCount

	// Evaluate policies against each entry
	violationCount := 0
	policies, err := s.store.GetPolicies(connID)
	if err != nil {
		slog.Warn("Failed to load policies for violation check", "error", err)
	} else if len(policies) > 0 {
		for _, entry := range entries {
			violations := EvaluatePolicies(connID, entry, policies, s.store)
			for _, v := range violations {
				// Persist the violation first; alert + incident fan-out below
				// is best-effort (warnings only).
				violationID, err := s.store.CreateViolation(connID, v.PolicyID, v.QueryLogID, v.User, v.ViolationDetail, v.Severity, "post_exec", "")
				if err != nil {
					slog.Error("Failed to insert policy violation", "error", err)
					continue
				}
				policyName := strings.TrimSpace(v.PolicyName)
				if policyName == "" {
					policyName = v.PolicyID
				}
				alertSeverity := strings.ToLower(strings.TrimSpace(v.Severity))
				if alertSeverity == "" {
					alertSeverity = alerts.SeverityWarn
				}
				// Fingerprint dedupes alerts per (policy, user, query shape).
				fingerprint := fmt.Sprintf("policy:%s:user:%s:hash:%s", v.PolicyID, v.User, entry.NormalizedHash)
				if _, err := s.db.CreateAlertEvent(
					&connID,
					alerts.EventTypePolicyViolation,
					alertSeverity,
					fmt.Sprintf("Policy violation: %s", policyName),
					v.ViolationDetail,
					map[string]interface{}{
						"violation_id":       violationID,
						"policy_id":          v.PolicyID,
						"policy_name":        v.PolicyName,
						"query_id":           entry.QueryID,
						"query_kind":         entry.QueryKind,
						"ch_user":            entry.User,
						"query_hash":         entry.NormalizedHash,
						"event_time":         entry.EventTime,
						"violation_severity": v.Severity,
					},
					fingerprint,
					violationID,
				); err != nil {
					slog.Warn("Failed to create alert event for policy violation", "error", err)
				}
				if _, created, err := s.store.UpsertIncidentFromViolation(
					connID, violationID, policyName, v.User, alertSeverity, v.ViolationDetail,
				); err != nil {
					slog.Warn("Failed to upsert incident for policy violation", "violation", violationID, "error", err)
				} else if created {
					slog.Info("Governance incident created from violation", "violation", violationID)
				}
				violationCount++
			}
		}
	}
	result.ViolationsFound = violationCount

	slog.Info("Query log sync completed",
		"connection", connID,
		"ingested", result.QueriesIngested,
		"lineage_edges", result.LineageEdgesFound,
		"violations", result.ViolationsFound,
		"new_watermark", result.NewWatermark,
	)
	return result, nil
}

// ── Query helper functions ──────────────────────────────────────────────────

// classifyQuery returns a classification string for the query type.
func classifyQuery(query string) string {
	trimmed := strings.TrimSpace(query)
	// Skip leading "--" line comments so the first real keyword is classified.
	for strings.HasPrefix(trimmed, "--") {
		if idx := strings.Index(trimmed, "\n"); idx >= 0 {
			trimmed = strings.TrimSpace(trimmed[idx+1:])
		} else {
			break
		}
	}
	upper := strings.ToUpper(trimmed)
	switch {
	case strings.HasPrefix(upper, "SELECT") || strings.HasPrefix(upper, "WITH"):
		return "Select"
	case strings.HasPrefix(upper, "INSERT"):
		return "Insert"
	case strings.HasPrefix(upper, "CREATE"):
		return "Create"
	case strings.HasPrefix(upper, "ALTER"):
		return "Alter"
	case strings.HasPrefix(upper, "DROP"):
		return "Drop"
	default:
		return "Other"
	}
}

// Literal-stripping patterns used by normalizeQuery.
var (
	stringLiteralRe = regexp.MustCompile(`'[^']*'`)
	numberLiteralRe = regexp.MustCompile(`\b\d+\.?\d*\b`)
	multiSpaceRe    = regexp.MustCompile(`\s+`)
)

// normalizeQuery strips string/number literals and collapses whitespace so
// queries differing only in parameter values normalize to the same text.
func normalizeQuery(query string) string {
	normalized := stringLiteralRe.ReplaceAllString(query, "'?'")
	normalized = numberLiteralRe.ReplaceAllString(normalized, "?")
	normalized = multiSpaceRe.ReplaceAllString(normalized, " ")
	normalized = strings.TrimSpace(normalized)
	return strings.ToUpper(normalized)
}

// hashNormalized returns the first 32 hex chars of the SHA-256 of the
// normalized query text (a compact dedupe key).
func hashNormalized(normalized string) string {
	h := sha256.Sum256([]byte(normalized))
	return fmt.Sprintf("%x", h)[:32]
}

// extractTablesJSON coerces the query_log "tables" column (which arrives in
// different shapes depending on driver/transport) into a JSON array string.
func extractTablesJSON(v interface{}) string {
	if v == nil {
		return "[]"
	}
	switch val := v.(type) {
	case string:
		// Already JSON-ish array text — pass through as-is.
		if strings.HasPrefix(val, "[") {
			return val
		}
		if val == "" {
			return "[]"
		}
		b, _ := json.Marshal([]string{val})
		return string(b)
	case []interface{}:
		strs := make([]string, 0, len(val))
		for _, item := range val {
			strs = append(strs, fmt.Sprintf("%v", item))
		}
		b, _ := json.Marshal(strs)
		return string(b)
	case []string:
		b, _ := json.Marshal(val)
		return string(b)
	default:
		b, err := json.Marshal(v)
		if err != nil {
			return "[]"
		}
		return string(b)
	}
}

// sanitizeQueryLogWatermark normalizes a stored watermark to the ClickHouse
// "2006-01-02 15:04:05" form. The result is interpolated into SQL, so quotes
// are stripped as a last resort when no known layout parses.
func sanitizeQueryLogWatermark(v string) string {
	s := strings.TrimSpace(v)
	if s == "" {
		return defaultQueryLogWatermark
	}
	layouts := []string{
		time.RFC3339Nano,
		time.RFC3339,
		"2006-01-02 15:04:05.999999999",
		"2006-01-02 15:04:05.999999",
		"2006-01-02 15:04:05",
	}
	for _, layout := range layouts {
		if t, err := time.Parse(layout, s); err == nil {
			return t.UTC().Format("2006-01-02 15:04:05")
		}
	}
	// Last-resort hardening against accidental malformed/corrupt values.
	s = strings.ReplaceAll(s, "'", "")
	if s == "" {
		return defaultQueryLogWatermark
	}
	return s
}

================================================ FILE: internal/governance/incidents.go ================================================
package governance

import (
	"database/sql"
	"fmt"
	"strings"
	"time"

	"github.com/google/uuid"
)

// ── Object comments ──────────────────────────────────────────────────────────

// CreateObjectComment stores a free-text comment attached to a database,
// table, or column. objectType is lower-cased; for "table" comments the
// column name is cleared. Returns the new comment id.
func (s *Store) CreateObjectComment(connectionID, objectType, dbName, tableName, columnName, commentText, createdBy string) (string, error) {
	id := uuid.NewString()
	now := time.Now().UTC().Format(time.RFC3339)
	if objectType == "table" {
		columnName = ""
	}
	_, err := s.conn().Exec(
		`INSERT INTO gov_object_comments (id, connection_id, object_type, database_name, table_name, column_name, comment_text, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		id, connectionID,
		strings.ToLower(strings.TrimSpace(objectType)),
		strings.TrimSpace(dbName),
		strings.TrimSpace(tableName),
		strings.TrimSpace(columnName),
		strings.TrimSpace(commentText),
		nullableValue(createdBy),
		now, now,
	)
	if err != nil {
		return "", fmt.Errorf("create object comment: %w", err)
	}
	return id, nil
}

// ListObjectComments returns comments for a connection, optionally filtered
// by object type / database / table / column; newest first, capped at 1000.
func (s *Store) ListObjectComments(connectionID, objectType, dbName, tableName, columnName string, limit int) ([]ObjectComment, error) {
	if limit <= 0 {
		limit = 100
	}
	if limit > 1000 {
		limit = 1000
	}
	// WHERE clause is assembled from fixed snippets only; user input goes
	// through placeholders.
	where := []string{"connection_id = ?"}
	args := []interface{}{connectionID}
	if ot := strings.TrimSpace(strings.ToLower(objectType)); ot != "" {
		where = append(where, "object_type = ?")
		args = append(args, ot)
	}
	if db := strings.TrimSpace(dbName); db != "" {
		where = append(where, "database_name = ?")
		args = append(args, db)
	}
	if tbl := strings.TrimSpace(tableName); tbl != "" {
where = append(where, "table_name = ?")
		args = append(args, tbl)
	}
	if col := strings.TrimSpace(columnName); col != "" {
		where = append(where, "column_name = ?")
		args = append(args, col)
	}
	args = append(args, limit)
	query := fmt.Sprintf(
		`SELECT id, connection_id, object_type, database_name, table_name, column_name, comment_text, created_by, created_at, updated_at FROM gov_object_comments WHERE %s ORDER BY created_at DESC LIMIT ?`,
		strings.Join(where, " AND "),
	)
	rows, err := s.conn().Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("list object comments: %w", err)
	}
	defer rows.Close()
	out := make([]ObjectComment, 0)
	for rows.Next() {
		var c ObjectComment
		var createdBy sql.NullString
		if err := rows.Scan(
			&c.ID, &c.ConnectionID, &c.ObjectType, &c.DatabaseName, &c.TableName, &c.ColumnName, &c.CommentText, &createdBy, &c.CreatedAt, &c.UpdatedAt,
		); err != nil {
			return nil, fmt.Errorf("scan object comment: %w", err)
		}
		c.CreatedBy = nullStringToPtr(createdBy)
		out = append(out, c)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate object comments: %w", err)
	}
	return out, nil
}

// DeleteObjectComment removes one comment scoped to its connection.
// Returns sql.ErrNoRows when nothing matched.
func (s *Store) DeleteObjectComment(connectionID, id string) error {
	res, err := s.conn().Exec(
		`DELETE FROM gov_object_comments WHERE id = ?
AND connection_id = ?`,
		id, connectionID,
	)
	if err != nil {
		return fmt.Errorf("delete object comment: %w", err)
	}
	affected, _ := res.RowsAffected()
	if affected == 0 {
		return sql.ErrNoRows
	}
	return nil
}

// ── Incidents ────────────────────────────────────────────────────────────────

// ListIncidents returns incidents for a connection, optionally filtered by
// status and/or severity (both normalized to lower case); most recently seen
// first, capped at 1000.
func (s *Store) ListIncidents(connectionID, status, severity string, limit int) ([]Incident, error) {
	if limit <= 0 {
		limit = 100
	}
	if limit > 1000 {
		limit = 1000
	}
	where := []string{"connection_id = ?"}
	args := []interface{}{connectionID}
	if v := strings.TrimSpace(strings.ToLower(status)); v != "" {
		where = append(where, "status = ?")
		args = append(args, v)
	}
	if v := strings.TrimSpace(strings.ToLower(severity)); v != "" {
		where = append(where, "severity = ?")
		args = append(args, v)
	}
	args = append(args, limit)
	query := fmt.Sprintf(
		`SELECT id, connection_id, source_type, source_ref, dedupe_key, title, severity, status, assignee, details, resolution_note, occurrence_count, first_seen_at, last_seen_at, resolved_at, created_by, created_at, updated_at FROM gov_incidents WHERE %s ORDER BY last_seen_at DESC LIMIT ?`,
		strings.Join(where, " AND "),
	)
	rows, err := s.conn().Query(query, args...)
if err != nil {
		return nil, fmt.Errorf("list incidents: %w", err)
	}
	defer rows.Close()
	out := make([]Incident, 0)
	for rows.Next() {
		item, err := scanIncident(rows)
		if err != nil {
			return nil, err
		}
		out = append(out, item)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate incidents: %w", err)
	}
	return out, nil
}

// GetIncidentByID fetches a single incident; returns (nil, nil) when the id
// is unknown.
func (s *Store) GetIncidentByID(id string) (*Incident, error) {
	row := s.conn().QueryRow(
		`SELECT id, connection_id, source_type, source_ref, dedupe_key, title, severity, status, assignee, details, resolution_note, occurrence_count, first_seen_at, last_seen_at, resolved_at, created_by, created_at, updated_at FROM gov_incidents WHERE id = ?`,
		id,
	)
	item, err := scanIncident(row)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &item, nil
}

// CreateIncident inserts a new incident with occurrence_count = 1 and
// first/last seen set to now. source_type/severity/status are lower-cased;
// empty optional fields become NULL via nullableValue. Returns the new id.
func (s *Store) CreateIncident(connectionID, sourceType, sourceRef, dedupeKey, title, severity, status, assignee, details, createdBy string) (string, error) {
	id := uuid.NewString()
	now := time.Now().UTC().Format(time.RFC3339)
	_, err := s.conn().Exec(
		`INSERT INTO gov_incidents (id, connection_id, source_type, source_ref, dedupe_key, title, severity, status, assignee, details, occurrence_count, first_seen_at, last_seen_at, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1, ?, ?, ?, ?, ?)`,
		id, connectionID,
		nullableValue(strings.ToLower(strings.TrimSpace(sourceType))),
		nullableValue(strings.TrimSpace(sourceRef)),
		nullableValue(strings.TrimSpace(dedupeKey)),
		strings.TrimSpace(title),
		strings.ToLower(strings.TrimSpace(severity)),
		strings.ToLower(strings.TrimSpace(status)),
		nullableValue(strings.TrimSpace(assignee)),
		nullableValue(strings.TrimSpace(details)),
		now, now,
		nullableValue(strings.TrimSpace(createdBy)),
		now, now,
	)
	if err != nil {
		return "", fmt.Errorf("create incident: %w", err)
	}
	return id, nil
}

// UpdateIncident rewrites the mutable fields of an incident; resolved_at is
// stamped when the (normalized) status is "resolved" or "dismissed", NULL
// otherwise.
//
// FIX: resolved_at used to be decided by comparing the RAW status parameter
// against "resolved"/"dismissed" while the stored status was
// strings.ToLower(strings.TrimSpace(status)) — a caller passing "Resolved"
// would persist status="resolved" with a NULL resolved_at. Normalize once and
// use that value for both the check and the stored column.
func (s *Store) UpdateIncident(id, title, severity, status, assignee, details, resolutionNote string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	normalizedStatus := strings.ToLower(strings.TrimSpace(status))
	var resolvedAt interface{}
	if normalizedStatus == "resolved" || normalizedStatus == "dismissed" {
		resolvedAt = now
	}
	if _, err := s.conn().Exec(
		`UPDATE gov_incidents SET title = ?, severity = ?, status = ?, assignee = ?, details = ?, resolution_note = ?, resolved_at = ?, updated_at = ? WHERE id = ?`,
		strings.TrimSpace(title),
		strings.ToLower(strings.TrimSpace(severity)),
		normalizedStatus,
		nullableValue(strings.TrimSpace(assignee)),
		nullableValue(strings.TrimSpace(details)),
		nullableValue(strings.TrimSpace(resolutionNote)),
		resolvedAt, now, id,
	); err != nil {
		return fmt.Errorf("update incident: %w", err)
	}
	return nil
}

// UpsertIncidentFromViolation folds a policy violation into an existing open
// incident sharing the same dedupe key (bumping occurrence_count and
// last_seen_at) or creates a new one. Returns (incidentID, created, error).
func (s *Store) UpsertIncidentFromViolation(connectionID, sourceRef, policyName, user, severity, detail string) (string, bool, error) {
	now := time.Now().UTC().Format(time.RFC3339)
	dedupeKey := strings.ToLower(strings.TrimSpace(fmt.Sprintf("violation:%s:%s:%s", policyName, user, severity)))
	row := s.conn().QueryRow(
		`SELECT id FROM gov_incidents WHERE connection_id = ? AND dedupe_key = ? AND status IN ('open', 'triaged', 'in_progress') ORDER BY last_seen_at DESC LIMIT 1`,
		connectionID, dedupeKey,
	)
	var incidentID string
	switch err := row.Scan(&incidentID); err {
	case nil:
		// An open incident already exists — fold this violation into it.
		if _, err := s.conn().Exec(
			`UPDATE gov_incidents SET occurrence_count = occurrence_count + 1, last_seen_at = ?, details = COALESCE(?, details), updated_at = ?
WHERE id = ?`,
			now, nullableValue(strings.TrimSpace(detail)), now, incidentID,
		); err != nil {
			return "", false, fmt.Errorf("update existing incident from violation: %w", err)
		}
		return incidentID, false, nil
	case sql.ErrNoRows:
		// No open incident with this dedupe key — open a fresh one.
		title := fmt.Sprintf("Policy violation: %s (%s)", strings.TrimSpace(policyName), strings.TrimSpace(user))
		if strings.TrimSpace(policyName) == "" {
			title = fmt.Sprintf("Policy violation (%s)", strings.TrimSpace(user))
		}
		id, err := s.CreateIncident(
			connectionID, "violation", sourceRef, dedupeKey, title,
			strings.ToLower(strings.TrimSpace(severity)),
			"open", "", detail, "system",
		)
		if err != nil {
			return "", false, err
		}
		return id, true, nil
	default:
		return "", false, fmt.Errorf("find existing incident from violation: %w", err)
	}
}

// GetViolationByID fetches one policy violation joined with its policy name
// (empty string when the policy was deleted). Returns (nil, nil) when the id
// is unknown.
func (s *Store) GetViolationByID(id string) (*PolicyViolation, error) {
	row := s.conn().QueryRow(
		`SELECT v.id, v.connection_id, v.policy_id, v.query_log_id, v.ch_user, v.violation_detail, v.severity, v.detection_phase, v.request_endpoint, v.detected_at, v.created_at, COALESCE(p.name, '') FROM gov_policy_violations v LEFT JOIN gov_policies p ON p.id = v.policy_id WHERE v.id = ?`,
		id,
	)
	var v PolicyViolation
	var queryLogID, requestEndpoint sql.NullString
	err := row.Scan(&v.ID, &v.ConnectionID, &v.PolicyID, &queryLogID, &v.User, &v.ViolationDetail, &v.Severity, &v.DetectionPhase, &requestEndpoint, &v.DetectedAt, &v.CreatedAt, &v.PolicyName)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("get violation by id: %w", err)
	}
	v.QueryLogID = queryLogID.String
	v.RequestEndpoint = nullStringToPtr(requestEndpoint)
	v.DetectionPhase = normalizeDetectionPhase(v.DetectionPhase)
	return &v, nil
}

// ListIncidentComments returns the comment thread for an incident, oldest
// first, capped at 2000.
func (s *Store) ListIncidentComments(incidentID string, limit int) ([]IncidentComment, error) {
	if limit <= 0 {
		limit = 200
	}
	if limit > 2000 {
		limit = 2000
	}
	rows, err := s.conn().Query(
		`SELECT id, incident_id, comment_text, created_by, created_at FROM gov_incident_comments WHERE incident_id = ?
ORDER BY created_at ASC LIMIT ?`,
		incidentID, limit,
	)
	if err != nil {
		return nil, fmt.Errorf("list incident comments: %w", err)
	}
	defer rows.Close()
	out := make([]IncidentComment, 0)
	for rows.Next() {
		var item IncidentComment
		var createdBy sql.NullString
		if err := rows.Scan(&item.ID, &item.IncidentID, &item.CommentText, &createdBy, &item.CreatedAt); err != nil {
			return nil, fmt.Errorf("scan incident comment: %w", err)
		}
		item.CreatedBy = nullStringToPtr(createdBy)
		out = append(out, item)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate incident comments: %w", err)
	}
	return out, nil
}

// CreateIncidentComment appends a comment to an incident and bumps the
// incident's updated_at so threads float in "recently updated" views.
func (s *Store) CreateIncidentComment(incidentID, commentText, createdBy string) (string, error) {
	id := uuid.NewString()
	now := time.Now().UTC().Format(time.RFC3339)
	if _, err := s.conn().Exec(
		`INSERT INTO gov_incident_comments (id, incident_id, comment_text, created_by, created_at) VALUES (?, ?, ?, ?, ?)`,
		id, incidentID,
		strings.TrimSpace(commentText),
		nullableValue(strings.TrimSpace(createdBy)),
		now,
	); err != nil {
		return "", fmt.Errorf("create incident comment: %w", err)
	}
	if _, err := s.conn().Exec(
		`UPDATE gov_incidents SET updated_at = ?
WHERE id = ?`,
		now, incidentID,
	); err != nil {
		return "", fmt.Errorf("touch incident after comment: %w", err)
	}
	return id, nil
}

// scanIncident maps one gov_incidents row into an Incident, converting
// nullable columns to *string. Works for both *sql.Row and *sql.Rows via the
// shared Scan interface.
func scanIncident(scanner interface {
	Scan(dest ...interface{}) error
}) (Incident, error) {
	var item Incident
	var sourceRef, dedupeKey, assignee, details, resolutionNote, resolvedAt, createdBy sql.NullString
	err := scanner.Scan(
		&item.ID, &item.ConnectionID, &item.SourceType, &sourceRef, &dedupeKey, &item.Title, &item.Severity, &item.Status, &assignee, &details, &resolutionNote, &item.OccurrenceCount, &item.FirstSeenAt, &item.LastSeenAt, &resolvedAt, &createdBy, &item.CreatedAt, &item.UpdatedAt,
	)
	if err != nil {
		return item, err
	}
	item.SourceRef = nullStringToPtr(sourceRef)
	item.DedupeKey = nullStringToPtr(dedupeKey)
	item.Assignee = nullStringToPtr(assignee)
	item.Details = nullStringToPtr(details)
	item.ResolutionNote = nullStringToPtr(resolutionNote)
	item.ResolvedAt = nullStringToPtr(resolvedAt)
	item.CreatedBy = nullStringToPtr(createdBy)
	return item, nil
}

// nullableValue returns nil for blank strings so SQLite stores NULL rather
// than an empty string; otherwise returns the trimmed value.
func nullableValue(v string) interface{} {
	trimmed := strings.TrimSpace(v)
	if trimmed == "" {
		return nil
	}
	return trimmed
}

================================================ FILE: internal/governance/lineage.go ================================================
package governance

import (
	"regexp"
	"strings"
	"time"

	"github.com/google/uuid"
)

// ── Regex patterns for table references ─────────────────────────────────────

// tableRef matches a possibly qualified table name: [db.]table
// Handles both backtick-quoted and unquoted identifiers.
// tableRefPattern matches one identifier segment (backtick-quoted or bare),
// optionally followed by ".segment" — i.e. `db`.`table`, db.table, or table.
// The whole reference is a single capture group; callers split on "." later.
const tableRefPattern = `((?:` + "`" + `[^` + "`" + `]+` + "`" + `|[a-zA-Z_][a-zA-Z0-9_]*)(?:\.(?:` + "`" + `[^` + "`" + `]+` + "`" + `|[a-zA-Z_][a-zA-Z0-9_]*))?)`

var (
	// Read-side references.
	fromRe = regexp.MustCompile(`(?i)\bFROM\s+` + tableRefPattern)
	joinRe = regexp.MustCompile(`(?i)\bJOIN\s+` + tableRefPattern)
	// Write-side (target) references.
	insertRe = regexp.MustCompile(`(?i)\bINSERT\s+INTO\s+` + tableRefPattern)
	createRe = regexp.MustCompile(`(?i)\bCREATE\s+(?:TABLE|MATERIALIZED\s+VIEW)\s+(?:IF\s+NOT\s+EXISTS\s+)?` + tableRefPattern)
)

// tableRefParsed holds a parsed database.table reference.
type tableRefParsed struct {
	Database string
	Table    string
}

// ── Public API ──────────────────────────────────────────────────────────────

// ExtractLineage analyses a query log entry and returns any lineage edges
// that can be inferred from the SQL text. Only INSERT INTO ... SELECT and
// CREATE TABLE/MATERIALIZED VIEW ... AS SELECT produce edges; plain SELECTs
// are reads and do not generate edges.
func ExtractLineage(connectionID string, entry QueryLogEntry) []LineageEdge {
	query := normaliseWhitespace(entry.QueryText)
	// Determine target table (INSERT INTO / CREATE TABLE|MV).
	target := extractTarget(query)
	if target == nil {
		// Plain SELECT or DDL without a target — no lineage edges.
		return nil
	}
	// Determine edge type.
	edgeType := classifyEdgeType(query)
	// Collect source tables (FROM / JOIN), excluding the target itself and
	// system tables.
	sources := extractSourceTables(query)
	if len(sources) == 0 {
		return nil
	}
	now := time.Now().UTC().Format(time.RFC3339)
	var edges []LineageEdge
	for _, src := range sources {
		// Skip self-references and system tables.
		if src.Database == target.Database && src.Table == target.Table {
			continue
		}
		if isSystemTable(src.Database, src.Table) {
			continue
		}
		edges = append(edges, LineageEdge{
			ID:             uuid.New().String(),
			ConnectionID:   connectionID,
			SourceDatabase: src.Database,
			SourceTable:    src.Table,
			TargetDatabase: target.Database,
			TargetTable:    target.Table,
			QueryID:        entry.QueryID,
			User:           entry.User,
			EdgeType:       string(edgeType),
			DetectedAt:     now,
		})
	}
	return edges
}

// ── Column-level lineage ───────────────────────────────────────────────────

// ColumnMapping represents a source→target column mapping within a lineage edge.
type ColumnMapping struct {
	SourceColumn string
	TargetColumn string
}

// LineageResult bundles a table-level edge with its column mappings.
type LineageResult struct {
	Edge           LineageEdge
	ColumnMappings []ColumnMapping
}

var (
	// Matches INSERT INTO table (col1, col2, ...) — captures the parenthesized column list.
	insertColsRe = regexp.MustCompile(`(?i)\bINSERT\s+INTO\s+` + tableRefPattern + `\s*\(([^)]+)\)`)
	// Matches SELECT ... FROM — captures everything between SELECT and FROM.
	selectClauseRe = regexp.MustCompile(`(?i)\bSELECT\s+(.*?)\s+FROM\b`)
)

// ExtractColumnLineage attempts to extract column-level mappings from an
// INSERT INTO (cols) SELECT cols FROM ... pattern. Returns nil when the
// pattern doesn't match or column counts differ (graceful degradation).
func ExtractColumnLineage(query string) []ColumnMapping {
	normalized := normaliseWhitespace(query)
	// Extract target columns from INSERT INTO table (col1, col2, ...)
	insertMatch := insertColsRe.FindStringSubmatch(normalized)
	if insertMatch == nil {
		return nil
	}
	// The column list is in the last capture group.
	targetColsRaw := insertMatch[len(insertMatch)-1]
	targetCols := splitAndTrimColumns(targetColsRaw)
	if len(targetCols) == 0 {
		return nil
	}
	// Extract source columns from SELECT clause.
	selectMatch := selectClauseRe.FindStringSubmatch(normalized)
	if selectMatch == nil || len(selectMatch) < 2 {
		return nil
	}
	sourceExprs := splitSelectExpressions(selectMatch[1])
	if len(sourceExprs) == 0 {
		return nil
	}
	// Only zip when counts match — avoids incorrect mappings.
	if len(targetCols) != len(sourceExprs) {
		return nil
	}
	mappings := make([]ColumnMapping, 0, len(targetCols))
	for i, target := range targetCols {
		source := extractColumnName(sourceExprs[i])
		if source == "" || source == "*" {
			return nil // SELECT * or unparseable expression
		}
		mappings = append(mappings, ColumnMapping{
			SourceColumn: source,
			TargetColumn: target,
		})
	}
	return mappings
}

// ExtractLineageWithColumns is like ExtractLineage but also returns column mappings.
func ExtractLineageWithColumns(connectionID string, entry QueryLogEntry) []LineageResult {
	edges := ExtractLineage(connectionID, entry)
	if len(edges) == 0 {
		return nil
	}
	columnMappings := ExtractColumnLineage(entry.QueryText)
	results := make([]LineageResult, 0, len(edges))
	for _, edge := range edges {
		results = append(results, LineageResult{
			Edge:           edge,
			ColumnMappings: columnMappings, // Same mappings apply to all edges from this query
		})
	}
	return results
}

// splitAndTrimColumns splits a comma-separated column list and trims whitespace/backticks.
func splitAndTrimColumns(s string) []string {
	parts := strings.Split(s, ",")
	var result []string
	for _, p := range parts {
		col := strings.TrimSpace(p)
		col = stripBackticks(col)
		if col != "" {
			result = append(result, col)
		}
	}
	return result
}

// splitSelectExpressions splits SELECT expressions by commas, respecting
// parenthesized sub-expressions (e.g., function calls).
func splitSelectExpressions(s string) []string { var result []string depth := 0 start := 0 for i := 0; i < len(s); i++ { switch s[i] { case '(': depth++ case ')': depth-- case ',': if depth == 0 { expr := strings.TrimSpace(s[start:i]) if expr != "" { result = append(result, expr) } start = i + 1 } } } // Last expression expr := strings.TrimSpace(s[start:]) if expr != "" { result = append(result, expr) } return result } // extractColumnName extracts the effective column name from a SELECT expression. // Handles: "col", "t.col", "expr AS alias", "expr alias". func extractColumnName(expr string) string { expr = strings.TrimSpace(expr) if expr == "" { return "" } // Check for AS alias (case-insensitive). asRe := regexp.MustCompile(`(?i)\bAS\s+` + "(`[^`]+`|[a-zA-Z_][a-zA-Z0-9_]*)" + `\s*$`) if m := asRe.FindStringSubmatch(expr); len(m) > 1 { return stripBackticks(m[1]) } // If no parens and no operators, take the last dotted part. if !strings.ContainsAny(expr, "()+*/-") { parts := strings.Fields(expr) last := parts[len(parts)-1] dotParts := strings.Split(last, ".") return stripBackticks(dotParts[len(dotParts)-1]) } return "" } // ── Internal helpers ──────────────────────────────────────────────────────── // extractTarget returns the target table for INSERT INTO or CREATE TABLE/MV // statements. Returns nil when none is found. func extractTarget(query string) *tableRefParsed { // Try INSERT INTO first. if m := insertRe.FindStringSubmatch(query); len(m) > 1 { db, tbl := parseTableRef(m[1:]) return &tableRefParsed{Database: db, Table: tbl} } // Try CREATE TABLE / MATERIALIZED VIEW. if m := createRe.FindStringSubmatch(query); len(m) > 1 { db, tbl := parseTableRef(m[1:]) return &tableRefParsed{Database: db, Table: tbl} } return nil } // classifyEdgeType returns the edge type string based on the SQL verb. 
func classifyEdgeType(query string) EdgeType { upper := strings.ToUpper(query) if strings.Contains(upper, "INSERT") { return EdgeInsertSelect } if strings.Contains(upper, "CREATE") { return EdgeCreateAsSelect } return EdgeSelectFrom } // extractSourceTables finds all FROM and JOIN table references in the query. func extractSourceTables(query string) []tableRefParsed { seen := map[string]bool{} var results []tableRefParsed addMatches := func(re *regexp.Regexp) { for _, m := range re.FindAllStringSubmatch(query, -1) { if len(m) < 2 { continue } db, tbl := parseTableRef(m[1:]) if isSystemTable(db, tbl) { continue } key := db + "." + tbl if seen[key] { continue } seen[key] = true results = append(results, tableRefParsed{Database: db, Table: tbl}) } } addMatches(fromRe) addMatches(joinRe) return results } // parseTableRef takes the captured groups from a table-reference regex match // and splits them into (database, table). If no database qualifier is // present, database is returned as an empty string. func parseTableRef(groups []string) (database, table string) { if len(groups) == 0 { return "", "" } raw := groups[0] raw = stripBackticks(raw) parts := strings.SplitN(raw, ".", 2) if len(parts) == 2 { return stripBackticks(parts[0]), stripBackticks(parts[1]) } return "", stripBackticks(parts[0]) } // stripBackticks removes surrounding backticks from an identifier. func stripBackticks(s string) string { if len(s) >= 2 && s[0] == '`' && s[len(s)-1] == '`' { return s[1 : len(s)-1] } return s } // isSystemTable returns true for ClickHouse system and information_schema // databases that should be excluded from lineage graphs. func isSystemTable(db, table string) bool { lower := strings.ToLower(db) switch lower { case "system", "information_schema", "information_schema_upper", "INFORMATION_SCHEMA": return true } // Also filter tables that look like system tables when no db is specified. 
if db == "" { lowerT := strings.ToLower(table) if strings.HasPrefix(lowerT, "system.") || strings.HasPrefix(lowerT, "information_schema.") { return true } } return false } // normaliseWhitespace collapses runs of whitespace into single spaces and // trims the result. This simplifies regex matching. func normaliseWhitespace(s string) string { return strings.Join(strings.Fields(s), " ") } ================================================ FILE: internal/governance/policy_engine.go ================================================ package governance import ( "encoding/json" "fmt" "strings" "time" "github.com/google/uuid" ) // ── Store interface for policy evaluation ─────────────────────────────────── // Store is expected to be defined elsewhere in the package (e.g., store.go). // For now we declare a minimal interface so the policy engine compiles // independently. Replace with the concrete type once the store is wired up. // PolicyStore is the interface the policy engine needs from the governance // data store. type PolicyStore interface { GetAccessMatrixForUser(connectionID, userName string) ([]AccessMatrixEntry, error) } // ── Public API ────────────────────────────────────────────────────────────── // EvaluatePolicies checks a query log entry against all provided policies and // returns any violations. It uses the store to look up the user's roles in // the access matrix. func EvaluatePolicies(connectionID string, entry QueryLogEntry, policies []Policy, store PolicyStore) []PolicyViolation { // Parse the tables_used JSON field. tablesUsed := parseTablesUsed(entry.TablesUsed) // Retrieve the user's roles / privileges from the access matrix. matrixEntries, err := store.GetAccessMatrixForUser(connectionID, entry.User) if err != nil { // If we can't resolve roles we can't evaluate — return empty. 
return nil } userRoles := collectUserRoles(matrixEntries) now := time.Now().UTC().Format(time.RFC3339) var violations []PolicyViolation for _, policy := range policies { if !policy.Enabled { continue } if normalizePolicyEnforcementMode(policy.EnforcementMode) == "block" { continue } // Check whether the query touches the object protected by this policy. if !queryTouchesObject(tablesUsed, entry.QueryText, policy) { continue } // Check whether the user holds the required role. if hasRole(userRoles, policy.RequiredRole) { continue } // No required role found → create a violation. detail := fmt.Sprintf( "User %q executed a query touching %s without required role %q", entry.User, describePolicyObject(policy), policy.RequiredRole, ) violations = append(violations, PolicyViolation{ ID: uuid.New().String(), ConnectionID: connectionID, PolicyID: policy.ID, QueryLogID: entry.ID, User: entry.User, ViolationDetail: detail, Severity: policy.Severity, DetectedAt: now, CreatedAt: now, PolicyName: policy.Name, }) } return violations } // ── Internal helpers ──────────────────────────────────────────────────────── // parseTablesUsed deserialises the JSON array stored in QueryLogEntry.TablesUsed. // It returns an empty slice on error or empty input. func parseTablesUsed(raw string) []string { if raw == "" || raw == "[]" { return nil } var tables []string if err := json.Unmarshal([]byte(raw), &tables); err != nil { return nil } return tables } // collectUserRoles extracts the set of distinct role names from access matrix // entries. Both direct grants (role_name is set) and privilege names are // collected so we can match on either. func collectUserRoles(entries []AccessMatrixEntry) map[string]bool { roles := make(map[string]bool, len(entries)) for _, e := range entries { if e.RoleName != nil && *e.RoleName != "" { roles[strings.ToLower(*e.RoleName)] = true } } return roles } // hasRole checks whether the user's role set contains the required role // (case-insensitive comparison). 
func hasRole(userRoles map[string]bool, requiredRole string) bool { return userRoles[strings.ToLower(requiredRole)] } // queryTouchesObject determines whether a query (identified by its list of // tables used and the raw SQL text) accesses the object described by a policy. func queryTouchesObject(tablesUsed []string, queryText string, policy Policy) bool { switch strings.ToLower(policy.ObjectType) { case "database": return touchesDatabase(tablesUsed, deref(policy.ObjectDatabase)) case "table": return touchesTable(tablesUsed, deref(policy.ObjectDatabase), deref(policy.ObjectTable)) case "column": if !touchesTable(tablesUsed, deref(policy.ObjectDatabase), deref(policy.ObjectTable)) { return false } // For column-level policies, check if the column name appears in the // query text. This is a heuristic — a full parser would be needed // for perfect accuracy. col := deref(policy.ObjectColumn) if col == "" { return false } return columnMentioned(queryText, col) default: return false } } // touchesDatabase returns true if any entry in tablesUsed belongs to the // given database. tablesUsed entries are expected in "db.table" format. func touchesDatabase(tablesUsed []string, database string) bool { if database == "" { return false } lowerDB := strings.ToLower(database) for _, t := range tablesUsed { parts := strings.SplitN(t, ".", 2) if len(parts) == 2 && strings.ToLower(parts[0]) == lowerDB { return true } } return false } // touchesTable returns true if the specific db.table combination appears in // tablesUsed. 
func touchesTable(tablesUsed []string, database, table string) bool { if table == "" { return false } lowerDB := strings.ToLower(database) lowerTbl := strings.ToLower(table) for _, t := range tablesUsed { parts := strings.SplitN(t, ".", 2) switch { case len(parts) == 2: if strings.ToLower(parts[0]) == lowerDB && strings.ToLower(parts[1]) == lowerTbl { return true } case len(parts) == 1: // No database qualifier in tablesUsed — match on table name alone // only when the policy also has no database. if lowerDB == "" && strings.ToLower(parts[0]) == lowerTbl { return true } } } return false } // columnMentioned does a case-insensitive check for the column identifier in // the query text. It looks for the column name as a whole word (surrounded // by non-identifier characters or string boundaries). func columnMentioned(queryText, column string) bool { lower := strings.ToLower(queryText) col := strings.ToLower(column) idx := 0 for { pos := strings.Index(lower[idx:], col) if pos < 0 { return false } pos += idx // Check word boundaries. startOK := pos == 0 || !isIdentChar(lower[pos-1]) endPos := pos + len(col) endOK := endPos >= len(lower) || !isIdentChar(lower[endPos]) if startOK && endOK { return true } idx = pos + 1 } } // isIdentChar returns true for characters that can appear in a SQL identifier. func isIdentChar(c byte) bool { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' } // describePolicyObject returns a human-readable description of the object a // policy protects, for use in violation messages. 
func describePolicyObject(p Policy) string { switch strings.ToLower(p.ObjectType) { case "database": return fmt.Sprintf("database %q", deref(p.ObjectDatabase)) case "table": db := deref(p.ObjectDatabase) tbl := deref(p.ObjectTable) if db != "" { return fmt.Sprintf("table %q.%q", db, tbl) } return fmt.Sprintf("table %q", tbl) case "column": db := deref(p.ObjectDatabase) tbl := deref(p.ObjectTable) col := deref(p.ObjectColumn) if db != "" { return fmt.Sprintf("column %q.%q.%q", db, tbl, col) } return fmt.Sprintf("column %q.%q", tbl, col) default: return p.ObjectType } } // deref safely dereferences a string pointer, returning the empty string for nil. func deref(s *string) string { if s == nil { return "" } return *s } ================================================ FILE: internal/governance/store.go ================================================ package governance import ( "database/sql" "fmt" "strings" "time" "github.com/caioricciuti/ch-ui/internal/database" "github.com/google/uuid" ) // nullStringToPtr converts a sql.NullString to a *string (nil if not valid). func nullStringToPtr(ns sql.NullString) *string { if ns.Valid { return &ns.String } return nil } // nullIntToPtr converts a sql.NullInt64 to an *int (nil if not valid). func nullIntToPtr(ni sql.NullInt64) *int { if ni.Valid { v := int(ni.Int64) return &v } return nil } // ptrToNullString converts a *string to a sql.NullString. func ptrToNullString(s *string) sql.NullString { if s == nil { return sql.NullString{} } return sql.NullString{String: *s, Valid: true} } // Store provides all governance CRUD operations against SQLite. type Store struct { db *database.DB } // NewStore creates a new governance Store. func NewStore(db *database.DB) *Store { return &Store{db: db} } // Ensure Store satisfies the PolicyStore interface used by the policy engine. var _ PolicyStore = (*Store)(nil) // conn returns the underlying *sql.DB for running queries. 
func (s *Store) conn() *sql.DB { return s.db.Conn() }

// ── Sync State ───────────────────────────────────────────────────────────────

// GetSyncStates returns all sync states for a connection.
func (s *Store) GetSyncStates(connectionID string) ([]SyncState, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, sync_type, last_synced_at, watermark, status, last_error, row_count, created_at, updated_at FROM gov_sync_state WHERE connection_id = ? ORDER BY sync_type`,
		connectionID,
	)
	if err != nil {
		return nil, fmt.Errorf("get sync states: %w", err)
	}
	defer rows.Close()
	var results []SyncState
	for rows.Next() {
		var ss SyncState
		var lastSynced, watermark, lastError sql.NullString
		if err := rows.Scan(&ss.ID, &ss.ConnectionID, &ss.SyncType, &lastSynced, &watermark, &ss.Status, &lastError, &ss.RowCount, &ss.CreatedAt, &ss.UpdatedAt); err != nil {
			return nil, fmt.Errorf("scan sync state: %w", err)
		}
		ss.LastSyncedAt = nullStringToPtr(lastSynced)
		ss.Watermark = nullStringToPtr(watermark)
		ss.LastError = nullStringToPtr(lastError)
		results = append(results, ss)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate sync state rows: %w", err)
	}
	return results, nil
}

// GetSyncState returns a specific sync state for a connection and sync type.
// Returns (nil, nil) — not an error — when no row exists.
func (s *Store) GetSyncState(connectionID string, syncType string) (*SyncState, error) {
	row := s.conn().QueryRow(
		`SELECT id, connection_id, sync_type, last_synced_at, watermark, status, last_error, row_count, created_at, updated_at FROM gov_sync_state WHERE connection_id = ?
		   AND sync_type = ?`, connectionID, syncType,
	)
	var ss SyncState
	var lastSynced, watermark, lastError sql.NullString
	err := row.Scan(&ss.ID, &ss.ConnectionID, &ss.SyncType, &lastSynced, &watermark, &ss.Status, &lastError, &ss.RowCount, &ss.CreatedAt, &ss.UpdatedAt)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("get sync state: %w", err)
	}
	ss.LastSyncedAt = nullStringToPtr(lastSynced)
	ss.Watermark = nullStringToPtr(watermark)
	ss.LastError = nullStringToPtr(lastError)
	return &ss, nil
}

// UpsertSyncState inserts or updates a sync state record. On conflict the
// watermark is only overwritten when a non-NULL value is supplied (COALESCE
// keeps the existing watermark when the new one is NULL).
func (s *Store) UpsertSyncState(connectionID string, syncType string, status string, watermark *string, lastError *string, rowCount int) error {
	now := time.Now().UTC().Format(time.RFC3339)
	id := uuid.NewString()
	_, err := s.conn().Exec(
		`INSERT INTO gov_sync_state (id, connection_id, sync_type, last_synced_at, watermark, status, last_error, row_count, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) ON CONFLICT(connection_id, sync_type) DO UPDATE SET last_synced_at = excluded.last_synced_at, watermark = COALESCE(excluded.watermark, gov_sync_state.watermark), status = excluded.status, last_error = excluded.last_error, row_count = excluded.row_count, updated_at = excluded.updated_at`,
		id, connectionID, syncType, now, ptrToNullString(watermark), status, ptrToNullString(lastError), rowCount, now, now,
	)
	if err != nil {
		return fmt.Errorf("upsert sync state: %w", err)
	}
	return nil
}

// UpdateSyncWatermark updates only the watermark for a specific sync state.
func (s *Store) UpdateSyncWatermark(connectionID string, syncType string, watermark string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	_, err := s.conn().Exec(
		`UPDATE gov_sync_state SET watermark = ?, updated_at = ? WHERE connection_id = ?
		   AND sync_type = ?`, watermark, now, connectionID, syncType,
	)
	if err != nil {
		return fmt.Errorf("update sync watermark: %w", err)
	}
	return nil
}

// ── Databases ────────────────────────────────────────────────────────────────

// GetDatabases returns all databases for a connection.
// NOTE(review): unlike GetTables/GetColumns, this does not filter is_deleted —
// soft-deleted databases are included; confirm callers expect that.
func (s *Store) GetDatabases(connectionID string) ([]GovDatabase, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, name, engine, first_seen, last_updated, is_deleted FROM gov_databases WHERE connection_id = ? ORDER BY name`,
		connectionID,
	)
	if err != nil {
		return nil, fmt.Errorf("get databases: %w", err)
	}
	defer rows.Close()
	var results []GovDatabase
	for rows.Next() {
		var d GovDatabase
		if err := rows.Scan(&d.ID, &d.ConnectionID, &d.Name, &d.Engine, &d.FirstSeen, &d.LastUpdated, &d.IsDeleted); err != nil {
			return nil, fmt.Errorf("scan database: %w", err)
		}
		results = append(results, d)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate database rows: %w", err)
	}
	return results, nil
}

// UpsertDatabase inserts or updates a database record from a GovDatabase struct.
// first_seen is preserved on conflict; only mutable fields are refreshed.
func (s *Store) UpsertDatabase(d GovDatabase) error {
	// SQLite has no bool type; store as 0/1.
	isDeleted := 0
	if d.IsDeleted {
		isDeleted = 1
	}
	_, err := s.conn().Exec(
		`INSERT INTO gov_databases (id, connection_id, name, engine, first_seen, last_updated, is_deleted) VALUES (?, ?, ?, ?, ?, ?, ?) ON CONFLICT(connection_id, name) DO UPDATE SET engine = excluded.engine, last_updated = excluded.last_updated, is_deleted = excluded.is_deleted`,
		d.ID, d.ConnectionID, d.Name, d.Engine, d.FirstSeen, d.LastUpdated, isDeleted,
	)
	if err != nil {
		return fmt.Errorf("upsert database: %w", err)
	}
	return nil
}

// MarkDatabaseDeleted soft-deletes a database record.
func (s *Store) MarkDatabaseDeleted(connectionID, name string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	_, err := s.conn().Exec(
		`UPDATE gov_databases SET is_deleted = 1, last_updated = ? WHERE connection_id = ?
		   AND name = ?`, now, connectionID, name,
	)
	if err != nil {
		return fmt.Errorf("mark database deleted: %w", err)
	}
	return nil
}

// ── Tables ───────────────────────────────────────────────────────────────────

// GetTables returns all non-deleted tables for a connection.
func (s *Store) GetTables(connectionID string) ([]GovTable, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, database_name, table_name, engine, table_uuid, total_rows, total_bytes, partition_count, first_seen, last_updated, is_deleted FROM gov_tables WHERE connection_id = ? AND is_deleted = 0 ORDER BY database_name, table_name`,
		connectionID,
	)
	if err != nil {
		return nil, fmt.Errorf("get tables: %w", err)
	}
	defer rows.Close()
	return scanTables(rows)
}

// GetTablesByDatabase returns all non-deleted tables for a specific database.
func (s *Store) GetTablesByDatabase(connectionID, databaseName string) ([]GovTable, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, database_name, table_name, engine, table_uuid, total_rows, total_bytes, partition_count, first_seen, last_updated, is_deleted FROM gov_tables WHERE connection_id = ? AND database_name = ? AND is_deleted = 0 ORDER BY table_name`,
		connectionID, databaseName,
	)
	if err != nil {
		return nil, fmt.Errorf("get tables by database: %w", err)
	}
	defer rows.Close()
	return scanTables(rows)
}

// GetTableByName returns a single table by connection, database, and table name.
// Returns (nil, nil) when no row exists. Note: no is_deleted filter here, so
// soft-deleted tables are still returned.
func (s *Store) GetTableByName(connectionID, dbName, tableName string) (*GovTable, error) {
	row := s.conn().QueryRow(
		`SELECT id, connection_id, database_name, table_name, engine, table_uuid, total_rows, total_bytes, partition_count, first_seen, last_updated, is_deleted FROM gov_tables WHERE connection_id = ? AND database_name = ?
		   AND table_name = ?`, connectionID, dbName, tableName,
	)
	var t GovTable
	err := row.Scan(&t.ID, &t.ConnectionID, &t.DatabaseName, &t.TableName, &t.Engine, &t.TableUUID, &t.TotalRows, &t.TotalBytes, &t.PartitionCount, &t.FirstSeen, &t.LastUpdated, &t.IsDeleted)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("get table by name: %w", err)
	}
	return &t, nil
}

// UpsertTable inserts or updates a table record from a GovTable struct.
// first_seen is preserved on conflict; stats and metadata are refreshed.
func (s *Store) UpsertTable(t GovTable) error {
	isDeleted := 0
	if t.IsDeleted {
		isDeleted = 1
	}
	_, err := s.conn().Exec(
		`INSERT INTO gov_tables (id, connection_id, database_name, table_name, engine, table_uuid, total_rows, total_bytes, partition_count, first_seen, last_updated, is_deleted) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) ON CONFLICT(connection_id, database_name, table_name) DO UPDATE SET engine = excluded.engine, table_uuid = excluded.table_uuid, total_rows = excluded.total_rows, total_bytes = excluded.total_bytes, partition_count = excluded.partition_count, last_updated = excluded.last_updated, is_deleted = excluded.is_deleted`,
		t.ID, t.ConnectionID, t.DatabaseName, t.TableName, t.Engine, t.TableUUID, t.TotalRows, t.TotalBytes, t.PartitionCount, t.FirstSeen, t.LastUpdated, isDeleted,
	)
	if err != nil {
		return fmt.Errorf("upsert table: %w", err)
	}
	return nil
}

// MarkTableDeleted soft-deletes a table record.
func (s *Store) MarkTableDeleted(connectionID, dbName, tableName string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	_, err := s.conn().Exec(
		`UPDATE gov_tables SET is_deleted = 1, last_updated = ? WHERE connection_id = ? AND database_name = ?
		   AND table_name = ?`, now, connectionID, dbName, tableName,
	)
	if err != nil {
		return fmt.Errorf("mark table deleted: %w", err)
	}
	return nil
}

// scanTables drains a *sql.Rows produced by the canonical 12-column gov_tables
// SELECT into a slice. The caller retains ownership of rows (and its Close).
func scanTables(rows *sql.Rows) ([]GovTable, error) {
	var results []GovTable
	for rows.Next() {
		var t GovTable
		if err := rows.Scan(&t.ID, &t.ConnectionID, &t.DatabaseName, &t.TableName, &t.Engine, &t.TableUUID, &t.TotalRows, &t.TotalBytes, &t.PartitionCount, &t.FirstSeen, &t.LastUpdated, &t.IsDeleted); err != nil {
			return nil, fmt.Errorf("scan table: %w", err)
		}
		results = append(results, t)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate table rows: %w", err)
	}
	return results, nil
}

// ── Columns ──────────────────────────────────────────────────────────────────

// GetColumns returns columns for a connection, optionally filtered by database and table.
// If dbName and tableName are empty strings, all non-deleted columns for the connection are returned.
func (s *Store) GetColumns(connectionID, dbName, tableName string) ([]GovColumn, error) {
	var query string
	var args []interface{}
	if dbName == "" && tableName == "" {
		query = `SELECT id, connection_id, database_name, table_name, column_name, column_type, column_position, default_kind, default_expression, comment, first_seen, last_updated, is_deleted FROM gov_columns WHERE connection_id = ? AND is_deleted = 0 ORDER BY database_name, table_name, column_position`
		args = []interface{}{connectionID}
	} else {
		query = `SELECT id, connection_id, database_name, table_name, column_name, column_type, column_position, default_kind, default_expression, comment, first_seen, last_updated, is_deleted FROM gov_columns WHERE connection_id = ? AND database_name = ? AND table_name = ? AND is_deleted = 0 ORDER BY column_position`
		args = []interface{}{connectionID, dbName, tableName}
	}
	rows, err := s.conn().Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("get columns: %w", err)
	}
	defer rows.Close()
	var results []GovColumn
	for rows.Next() {
		var c GovColumn
		var defaultKind, defaultExpr, comment sql.NullString
		if err := rows.Scan(&c.ID, &c.ConnectionID, &c.DatabaseName, &c.TableName, &c.ColumnName, &c.ColumnType, &c.ColumnPosition, &defaultKind, &defaultExpr, &comment, &c.FirstSeen, &c.LastUpdated, &c.IsDeleted); err != nil {
			return nil, fmt.Errorf("scan column: %w", err)
		}
		c.DefaultKind = nullStringToPtr(defaultKind)
		c.DefaultExpression = nullStringToPtr(defaultExpr)
		c.Comment = nullStringToPtr(comment)
		results = append(results, c)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate column rows: %w", err)
	}
	return results, nil
}

// UpsertColumn inserts or updates a column record from a GovColumn struct.
// first_seen is preserved on conflict; type/position/default/comment refresh.
func (s *Store) UpsertColumn(c GovColumn) error {
	isDeleted := 0
	if c.IsDeleted {
		isDeleted = 1
	}
	_, err := s.conn().Exec(
		`INSERT INTO gov_columns (id, connection_id, database_name, table_name, column_name, column_type, column_position, default_kind, default_expression, comment, first_seen, last_updated, is_deleted) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) ON CONFLICT(connection_id, database_name, table_name, column_name) DO UPDATE SET column_type = excluded.column_type, column_position = excluded.column_position, default_kind = excluded.default_kind, default_expression = excluded.default_expression, comment = excluded.comment, last_updated = excluded.last_updated, is_deleted = excluded.is_deleted`,
		c.ID, c.ConnectionID, c.DatabaseName, c.TableName, c.ColumnName, c.ColumnType, c.ColumnPosition, ptrToNullString(c.DefaultKind), ptrToNullString(c.DefaultExpression), ptrToNullString(c.Comment), c.FirstSeen, c.LastUpdated, isDeleted,
	)
	if err != nil {
		return fmt.Errorf("upsert column: %w", err)
	}
	return nil
}

// MarkColumnDeleted soft-deletes a column record.
func (s *Store) MarkColumnDeleted(connectionID, dbName, tableName, colName string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	_, err := s.conn().Exec(
		`UPDATE gov_columns SET is_deleted = 1, last_updated = ? WHERE connection_id = ? AND database_name = ? AND table_name = ? AND column_name = ?`,
		now, connectionID, dbName, tableName, colName,
	)
	if err != nil {
		return fmt.Errorf("mark column deleted: %w", err)
	}
	return nil
}

// ── Schema Changes ───────────────────────────────────────────────────────────

// InsertSchemaChange inserts a schema change record from a SchemaChange struct.
// The caller supplies ID and timestamps (cf. CreateSchemaChange, which
// generates them).
func (s *Store) InsertSchemaChange(sc SchemaChange) error {
	_, err := s.conn().Exec(
		`INSERT INTO gov_schema_changes (id, connection_id, change_type, database_name, table_name, column_name, old_value, new_value, detected_at, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		sc.ID, sc.ConnectionID, sc.ChangeType, sc.DatabaseName, sc.TableName, sc.ColumnName, sc.OldValue, sc.NewValue, sc.DetectedAt, sc.CreatedAt,
	)
	if err != nil {
		return fmt.Errorf("insert schema change: %w", err)
	}
	return nil
}

// CreateSchemaChange creates a new schema change record with auto-generated ID and timestamps.
// detected_at and created_at are both set to the current UTC time (RFC 3339).
func (s *Store) CreateSchemaChange(connectionID string, changeType SchemaChangeType, dbName, tableName, colName, oldVal, newVal string) error {
	now := time.Now().UTC().Format(time.RFC3339)
	id := uuid.NewString()
	_, err := s.conn().Exec(
		`INSERT INTO gov_schema_changes (id, connection_id, change_type, database_name, table_name, column_name, old_value, new_value, detected_at, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		id, connectionID, string(changeType), dbName, tableName, colName, oldVal, newVal, now, now,
	)
	if err != nil {
		return fmt.Errorf("create schema change: %w", err)
	}
	return nil
}

// GetSchemaChanges returns recent schema changes for a connection,
// newest first, capped at limit rows.
func (s *Store) GetSchemaChanges(connectionID string, limit int) ([]SchemaChange, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, change_type, database_name, table_name, column_name, old_value, new_value, detected_at, created_at FROM gov_schema_changes WHERE connection_id = ? ORDER BY detected_at DESC LIMIT ?`,
		connectionID, limit,
	)
	if err != nil {
		return nil, fmt.Errorf("get schema changes: %w", err)
	}
	defer rows.Close()
	var results []SchemaChange
	for rows.Next() {
		var sc SchemaChange
		if err := rows.Scan(&sc.ID, &sc.ConnectionID, &sc.ChangeType, &sc.DatabaseName, &sc.TableName, &sc.ColumnName, &sc.OldValue, &sc.NewValue, &sc.DetectedAt, &sc.CreatedAt); err != nil {
			return nil, fmt.Errorf("scan schema change: %w", err)
		}
		results = append(results, sc)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate schema change rows: %w", err)
	}
	return results, nil
}

// ── Query Log ────────────────────────────────────────────────────────────────

// BatchInsertQueryLog inserts a batch of query log entries using INSERT OR IGNORE
// (idempotent by connection_id + query_id).
func (s *Store) BatchInsertQueryLog(connectionID string, entries []QueryLogEntry) error { if len(entries) == 0 { return nil } tx, err := s.conn().Begin() if err != nil { return fmt.Errorf("begin query log batch: %w", err) } defer tx.Rollback() stmt, err := tx.Prepare( `INSERT OR IGNORE INTO gov_query_log (id, connection_id, query_id, ch_user, query_text, normalized_hash, query_kind, event_time, duration_ms, read_rows, read_bytes, result_rows, written_rows, written_bytes, memory_usage, tables_used, is_error, error_message, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, ) if err != nil { return fmt.Errorf("prepare query log insert: %w", err) } defer stmt.Close() for _, e := range entries { isError := 0 if e.IsError { isError = 1 } _, err := stmt.Exec( e.ID, e.ConnectionID, e.QueryID, e.User, e.QueryText, e.NormalizedHash, e.QueryKind, e.EventTime, e.DurationMs, e.ReadRows, e.ReadBytes, e.ResultRows, e.WrittenRows, e.WrittenBytes, e.MemoryUsage, e.TablesUsed, isError, ptrToNullString(e.ErrorMessage), e.CreatedAt, ) if err != nil { return fmt.Errorf("insert query log entry: %w", err) } } if err := tx.Commit(); err != nil { return fmt.Errorf("commit query log batch: %w", err) } return nil } // InsertQueryLogBatch is an alias for BatchInsertQueryLog that also returns inserted count. 
func (s *Store) InsertQueryLogBatch(entries []QueryLogEntry) (int, error) {
	if len(entries) == 0 {
		return 0, nil
	}
	tx, err := s.conn().Begin()
	if err != nil {
		return 0, fmt.Errorf("begin query log batch: %w", err)
	}
	// No-op after a successful Commit; undoes partial work on early return.
	defer tx.Rollback()
	stmt, err := tx.Prepare(
		`INSERT OR IGNORE INTO gov_query_log (id, connection_id, query_id, ch_user, query_text, normalized_hash, query_kind, event_time, duration_ms, read_rows, read_bytes, result_rows, written_rows, written_bytes, memory_usage, tables_used, is_error, error_message, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
	)
	if err != nil {
		return 0, fmt.Errorf("prepare query log insert: %w", err)
	}
	defer stmt.Close()
	now := time.Now().UTC().Format(time.RFC3339)
	inserted := 0
	for _, e := range entries {
		// Unlike BatchInsertQueryLog, this variant fills in a missing ID and
		// created_at on behalf of the caller.
		id := e.ID
		if id == "" {
			id = uuid.NewString()
		}
		isError := 0
		if e.IsError {
			isError = 1
		}
		createdAt := e.CreatedAt
		if createdAt == "" {
			createdAt = now
		}
		result, err := stmt.Exec(
			id, e.ConnectionID, e.QueryID, e.User, e.QueryText, e.NormalizedHash, e.QueryKind, e.EventTime, e.DurationMs, e.ReadRows, e.ReadBytes, e.ResultRows, e.WrittenRows, e.WrittenBytes, e.MemoryUsage, e.TablesUsed, isError, ptrToNullString(e.ErrorMessage), createdAt,
		)
		if err != nil {
			return 0, fmt.Errorf("insert query log entry: %w", err)
		}
		// INSERT OR IGNORE reports 0 rows affected for duplicates, so the
		// returned count reflects only rows actually inserted.
		affected, _ := result.RowsAffected()
		inserted += int(affected)
	}
	if err := tx.Commit(); err != nil {
		return 0, fmt.Errorf("commit query log batch: %w", err)
	}
	return inserted, nil
}

// GetQueryLog returns paginated query log entries with optional user/table filters.
// Returns the entries, total count, and any error.
func (s *Store) GetQueryLog(connectionID string, limit, offset int, user, table string) ([]QueryLogEntry, int, error) {
	// Build the WHERE clause dynamically; args is kept in lockstep with the
	// placeholders appended to `where`.
	where := "connection_id = ?"
	args := []interface{}{connectionID}
	if user != "" {
		where += " AND ch_user = ?"
		args = append(args, user)
	}
	if table != "" {
		where += " AND tables_used LIKE ?"
		args = append(args, "%"+table+"%")
	}
	// Get total count
	var total int
	// Copy args before the count query so the later append of limit/offset
	// cannot alias into the slice used here.
	countArgs := make([]interface{}, len(args))
	copy(countArgs, args)
	err := s.conn().QueryRow("SELECT COUNT(*) FROM gov_query_log WHERE "+where, countArgs...).Scan(&total)
	if err != nil {
		return nil, 0, fmt.Errorf("count query log: %w", err)
	}
	// Get page
	// `where` contains only trusted fragments built above; user input is
	// passed exclusively through bound parameters.
	query := fmt.Sprintf(
		`SELECT id, connection_id, query_id, ch_user, query_text, normalized_hash, query_kind, event_time, duration_ms, read_rows, read_bytes, result_rows, written_rows, written_bytes, memory_usage, tables_used, is_error, error_message, created_at FROM gov_query_log WHERE %s ORDER BY event_time DESC LIMIT ? OFFSET ?`,
		where,
	)
	args = append(args, limit, offset)
	rows, err := s.conn().Query(query, args...)
	if err != nil {
		return nil, 0, fmt.Errorf("get query log: %w", err)
	}
	defer rows.Close()
	var results []QueryLogEntry
	for rows.Next() {
		var e QueryLogEntry
		var errorMsg sql.NullString
		if err := rows.Scan(&e.ID, &e.ConnectionID, &e.QueryID, &e.User, &e.QueryText, &e.NormalizedHash, &e.QueryKind, &e.EventTime, &e.DurationMs, &e.ReadRows, &e.ReadBytes, &e.ResultRows, &e.WrittenRows, &e.WrittenBytes, &e.MemoryUsage, &e.TablesUsed, &e.IsError, &errorMsg, &e.CreatedAt); err != nil {
			return nil, 0, fmt.Errorf("scan query log entry: %w", err)
		}
		e.ErrorMessage = nullStringToPtr(errorMsg)
		results = append(results, e)
	}
	if err := rows.Err(); err != nil {
		return nil, 0, fmt.Errorf("iterate query log rows: %w", err)
	}
	return results, total, nil
}

// GetTopQueries returns the top queries grouped by normalized_hash.
// Rows with an empty normalized_hash are excluded; ordering is by frequency.
func (s *Store) GetTopQueries(connectionID string, limit int) ([]map[string]interface{}, error) {
	rows, err := s.conn().Query(
		`SELECT normalized_hash, COUNT(*) AS cnt, ROUND(AVG(duration_ms), 2) AS avg_duration_ms, COALESCE(SUM(read_rows), 0) AS total_read_rows, MIN(query_text) AS sample_query, MAX(event_time) AS last_seen FROM gov_query_log WHERE connection_id = ? AND normalized_hash != '' GROUP BY normalized_hash ORDER BY cnt DESC LIMIT ?`,
		connectionID, limit,
	)
	if err != nil {
		return nil, fmt.Errorf("get top queries: %w", err)
	}
	defer rows.Close()
	var results []map[string]interface{}
	for rows.Next() {
		var hash, sampleQuery, lastSeen string
		var cnt int
		var avgDurationMs float64
		var totalReadRows int64
		if err := rows.Scan(&hash, &cnt, &avgDurationMs, &totalReadRows, &sampleQuery, &lastSeen); err != nil {
			return nil, fmt.Errorf("scan top query: %w", err)
		}
		results = append(results, map[string]interface{}{
			"normalized_hash": hash,
			"count":           cnt,
			"avg_duration_ms": avgDurationMs,
			"total_read_rows": totalReadRows,
			"sample_query":    sampleQuery,
			"last_seen":       lastSeen,
		})
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate top query rows: %w", err)
	}
	return results, nil
}

// ── Lineage ──────────────────────────────────────────────────────────────────

// InsertLineageEdge inserts a lineage edge using INSERT OR IGNORE.
// created_at is stamped here; detected_at comes from the caller.
func (s *Store) InsertLineageEdge(edge LineageEdge) error {
	now := time.Now().UTC().Format(time.RFC3339)
	_, err := s.conn().Exec(
		`INSERT OR IGNORE INTO gov_lineage_edges (id, connection_id, source_database, source_table, target_database, target_table, query_id, ch_user, edge_type, detected_at, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		edge.ID, edge.ConnectionID, edge.SourceDatabase, edge.SourceTable, edge.TargetDatabase, edge.TargetTable, edge.QueryID, edge.User, edge.EdgeType, edge.DetectedAt, now,
	)
	if err != nil {
		return fmt.Errorf("insert lineage edge: %w", err)
	}
	return nil
}

// UpsertLineageEdge is an alias for InsertLineageEdge (INSERT OR IGNORE is idempotent).
func (s *Store) UpsertLineageEdge(edge LineageEdge) error {
	return s.InsertLineageEdge(edge)
}

// GetLineageForTable returns upstream and downstream edges for a specific table.
func (s *Store) GetLineageForTable(connectionID, dbName, tableName string) ([]LineageEdge, []LineageEdge, error) {
	// Upstream: edges where this table is the target (data flows in).
	upstreamRows, err := s.conn().Query(
		`SELECT id, connection_id, source_database, source_table, target_database, target_table, query_id, ch_user, edge_type, detected_at FROM gov_lineage_edges WHERE connection_id = ? AND target_database = ? AND target_table = ?`,
		connectionID, dbName, tableName,
	)
	if err != nil {
		return nil, nil, fmt.Errorf("get upstream lineage: %w", err)
	}
	defer upstreamRows.Close()
	upstream, err := scanLineageEdges(upstreamRows)
	if err != nil {
		return nil, nil, err
	}
	// Downstream: edges where this table is the source (data flows out).
	downstreamRows, err := s.conn().Query(
		`SELECT id, connection_id, source_database, source_table, target_database, target_table, query_id, ch_user, edge_type, detected_at FROM gov_lineage_edges WHERE connection_id = ? AND source_database = ? AND source_table = ?`,
		connectionID, dbName, tableName,
	)
	if err != nil {
		return nil, nil, fmt.Errorf("get downstream lineage: %w", err)
	}
	defer downstreamRows.Close()
	downstream, err := scanLineageEdges(downstreamRows)
	if err != nil {
		return nil, nil, err
	}
	return upstream, downstream, nil
}

// GetFullLineageGraph returns all lineage edges for a connection.
func (s *Store) GetFullLineageGraph(connectionID string) ([]LineageEdge, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, source_database, source_table, target_database, target_table, query_id, ch_user, edge_type, detected_at FROM gov_lineage_edges WHERE connection_id = ? ORDER BY detected_at DESC`,
		connectionID,
	)
	if err != nil {
		return nil, fmt.Errorf("get full lineage graph: %w", err)
	}
	defer rows.Close()
	return scanLineageEdges(rows)
}

// scanLineageEdges drains a gov_lineage_edges result set into LineageEdge
// values; the caller owns closing the rows.
func scanLineageEdges(rows *sql.Rows) ([]LineageEdge, error) {
	var results []LineageEdge
	for rows.Next() {
		var e LineageEdge
		if err := rows.Scan(&e.ID, &e.ConnectionID, &e.SourceDatabase, &e.SourceTable, &e.TargetDatabase, &e.TargetTable, &e.QueryID, &e.User, &e.EdgeType, &e.DetectedAt); err != nil {
			return nil, fmt.Errorf("scan lineage edge: %w", err)
		}
		results = append(results, e)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate lineage edge rows: %w", err)
	}
	return results, nil
}

// ── Column Lineage ─────────────────────────────────────────────────────────

// InsertColumnLineageEdge inserts a column-level lineage edge using INSERT OR IGNORE.
func (s *Store) InsertColumnLineageEdge(edge ColumnLineageEdge) error {
	now := time.Now().UTC().Format(time.RFC3339)
	_, err := s.conn().Exec(
		`INSERT OR IGNORE INTO gov_lineage_column_edges (id, lineage_edge_id, connection_id, source_column, target_column, created_at) VALUES (?, ?, ?, ?, ?, ?)`,
		edge.ID, edge.LineageEdgeID, edge.ConnectionID, edge.SourceColumn, edge.TargetColumn, now,
	)
	if err != nil {
		return fmt.Errorf("insert column lineage edge: %w", err)
	}
	return nil
}

// GetColumnEdgesForEdgeIDs batch-loads column lineage edges for a set of lineage edge IDs.
// The result maps lineage_edge_id -> its column edges; IDs with no column
// edges simply have no key in the map.
func (s *Store) GetColumnEdgesForEdgeIDs(edgeIDs []string) (map[string][]ColumnLineageEdge, error) {
	result := make(map[string][]ColumnLineageEdge)
	if len(edgeIDs) == 0 {
		return result, nil
	}
	// Build one IN (...) query with a placeholder per ID; values are bound,
	// never interpolated.
	placeholders := make([]string, len(edgeIDs))
	args := make([]interface{}, len(edgeIDs))
	for i, id := range edgeIDs {
		placeholders[i] = "?"
		args[i] = id
	}
	query := `SELECT id, lineage_edge_id, connection_id, source_column, target_column FROM gov_lineage_column_edges WHERE lineage_edge_id IN (` + strings.Join(placeholders, ",") + `)`
	rows, err := s.conn().Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("get column lineage edges: %w", err)
	}
	defer rows.Close()
	for rows.Next() {
		var e ColumnLineageEdge
		if err := rows.Scan(&e.ID, &e.LineageEdgeID, &e.ConnectionID, &e.SourceColumn, &e.TargetColumn); err != nil {
			return nil, fmt.Errorf("scan column lineage edge: %w", err)
		}
		result[e.LineageEdgeID] = append(result[e.LineageEdgeID], e)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate column lineage edge rows: %w", err)
	}
	return result, nil
}

// GetQueryByQueryID returns a single query log entry by its ClickHouse query_id.
// NOTE(review): unlike GetPolicyByID, a missing row here is returned as a
// wrapped sql.ErrNoRows error rather than (nil, nil) — confirm callers expect
// that. error_message (nullable) is scanned straight into the *string field,
// which database/sql supports (NULL -> nil pointer), rather than via
// sql.NullString as elsewhere in this file.
func (s *Store) GetQueryByQueryID(connectionID, queryID string) (*QueryLogEntry, error) {
	row := s.conn().QueryRow(
		`SELECT id, connection_id, query_id, ch_user, query_text, normalized_hash, query_kind, event_time, duration_ms, read_rows, read_bytes, result_rows, written_rows, written_bytes, memory_usage, tables_used, is_error, error_message, created_at FROM gov_query_log WHERE connection_id = ? AND query_id = ? LIMIT 1`,
		connectionID, queryID,
	)
	var e QueryLogEntry
	if err := row.Scan(&e.ID, &e.ConnectionID, &e.QueryID, &e.User, &e.QueryText, &e.NormalizedHash, &e.QueryKind, &e.EventTime, &e.DurationMs, &e.ReadRows, &e.ReadBytes, &e.ResultRows, &e.WrittenRows, &e.WrittenBytes, &e.MemoryUsage, &e.TablesUsed, &e.IsError, &e.ErrorMessage, &e.CreatedAt); err != nil {
		return nil, fmt.Errorf("get query by query_id: %w", err)
	}
	return &e, nil
}

// ── Tags ─────────────────────────────────────────────────────────────────────

// GetTags returns all tags for a connection.
func (s *Store) GetTags(connectionID string) ([]TagEntry, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, object_type, database_name, table_name, column_name, tag, tagged_by, created_at FROM gov_tags WHERE connection_id = ? ORDER BY created_at DESC`,
		connectionID,
	)
	if err != nil {
		return nil, fmt.Errorf("get tags: %w", err)
	}
	defer rows.Close()
	return scanTags(rows)
}

// GetTagsForTable returns all tags for a specific table.
func (s *Store) GetTagsForTable(connectionID, dbName, tableName string) ([]TagEntry, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, object_type, database_name, table_name, column_name, tag, tagged_by, created_at FROM gov_tags WHERE connection_id = ? AND database_name = ? AND table_name = ? ORDER BY created_at DESC`,
		connectionID, dbName, tableName,
	)
	if err != nil {
		return nil, fmt.Errorf("get tags for table: %w", err)
	}
	defer rows.Close()
	return scanTags(rows)
}

// GetTagsForColumn returns all tags for a specific column.
func (s *Store) GetTagsForColumn(connectionID, dbName, tableName, colName string) ([]TagEntry, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, object_type, database_name, table_name, column_name, tag, tagged_by, created_at FROM gov_tags WHERE connection_id = ? AND database_name = ? AND table_name = ? AND column_name = ? ORDER BY created_at DESC`,
		connectionID, dbName, tableName, colName,
	)
	if err != nil {
		return nil, fmt.Errorf("get tags for column: %w", err)
	}
	defer rows.Close()
	return scanTags(rows)
}

// CreateTag creates a new tag entry and returns its ID.
func (s *Store) CreateTag(connectionID, objectType, dbName, tableName, colName string, tag SensitivityTag, taggedBy string) (string, error) {
	now := time.Now().UTC().Format(time.RFC3339)
	id := uuid.NewString()
	_, err := s.conn().Exec(
		`INSERT INTO gov_tags (id, connection_id, object_type, database_name, table_name, column_name, tag, tagged_by, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		id, connectionID, objectType, dbName, tableName, colName, string(tag), taggedBy, now,
	)
	if err != nil {
		return "", fmt.Errorf("create tag: %w", err)
	}
	return id, nil
}

// DeleteTag deletes a tag by ID.
func (s *Store) DeleteTag(id string) error { _, err := s.conn().Exec("DELETE FROM gov_tags WHERE id = ?", id) if err != nil { return fmt.Errorf("delete tag: %w", err) } return nil } // GetTaggedTableCount returns the count of distinct tables that have at least one tag. func (s *Store) GetTaggedTableCount(connectionID string) (int, error) { var count int err := s.conn().QueryRow( `SELECT COUNT(DISTINCT database_name || '.' || table_name) FROM gov_tags WHERE connection_id = ?`, connectionID, ).Scan(&count) if err != nil { return 0, fmt.Errorf("get tagged table count: %w", err) } return count, nil } func scanTags(rows *sql.Rows) ([]TagEntry, error) { var results []TagEntry for rows.Next() { var t TagEntry if err := rows.Scan(&t.ID, &t.ConnectionID, &t.ObjectType, &t.DatabaseName, &t.TableName, &t.ColumnName, &t.Tag, &t.TaggedBy, &t.CreatedAt); err != nil { return nil, fmt.Errorf("scan tag: %w", err) } results = append(results, t) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate tag rows: %w", err) } return results, nil } // DeleteChUsersForConnection removes all cached ClickHouse users for a connection. func (s *Store) DeleteChUsersForConnection(connectionID string) error { if _, err := s.conn().Exec(`DELETE FROM gov_ch_users WHERE connection_id = ?`, connectionID); err != nil { return fmt.Errorf("delete ch users for connection: %w", err) } return nil } // DeleteChRolesForConnection removes all cached ClickHouse roles for a connection. func (s *Store) DeleteChRolesForConnection(connectionID string) error { if _, err := s.conn().Exec(`DELETE FROM gov_ch_roles WHERE connection_id = ?`, connectionID); err != nil { return fmt.Errorf("delete ch roles for connection: %w", err) } return nil } // DeleteRoleGrantsForConnection removes all cached role grants for a connection. 
func (s *Store) DeleteRoleGrantsForConnection(connectionID string) error {
	if _, err := s.conn().Exec(`DELETE FROM gov_role_grants WHERE connection_id = ?`, connectionID); err != nil {
		return fmt.Errorf("delete role grants for connection: %w", err)
	}
	return nil
}

// DeleteGrantsForConnection removes all cached grants for a connection.
func (s *Store) DeleteGrantsForConnection(connectionID string) error {
	if _, err := s.conn().Exec(`DELETE FROM gov_grants WHERE connection_id = ?`, connectionID); err != nil {
		return fmt.Errorf("delete grants for connection: %w", err)
	}
	return nil
}

// ── CH Users ─────────────────────────────────────────────────────────────────

// UpsertChUser inserts or replaces a ClickHouse user record from a ChUser struct.
// Conflict key is (connection_id, name); first_seen is preserved on update.
func (s *Store) UpsertChUser(u ChUser) error {
	_, err := s.conn().Exec(
		`INSERT INTO gov_ch_users (id, connection_id, name, auth_type, host_ip, default_roles, first_seen, last_updated) VALUES (?, ?, ?, ?, ?, ?, ?, ?) ON CONFLICT(connection_id, name) DO UPDATE SET auth_type = excluded.auth_type, host_ip = excluded.host_ip, default_roles = excluded.default_roles, last_updated = excluded.last_updated`,
		u.ID, u.ConnectionID, u.Name, ptrToNullString(u.AuthType), ptrToNullString(u.HostIP), ptrToNullString(u.DefaultRoles), u.FirstSeen, u.LastUpdated,
	)
	if err != nil {
		return fmt.Errorf("upsert ch user: %w", err)
	}
	return nil
}

// GetChUsers returns all ClickHouse users for a connection.
func (s *Store) GetChUsers(connectionID string) ([]ChUser, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, name, auth_type, host_ip, default_roles, first_seen, last_updated FROM gov_ch_users WHERE connection_id = ? ORDER BY name`,
		connectionID,
	)
	if err != nil {
		return nil, fmt.Errorf("get ch users: %w", err)
	}
	defer rows.Close()
	var results []ChUser
	for rows.Next() {
		var u ChUser
		// auth_type, host_ip and default_roles are nullable.
		var authType, hostIP, defaultRoles sql.NullString
		if err := rows.Scan(&u.ID, &u.ConnectionID, &u.Name, &authType, &hostIP, &defaultRoles, &u.FirstSeen, &u.LastUpdated); err != nil {
			return nil, fmt.Errorf("scan ch user: %w", err)
		}
		u.AuthType = nullStringToPtr(authType)
		u.HostIP = nullStringToPtr(hostIP)
		u.DefaultRoles = nullStringToPtr(defaultRoles)
		results = append(results, u)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate ch user rows: %w", err)
	}
	return results, nil
}

// ── CH Roles ─────────────────────────────────────────────────────────────────

// UpsertChRole inserts or replaces a ClickHouse role record from a ChRole struct.
// Conflict key is (connection_id, name); only last_updated is refreshed.
func (s *Store) UpsertChRole(r ChRole) error {
	_, err := s.conn().Exec(
		`INSERT INTO gov_ch_roles (id, connection_id, name, first_seen, last_updated) VALUES (?, ?, ?, ?, ?) ON CONFLICT(connection_id, name) DO UPDATE SET last_updated = excluded.last_updated`,
		r.ID, r.ConnectionID, r.Name, r.FirstSeen, r.LastUpdated,
	)
	if err != nil {
		return fmt.Errorf("upsert ch role: %w", err)
	}
	return nil
}

// GetChRoles returns all ClickHouse roles for a connection.
func (s *Store) GetChRoles(connectionID string) ([]ChRole, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, name, first_seen, last_updated FROM gov_ch_roles WHERE connection_id = ? ORDER BY name`,
		connectionID,
	)
	if err != nil {
		return nil, fmt.Errorf("get ch roles: %w", err)
	}
	defer rows.Close()
	var results []ChRole
	for rows.Next() {
		var r ChRole
		if err := rows.Scan(&r.ID, &r.ConnectionID, &r.Name, &r.FirstSeen, &r.LastUpdated); err != nil {
			return nil, fmt.Errorf("scan ch role: %w", err)
		}
		results = append(results, r)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate ch role rows: %w", err)
	}
	return results, nil
}

// ── Role Grants ──────────────────────────────────────────────────────────────

// UpsertRoleGrant inserts or replaces a role grant record from a RoleGrant struct.
// Conflict key is (connection_id, user_name, granted_role_name).
func (s *Store) UpsertRoleGrant(rg RoleGrant) error {
	// SQLite stores booleans as 0/1.
	isDefaultInt := 0
	if rg.IsDefault {
		isDefaultInt = 1
	}
	withAdminInt := 0
	if rg.WithAdminOption {
		withAdminInt = 1
	}
	_, err := s.conn().Exec(
		`INSERT INTO gov_role_grants (id, connection_id, user_name, granted_role_name, is_default, with_admin_option, first_seen, last_updated) VALUES (?, ?, ?, ?, ?, ?, ?, ?) ON CONFLICT(connection_id, user_name, granted_role_name) DO UPDATE SET is_default = excluded.is_default, with_admin_option = excluded.with_admin_option, last_updated = excluded.last_updated`,
		rg.ID, rg.ConnectionID, rg.UserName, rg.GrantedRoleName, isDefaultInt, withAdminInt, rg.FirstSeen, rg.LastUpdated,
	)
	if err != nil {
		return fmt.Errorf("upsert role grant: %w", err)
	}
	return nil
}

// GetRoleGrants returns all role grants for a connection.
func (s *Store) GetRoleGrants(connectionID string) ([]RoleGrant, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, user_name, granted_role_name, is_default, with_admin_option, first_seen, last_updated FROM gov_role_grants WHERE connection_id = ? ORDER BY user_name, granted_role_name`,
		connectionID,
	)
	if err != nil {
		return nil, fmt.Errorf("get role grants: %w", err)
	}
	defer rows.Close()
	var results []RoleGrant
	for rows.Next() {
		var rg RoleGrant
		if err := rows.Scan(&rg.ID, &rg.ConnectionID, &rg.UserName, &rg.GrantedRoleName, &rg.IsDefault, &rg.WithAdminOption, &rg.FirstSeen, &rg.LastUpdated); err != nil {
			return nil, fmt.Errorf("scan role grant: %w", err)
		}
		results = append(results, rg)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate role grant rows: %w", err)
	}
	return results, nil
}

// ── Grants ───────────────────────────────────────────────────────────────────

// UpsertGrant inserts or replaces a grant record from a Grant struct.
// Uses INSERT OR REPLACE keyed on the table's primary/unique key, unlike the
// ON CONFLICT upserts above.
func (s *Store) UpsertGrant(g Grant) error {
	isPartialRevoke := 0
	if g.IsPartialRevoke {
		isPartialRevoke = 1
	}
	grantOption := 0
	if g.GrantOption {
		grantOption = 1
	}
	_, err := s.conn().Exec(
		`INSERT OR REPLACE INTO gov_grants (id, connection_id, user_name, role_name, access_type, grant_database, grant_table, grant_column, is_partial_revoke, grant_option, first_seen, last_updated) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		g.ID, g.ConnectionID, ptrToNullString(g.UserName), ptrToNullString(g.RoleName), g.AccessType, ptrToNullString(g.GrantDatabase), ptrToNullString(g.GrantTable), ptrToNullString(g.GrantColumn), isPartialRevoke, grantOption, g.FirstSeen, g.LastUpdated,
	)
	if err != nil {
		return fmt.Errorf("upsert grant: %w", err)
	}
	return nil
}

// GetGrants returns all grants for a connection.
func (s *Store) GetGrants(connectionID string) ([]Grant, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, user_name, role_name, access_type, grant_database, grant_table, grant_column, is_partial_revoke, grant_option, first_seen, last_updated FROM gov_grants WHERE connection_id = ? ORDER BY access_type`,
		connectionID,
	)
	if err != nil {
		return nil, fmt.Errorf("get grants: %w", err)
	}
	defer rows.Close()
	var results []Grant
	for rows.Next() {
		var g Grant
		// A grant targets either a user or a role; the scope columns are
		// nullable too.
		var userName, roleName, grantDB, grantTable, grantCol sql.NullString
		if err := rows.Scan(&g.ID, &g.ConnectionID, &userName, &roleName, &g.AccessType, &grantDB, &grantTable, &grantCol, &g.IsPartialRevoke, &g.GrantOption, &g.FirstSeen, &g.LastUpdated); err != nil {
			return nil, fmt.Errorf("scan grant: %w", err)
		}
		g.UserName = nullStringToPtr(userName)
		g.RoleName = nullStringToPtr(roleName)
		g.GrantDatabase = nullStringToPtr(grantDB)
		g.GrantTable = nullStringToPtr(grantTable)
		g.GrantColumn = nullStringToPtr(grantCol)
		results = append(results, g)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate grant rows: %w", err)
	}
	return results, nil
}

// ── Access Matrix ────────────────────────────────────────────────────────────

// RebuildAccessMatrix deletes all access matrix entries for a connection and rebuilds
// from grants and role_grants joined data. Returns the number of entries created.
// Everything runs in a single transaction; each cursor is explicitly Closed
// before the next statement runs on the same tx.
func (s *Store) RebuildAccessMatrix(connectionID string) (int, error) {
	tx, err := s.conn().Begin()
	if err != nil {
		return 0, fmt.Errorf("begin access matrix rebuild: %w", err)
	}
	// No-op after Commit; rolls back the DELETE + partial inserts otherwise.
	defer tx.Rollback()
	if _, err := tx.Exec("DELETE FROM gov_access_matrix WHERE connection_id = ?", connectionID); err != nil {
		return 0, fmt.Errorf("delete access matrix: %w", err)
	}
	now := time.Now().UTC().Format(time.RFC3339)
	// Preload last query timestamps per user once to avoid N extra lookups while inserting.
	lastQueryByUser := make(map[string]sql.NullString)
	lastQueryRows, err := tx.Query(
		`SELECT ch_user, MAX(event_time) FROM gov_query_log WHERE connection_id = ? GROUP BY ch_user`,
		connectionID,
	)
	if err != nil {
		return 0, fmt.Errorf("query last query times: %w", err)
	}
	for lastQueryRows.Next() {
		var userName string
		var lastQueryTime sql.NullString
		if err := lastQueryRows.Scan(&userName, &lastQueryTime); err != nil {
			lastQueryRows.Close()
			return 0, fmt.Errorf("scan last query time: %w", err)
		}
		lastQueryByUser[userName] = lastQueryTime
	}
	if err := lastQueryRows.Err(); err != nil {
		lastQueryRows.Close()
		return 0, fmt.Errorf("iterate last query rows: %w", err)
	}
	if err := lastQueryRows.Close(); err != nil {
		return 0, fmt.Errorf("close last query rows: %w", err)
	}
	insertStmt, err := tx.Prepare(
		`INSERT INTO gov_access_matrix (id, connection_id, user_name, role_name, database_name, table_name, privilege, is_direct_grant, last_query_time, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
	)
	if err != nil {
		return 0, fmt.Errorf("prepare access matrix insert: %w", err)
	}
	defer insertStmt.Close()
	count := 0
	// Insert direct user grants
	directRows, err := tx.Query(
		`SELECT g.user_name, g.grant_database, g.grant_table, g.access_type FROM gov_grants g WHERE g.connection_id = ? AND g.user_name IS NOT NULL AND g.is_partial_revoke = 0`,
		connectionID,
	)
	if err != nil {
		return 0, fmt.Errorf("query direct grants: %w", err)
	}
	for directRows.Next() {
		var userName string
		var grantDB, grantTable sql.NullString
		var accessType string
		if err := directRows.Scan(&userName, &grantDB, &grantTable, &accessType); err != nil {
			directRows.Close()
			return 0, fmt.Errorf("scan direct grant: %w", err)
		}
		// Zero-value NullString (Valid=false) yields NULL for unseen users.
		lastQueryTime := lastQueryByUser[userName]
		if _, err = insertStmt.Exec(
			uuid.NewString(), connectionID, userName, nil, grantDB, grantTable, accessType, 1, lastQueryTime, now,
		); err != nil {
			directRows.Close()
			return 0, fmt.Errorf("insert direct grant matrix: %w", err)
		}
		count++
	}
	if err := directRows.Err(); err != nil {
		directRows.Close()
		return 0, fmt.Errorf("iterate direct grant rows: %w", err)
	}
	if err := directRows.Close(); err != nil {
		return 0, fmt.Errorf("close direct grant rows: %w", err)
	}
	// Insert role-based grants
	roleRows, err := tx.Query(
		`SELECT rg.user_name, rg.granted_role_name, g.grant_database, g.grant_table, g.access_type FROM gov_role_grants rg JOIN gov_grants g ON g.connection_id = rg.connection_id AND g.role_name = rg.granted_role_name WHERE rg.connection_id = ? AND g.is_partial_revoke = 0`,
		connectionID,
	)
	if err != nil {
		return 0, fmt.Errorf("query role-based grants: %w", err)
	}
	for roleRows.Next() {
		var userName, roleName, accessType string
		var grantDB, grantTable sql.NullString
		if err := roleRows.Scan(&userName, &roleName, &grantDB, &grantTable, &accessType); err != nil {
			roleRows.Close()
			return 0, fmt.Errorf("scan role grant: %w", err)
		}
		lastQueryTime := lastQueryByUser[userName]
		if _, err = insertStmt.Exec(
			uuid.NewString(), connectionID, userName, roleName, grantDB, grantTable, accessType, 0, lastQueryTime, now,
		); err != nil {
			roleRows.Close()
			return 0, fmt.Errorf("insert role grant matrix: %w", err)
		}
		count++
	}
	if err := roleRows.Err(); err != nil {
		roleRows.Close()
		return 0, fmt.Errorf("iterate role grant rows: %w", err)
	}
	if err := roleRows.Close(); err != nil {
		return 0, fmt.Errorf("close role grant rows: %w", err)
	}
	if err := tx.Commit(); err != nil {
		return 0, fmt.Errorf("commit access matrix rebuild: %w", err)
	}
	return count, nil
}

// GetAccessMatrix returns all access matrix entries for a connection.
func (s *Store) GetAccessMatrix(connectionID string) ([]AccessMatrixEntry, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, user_name, role_name, database_name, table_name, privilege, is_direct_grant, last_query_time FROM gov_access_matrix WHERE connection_id = ? ORDER BY user_name, privilege`,
		connectionID,
	)
	if err != nil {
		return nil, fmt.Errorf("get access matrix: %w", err)
	}
	defer rows.Close()
	return scanAccessMatrix(rows)
}

// GetAccessMatrixForUser returns access matrix entries for a specific user.
func (s *Store) GetAccessMatrixForUser(connectionID, userName string) ([]AccessMatrixEntry, error) {
	rows, err := s.conn().Query(
		`SELECT id, connection_id, user_name, role_name, database_name, table_name, privilege, is_direct_grant, last_query_time FROM gov_access_matrix WHERE connection_id = ? AND user_name = ? ORDER BY privilege`,
		connectionID, userName,
	)
	if err != nil {
		return nil, fmt.Errorf("get access matrix for user: %w", err)
	}
	defer rows.Close()
	return scanAccessMatrix(rows)
}

// UserHasRole checks whether a user has been granted a specific role.
func (s *Store) UserHasRole(connectionID, userName, roleName string) (bool, error) {
	var count int
	err := s.conn().QueryRow(
		`SELECT COUNT(*) FROM gov_role_grants WHERE connection_id = ? AND user_name = ? AND granted_role_name = ?`,
		connectionID, userName, roleName,
	).Scan(&count)
	if err != nil {
		return false, fmt.Errorf("check user role: %w", err)
	}
	return count > 0, nil
}

// GetOverPermissions finds access matrix entries where last_query_time is null
// or older than a default inactivity threshold (30 days).
func (s *Store) GetOverPermissions(connectionID string) ([]OverPermission, error) {
	return s.GetOverPermissionsWithDays(connectionID, 30)
}

// GetOverPermissionsWithDays finds access matrix entries where last_query_time is null
// or older than inactiveDays.
func (s *Store) GetOverPermissionsWithDays(connectionID string, inactiveDays int) ([]OverPermission, error) {
	// RFC3339 strings compare correctly lexicographically, so the cutoff can
	// be applied with a plain string comparison in SQL.
	cutoff := time.Now().UTC().AddDate(0, 0, -inactiveDays).Format(time.RFC3339)
	rows, err := s.conn().Query(
		`SELECT user_name, role_name, database_name, table_name, privilege, last_query_time FROM gov_access_matrix WHERE connection_id = ? AND (last_query_time IS NULL OR last_query_time < ?) ORDER BY user_name, privilege`,
		connectionID, cutoff,
	)
	if err != nil {
		return nil, fmt.Errorf("get over permissions: %w", err)
	}
	defer rows.Close()
	var results []OverPermission
	for rows.Next() {
		var op OverPermission
		var roleName, dbName, tableName, lastQueryTime sql.NullString
		if err := rows.Scan(&op.UserName, &roleName, &dbName, &tableName, &op.Privilege, &lastQueryTime); err != nil {
			return nil, fmt.Errorf("scan over permission: %w", err)
		}
		op.RoleName = nullStringToPtr(roleName)
		op.DatabaseName = nullStringToPtr(dbName)
		op.TableName = nullStringToPtr(tableName)
		op.LastQueryTime = nullStringToPtr(lastQueryTime)
		if lastQueryTime.Valid {
			// Best effort: an unparseable timestamp just leaves
			// DaysSinceQuery nil.
			t, parseErr := time.Parse(time.RFC3339, lastQueryTime.String)
			if parseErr == nil {
				days := int(time.Since(t).Hours() / 24)
				op.DaysSinceQuery = &days
			}
			op.Reason = fmt.Sprintf("no queries in %d+ days", inactiveDays)
		} else {
			op.Reason = "never queried"
		}
		results = append(results, op)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate over permission rows: %w", err)
	}
	return results, nil
}

// scanAccessMatrix drains a gov_access_matrix result set into
// AccessMatrixEntry values; the caller owns closing the rows.
func scanAccessMatrix(rows *sql.Rows) ([]AccessMatrixEntry, error) {
	var results []AccessMatrixEntry
	for rows.Next() {
		var am AccessMatrixEntry
		var roleName, dbName, tableName, lastQueryTime sql.NullString
		if err := rows.Scan(&am.ID, &am.ConnectionID, &am.UserName, &roleName, &dbName, &tableName, &am.Privilege, &am.IsDirectGrant, &lastQueryTime); err != nil {
			return nil, fmt.Errorf("scan access matrix entry: %w", err)
		}
		am.RoleName = nullStringToPtr(roleName)
		am.DatabaseName = nullStringToPtr(dbName)
		am.TableName = nullStringToPtr(tableName)
		am.LastQueryTime = nullStringToPtr(lastQueryTime)
		results = append(results, am)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate access matrix rows: %w", err)
	}
	return results, nil
}

// ── Policies ─────────────────────────────────────────────────────────────────

// GetPolicies returns all policies for a connection.
func (s *Store) GetPolicies(connectionID string) ([]Policy, error) {
	return s.scanPolicies(
		`SELECT id, connection_id, name, description, object_type, object_database, object_table, object_column, required_role, severity, enforcement_mode, enabled, created_by, created_at, updated_at
		FROM gov_policies WHERE connection_id = ? ORDER BY name`, connectionID,
	)
}

// GetEnabledPolicies returns all enabled policies for a connection.
func (s *Store) GetEnabledPolicies(connectionID string) ([]Policy, error) {
	return s.scanPolicies(
		`SELECT id, connection_id, name, description, object_type, object_database, object_table, object_column, required_role, severity, enforcement_mode, enabled, created_by, created_at, updated_at
		FROM gov_policies WHERE connection_id = ? AND enabled = 1 ORDER BY name`, connectionID,
	)
}

// scanPolicies executes a gov_policies query and converts the rows to Policy
// values. The query's column list and order must match the Scan call below;
// nullable columns go through sql.NullString → *string, and the persisted
// enforcement mode is normalized on the way out.
func (s *Store) scanPolicies(query string, args ...interface{}) ([]Policy, error) {
	rows, err := s.conn().Query(query, args...)
	if err != nil {
		return nil, fmt.Errorf("get policies: %w", err)
	}
	defer rows.Close()
	var results []Policy
	for rows.Next() {
		var p Policy
		var desc, objDB, objTable, objCol, createdBy, enforcementMode sql.NullString
		if err := rows.Scan(&p.ID, &p.ConnectionID, &p.Name, &desc, &p.ObjectType, &objDB, &objTable, &objCol, &p.RequiredRole, &p.Severity, &enforcementMode, &p.Enabled, &createdBy, &p.CreatedAt, &p.UpdatedAt); err != nil {
			return nil, fmt.Errorf("scan policy: %w", err)
		}
		p.Description = nullStringToPtr(desc)
		p.ObjectDatabase = nullStringToPtr(objDB)
		p.ObjectTable = nullStringToPtr(objTable)
		p.ObjectColumn = nullStringToPtr(objCol)
		// A NULL enforcement_mode yields "", which normalizes to "warn".
		p.EnforcementMode = normalizePolicyEnforcementMode(enforcementMode.String)
		p.CreatedBy = nullStringToPtr(createdBy)
		results = append(results, p)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("iterate policy rows: %w", err)
	}
	return results, nil
}

// GetPolicyByID returns a single policy by ID.
func (s *Store) GetPolicyByID(id string) (*Policy, error) {
	row := s.conn().QueryRow(
		`SELECT id, connection_id, name, description, object_type, object_database, object_table, object_column, required_role, severity, enforcement_mode, enabled, created_by, created_at, updated_at
		FROM gov_policies WHERE id = ?`, id,
	)
	var p Policy
	var desc, objDB, objTable, objCol, createdBy, enforcementMode sql.NullString
	err := row.Scan(&p.ID, &p.ConnectionID, &p.Name, &desc, &p.ObjectType, &objDB, &objTable, &objCol, &p.RequiredRole, &p.Severity, &enforcementMode, &p.Enabled, &createdBy, &p.CreatedAt, &p.UpdatedAt)
	// "Not found" is reported as (nil, nil) rather than an error; callers
	// must nil-check the returned policy.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("get policy by id: %w", err)
	}
	p.Description = nullStringToPtr(desc)
	p.ObjectDatabase = nullStringToPtr(objDB)
	p.ObjectTable = nullStringToPtr(objTable)
	p.ObjectColumn = nullStringToPtr(objCol)
	// A NULL enforcement_mode yields "", which normalizes to "warn".
	p.EnforcementMode = normalizePolicyEnforcementMode(enforcementMode.String)
	p.CreatedBy = nullStringToPtr(createdBy)
	return &p, nil
}

// CreatePolicy creates a new policy and returns its ID.
func (s *Store) CreatePolicy(connectionID, name, description, objectType, objectDB, objectTable, objectCol, requiredRole, severity, enforcementMode, createdBy string) (string, error) { now := time.Now().UTC().Format(time.RFC3339) id := uuid.NewString() var desc, oDB, oTable, oCol, cBy interface{} if description != "" { desc = description } if objectDB != "" { oDB = objectDB } if objectTable != "" { oTable = objectTable } if objectCol != "" { oCol = objectCol } if createdBy != "" { cBy = createdBy } _, err := s.conn().Exec( `INSERT INTO gov_policies (id, connection_id, name, description, object_type, object_database, object_table, object_column, required_role, severity, enforcement_mode, enabled, created_by, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1, ?, ?, ?)`, id, connectionID, name, desc, objectType, oDB, oTable, oCol, requiredRole, severity, normalizePolicyEnforcementMode(enforcementMode), cBy, now, now, ) if err != nil { return "", fmt.Errorf("create policy: %w", err) } return id, nil } // UpdatePolicy updates an existing policy. func (s *Store) UpdatePolicy(id, name, description, requiredRole, severity, enforcementMode string, enabled bool) error { now := time.Now().UTC().Format(time.RFC3339) var desc interface{} if description != "" { desc = description } enabledInt := 0 if enabled { enabledInt = 1 } _, err := s.conn().Exec( `UPDATE gov_policies SET name = ?, description = ?, required_role = ?, severity = ?, enforcement_mode = ?, enabled = ?, updated_at = ? WHERE id = ?`, name, desc, requiredRole, severity, normalizePolicyEnforcementMode(enforcementMode), enabledInt, now, id, ) if err != nil { return fmt.Errorf("update policy: %w", err) } return nil } // DeletePolicy deletes a policy by ID (cascades to violations). 
func (s *Store) DeletePolicy(id string) error { _, err := s.conn().Exec("DELETE FROM gov_policies WHERE id = ?", id) if err != nil { return fmt.Errorf("delete policy: %w", err) } return nil } // ── Violations ─────────────────────────────────────────────────────────────── // InsertPolicyViolation inserts a policy violation from a PolicyViolation struct. func (s *Store) InsertPolicyViolation(v PolicyViolation) error { _, err := s.conn().Exec( `INSERT INTO gov_policy_violations (id, connection_id, policy_id, query_log_id, ch_user, violation_detail, severity, detection_phase, request_endpoint, detected_at, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, v.ID, v.ConnectionID, v.PolicyID, nullableValue(v.QueryLogID), v.User, v.ViolationDetail, v.Severity, normalizeDetectionPhase(v.DetectionPhase), nullableValue(deref(v.RequestEndpoint)), v.DetectedAt, v.CreatedAt, ) if err != nil { return fmt.Errorf("insert policy violation: %w", err) } return nil } // CreateViolation creates a new policy violation and returns its ID. func (s *Store) CreateViolation(connectionID, policyID, queryLogID, user, detail, severity, detectionPhase, requestEndpoint string) (string, error) { now := time.Now().UTC().Format(time.RFC3339) id := uuid.NewString() _, err := s.conn().Exec( `INSERT INTO gov_policy_violations (id, connection_id, policy_id, query_log_id, ch_user, violation_detail, severity, detection_phase, request_endpoint, detected_at, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, id, connectionID, policyID, nullableValue(queryLogID), user, detail, severity, normalizeDetectionPhase(detectionPhase), nullableValue(requestEndpoint), now, now, ) if err != nil { return "", fmt.Errorf("create violation: %w", err) } return id, nil } // GetViolations returns violations for a connection with optional policyID filter, // joined with the policy name. 
func (s *Store) GetViolations(connectionID string, limit int, policyID string) ([]PolicyViolation, error) { where := "v.connection_id = ?" args := []interface{}{connectionID} if policyID != "" { where += " AND v.policy_id = ?" args = append(args, policyID) } args = append(args, limit) query := fmt.Sprintf( `SELECT v.id, v.connection_id, v.policy_id, v.query_log_id, v.ch_user, v.violation_detail, v.severity, v.detection_phase, v.request_endpoint, v.detected_at, v.created_at, COALESCE(p.name, '') FROM gov_policy_violations v LEFT JOIN gov_policies p ON p.id = v.policy_id WHERE %s ORDER BY v.detected_at DESC LIMIT ?`, where, ) rows, err := s.conn().Query(query, args...) if err != nil { return nil, fmt.Errorf("get violations: %w", err) } defer rows.Close() var results []PolicyViolation for rows.Next() { var v PolicyViolation var queryLogID, requestEndpoint sql.NullString if err := rows.Scan(&v.ID, &v.ConnectionID, &v.PolicyID, &queryLogID, &v.User, &v.ViolationDetail, &v.Severity, &v.DetectionPhase, &requestEndpoint, &v.DetectedAt, &v.CreatedAt, &v.PolicyName); err != nil { return nil, fmt.Errorf("scan violation: %w", err) } v.QueryLogID = queryLogID.String v.RequestEndpoint = nullStringToPtr(requestEndpoint) v.DetectionPhase = normalizeDetectionPhase(v.DetectionPhase) results = append(results, v) } if err := rows.Err(); err != nil { return nil, fmt.Errorf("iterate violation rows: %w", err) } return results, nil } func normalizePolicyEnforcementMode(v string) string { mode := strings.ToLower(strings.TrimSpace(v)) switch mode { case "block": return "block" default: return "warn" } } func normalizeDetectionPhase(v string) string { phase := strings.ToLower(strings.TrimSpace(v)) switch phase { case "pre_exec_block": return "pre_exec_block" default: return "post_exec" } } // ── Overview ───────────────────────────────────────────────────────────────── // GetOverview returns aggregate counts from all governance tables for a connection. 
func (s *Store) GetOverview(connectionID string) (*GovernanceOverview, error) {
	o := &GovernanceOverview{}
	// The COUNT queries below are deliberately best-effort: Scan errors are
	// ignored so a single failing sub-query leaves its counter at zero
	// instead of aborting the whole overview.
	s.conn().QueryRow("SELECT COUNT(*) FROM gov_databases WHERE connection_id = ? AND is_deleted = 0", connectionID).Scan(&o.DatabaseCount)
	s.conn().QueryRow("SELECT COUNT(*) FROM gov_tables WHERE connection_id = ? AND is_deleted = 0", connectionID).Scan(&o.TableCount)
	s.conn().QueryRow("SELECT COUNT(*) FROM gov_columns WHERE connection_id = ? AND is_deleted = 0", connectionID).Scan(&o.ColumnCount)
	tagCount, err := s.GetTaggedTableCount(connectionID)
	if err == nil {
		o.TaggedTableCount = tagCount
	}
	s.conn().QueryRow("SELECT COUNT(*) FROM gov_ch_users WHERE connection_id = ?", connectionID).Scan(&o.UserCount)
	s.conn().QueryRow("SELECT COUNT(*) FROM gov_ch_roles WHERE connection_id = ?", connectionID).Scan(&o.RoleCount)
	// Query volume over the trailing 24 hours.
	cutoff24h := time.Now().UTC().Add(-24 * time.Hour).Format(time.RFC3339)
	s.conn().QueryRow("SELECT COUNT(*) FROM gov_query_log WHERE connection_id = ? AND event_time > ?", connectionID, cutoff24h).Scan(&o.QueryCount24h)
	s.conn().QueryRow("SELECT COUNT(*) FROM gov_lineage_edges WHERE connection_id = ?", connectionID).Scan(&o.LineageEdgeCount)
	s.conn().QueryRow("SELECT COUNT(*) FROM gov_policies WHERE connection_id = ?", connectionID).Scan(&o.PolicyCount)
	s.conn().QueryRow("SELECT COUNT(*) FROM gov_policy_violations WHERE connection_id = ?", connectionID).Scan(&o.ViolationCount)
	// Only incidents that are still actionable count toward the overview.
	s.conn().QueryRow("SELECT COUNT(*) FROM gov_incidents WHERE connection_id = ? AND status IN ('open', 'triaged', 'in_progress')", connectionID).Scan(&o.IncidentCount)
	s.conn().QueryRow("SELECT COUNT(*) FROM gov_schema_changes WHERE connection_id = ?", connectionID).Scan(&o.SchemaChangeCount)
	// The embedded lists are also best-effort; a failure leaves them nil.
	syncStates, err := s.GetSyncStates(connectionID)
	if err == nil {
		o.SyncStates = syncStates
	}
	recentChanges, err := s.GetSchemaChanges(connectionID, 10)
	if err == nil {
		o.RecentChanges = recentChanges
	}
	recentViolations, err := s.GetViolations(connectionID, 10, "")
	if err == nil {
		o.RecentViolations = recentViolations
	}
	return o, nil
}

// ── Cleanup ──────────────────────────────────────────────────────────────────

// CleanupOldQueryLogs deletes query logs older than the given timestamp.
// before is compared as text against event_time; the number of deleted rows
// is returned.
func (s *Store) CleanupOldQueryLogs(connectionID string, before string) (int64, error) {
	result, err := s.conn().Exec(
		"DELETE FROM gov_query_log WHERE connection_id = ? AND event_time < ?",
		connectionID, before,
	)
	if err != nil {
		return 0, fmt.Errorf("cleanup old query logs: %w", err)
	}
	return result.RowsAffected()
}

// CleanupOldViolations deletes violations older than the given timestamp,
// returning the number of deleted rows.
func (s *Store) CleanupOldViolations(connectionID string, before string) (int64, error) {
	result, err := s.conn().Exec(
		"DELETE FROM gov_policy_violations WHERE connection_id = ? AND detected_at < ?",
		connectionID, before,
	)
	if err != nil {
		return 0, fmt.Errorf("cleanup old violations: %w", err)
	}
	return result.RowsAffected()
}

================================================
FILE: internal/governance/syncer.go
================================================
package governance

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"sync"
	"time"

	"github.com/caioricciuti/ch-ui/internal/crypto"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/tunnel"
)

const (
	// syncTickInterval is how often the background loop wakes up.
	syncTickInterval = 5 * time.Minute
	// queryTimeout bounds each ClickHouse query issued through the tunnel.
	queryTimeout = 60 * time.Second
	// staleDuration is the age after which a sync state is considered stale.
	staleDuration = 10 * time.Minute
	// retentionDays bounds how long query logs and violations are kept.
	retentionDays = 30
)

// Syncer orchestrates ClickHouse → SQLite governance synchronisation.
// It runs periodic background syncs and supports on-demand sync for
// individual connections.
type Syncer struct {
	store   *Store
	db      *database.DB
	gateway *tunnel.Gateway
	secret  string

	activeSyncs   sync.Map // connectionID → bool (prevents concurrent syncs per connection)
	lastBorrowLog sync.Map // connectionID → time.Time (rate-limits credential borrow audit rows)

	mu      sync.Mutex // guards running and stopCh
	running bool
	stopCh  chan struct{}
}

// NewSyncer creates a new governance Syncer.
func NewSyncer(store *Store, db *database.DB, gw *tunnel.Gateway, secret string) *Syncer {
	return &Syncer{
		store:   store,
		db:      db,
		gateway: gw,
		secret:  secret,
	}
}

// GetStore returns the underlying governance store.
func (s *Syncer) GetStore() *Store {
	return s.store
}

// StartBackground launches the background goroutine that ticks every 5 minutes
// to sync governance data for all connected tunnels. Idempotent: a second call
// while already running is a no-op.
func (s *Syncer) StartBackground() {
	s.mu.Lock()
	if s.running {
		s.mu.Unlock()
		return
	}
	s.stopCh = make(chan struct{})
	s.running = true
	// Capture the channel under the lock so a concurrent Stop (which closes
	// s.stopCh) cannot race with the goroutine reading it.
	stopCh := s.stopCh
	s.mu.Unlock()

	go func() {
		slog.Info("Governance syncer started", "interval", syncTickInterval)
		// Prune retention once at startup, before the first tick fires.
		if connections, err := s.db.GetConnections(); err == nil {
			s.pruneRetention(connections)
		}
		ticker := time.NewTicker(syncTickInterval)
		defer ticker.Stop()
		for {
			select {
			case <-stopCh:
				slog.Info("Governance syncer stopped")
				return
			case <-ticker.C:
				s.backgroundTick()
			}
		}
	}()
}

// Stop signals the background goroutine to stop. Safe to call when the syncer
// is not running — a no-op in that case.
func (s *Syncer) Stop() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.running {
		return
	}
	close(s.stopCh)
	s.running = false
}

// IsRunning reports whether the background goroutine is currently active.
func (s *Syncer) IsRunning() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.running
}

// SyncConnection runs all three governance sync phases (metadata, querylog, access)
// for a single connection.
// It prevents concurrent syncs per connection.
func (s *Syncer) SyncConnection(ctx context.Context, creds CHCredentials) (*SyncResult, error) {
	// Prevent concurrent syncs for the same connection: LoadOrStore is the
	// atomic claim; the deferred Delete releases it even on panic.
	if _, loaded := s.activeSyncs.LoadOrStore(creds.ConnectionID, true); loaded {
		return nil, fmt.Errorf("sync already in progress for connection %s", creds.ConnectionID)
	}
	defer s.activeSyncs.Delete(creds.ConnectionID)

	result := &SyncResult{}

	// Each phase records its own error in the result so later phases still
	// run even if an earlier one fails; the function itself only errors on
	// the concurrency guard above.

	// Phase 1: Metadata
	metaResult, err := s.syncMetadata(ctx, creds)
	if err != nil {
		result.MetadataError = err.Error()
		slog.Error("Metadata sync failed", "connection", creds.ConnectionID, "error", err)
	} else {
		result.MetadataResult = metaResult
	}

	// Phase 2: Query log
	qlResult, err := s.syncQueryLog(ctx, creds)
	if err != nil {
		result.QueryLogError = err.Error()
		slog.Error("Query log sync failed", "connection", creds.ConnectionID, "error", err)
	} else {
		result.QueryLogResult = qlResult
	}

	// Phase 3: Access
	accessResult, err := s.syncAccess(ctx, creds)
	if err != nil {
		result.AccessError = err.Error()
		slog.Error("Access sync failed", "connection", creds.ConnectionID, "error", err)
	} else {
		result.AccessResult = accessResult
	}

	return result, nil
}

// SyncSingle runs a single sync phase for a connection.
func (s *Syncer) SyncSingle(ctx context.Context, creds CHCredentials, syncType SyncType) error {
	switch syncType {
	case SyncMetadata:
		_, err := s.syncMetadata(ctx, creds)
		return err
	case SyncQueryLog:
		_, err := s.syncQueryLog(ctx, creds)
		return err
	case SyncAccess:
		_, err := s.syncAccess(ctx, creds)
		return err
	default:
		return fmt.Errorf("unknown sync type: %s", syncType)
	}
}

// backgroundTick iterates over all connections, checks tunnel status and
// sync staleness, borrows credentials from active sessions, and triggers
// SyncConnection in goroutines.
func (s *Syncer) backgroundTick() {
	connections, err := s.db.GetConnections()
	if err != nil {
		slog.Error("Governance sync: failed to load connections", "error", err)
		return
	}
	s.pruneRetention(connections)
	var wg sync.WaitGroup
	for _, conn := range connections {
		connID := conn.ID
		// Skip if tunnel is offline
		if !s.gateway.IsTunnelOnline(connID) {
			continue
		}
		// Skip if a sync is already running for this connection
		if _, loaded := s.activeSyncs.Load(connID); loaded {
			continue
		}
		// Check if any sync type is stale
		if !s.isSyncStale(connID) {
			continue
		}
		// Borrow credentials from an active session
		creds, err := s.findCredentials(connID)
		if err != nil {
			slog.Debug("Governance sync: no credentials for connection", "connection", connID, "error", err)
			continue
		}
		wg.Add(1)
		// creds is passed as a parameter so each goroutine captures its own
		// copy rather than the loop variable.
		go func(c CHCredentials) {
			defer wg.Done()
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
			defer cancel()
			result, err := s.SyncConnection(ctx, c)
			if err != nil {
				slog.Error("Governance background sync failed", "connection", c.ConnectionID, "error", err)
				return
			}
			slog.Info("Governance background sync completed",
				"connection", c.ConnectionID,
				"metadata", result.MetadataResult != nil,
				"querylog", result.QueryLogResult != nil,
				"access", result.AccessResult != nil,
			)
		}(creds)
	}
	// Block until all per-connection syncs finish so ticks never overlap.
	wg.Wait()
}

// pruneRetention deletes query log and violation rows older than retentionDays
// for every known connection, bounding SQLite growth on busy ClickHouse instances.
func (s *Syncer) pruneRetention(connections []database.Connection) { cutoff := time.Now().UTC().AddDate(0, 0, -retentionDays).Format("2006-01-02 15:04:05") for _, conn := range connections { if n, err := s.store.CleanupOldQueryLogs(conn.ID, cutoff); err != nil { slog.Warn("Governance retention prune (query_log) failed", "connection", conn.ID, "error", err) } else if n > 0 { slog.Info("Governance retention pruned query_log", "connection", conn.ID, "rows", n, "older_than", cutoff) } if n, err := s.store.CleanupOldViolations(conn.ID, cutoff); err != nil { slog.Warn("Governance retention prune (violations) failed", "connection", conn.ID, "error", err) } else if n > 0 { slog.Info("Governance retention pruned violations", "connection", conn.ID, "rows", n, "older_than", cutoff) } } } // isSyncStale returns true if any sync type for the connection is older than staleDuration. func (s *Syncer) isSyncStale(connectionID string) bool { syncTypes := []SyncType{SyncMetadata, SyncQueryLog, SyncAccess} for _, st := range syncTypes { state, err := s.store.GetSyncState(connectionID, string(st)) if err != nil || state == nil { return true // no state yet → needs sync } if state.LastSyncedAt == nil { return true } lastSync, err := time.Parse(time.RFC3339, *state.LastSyncedAt) if err != nil { return true } if time.Since(lastSync) > staleDuration { return true } } return false } // findCredentials borrows credentials from an active session for the given connection. // It tries up to 3 recent sessions and returns the first one with a valid password. 
func (s *Syncer) findCredentials(connectionID string) (CHCredentials, error) {
	sessions, err := s.db.GetActiveSessionsByConnection(connectionID, 3)
	if err != nil {
		return CHCredentials{}, fmt.Errorf("failed to load sessions: %w", err)
	}
	for _, sess := range sessions {
		// Sessions whose stored password fails to decrypt are silently
		// skipped; the next candidate is tried.
		password, err := crypto.Decrypt(sess.EncryptedPassword, s.secret)
		if err != nil {
			continue
		}
		s.auditCredentialBorrow(connectionID, sess)
		return CHCredentials{
			ConnectionID: connectionID,
			User:         sess.ClickhouseUser,
			Password:     password,
		}, nil
	}
	return CHCredentials{}, fmt.Errorf("no active sessions with valid credentials for connection %s", connectionID)
}

// auditCredentialBorrow writes one audit row per connection per hour when the
// background syncer borrows credentials from an active session. A structured
// debug log is emitted every time; the audit table only gets rate-limited
// entries to avoid flooding it during frequent ticks.
func (s *Syncer) auditCredentialBorrow(connectionID string, sess database.Session) {
	slog.Debug("Governance syncer borrowed session credentials", "connection", connectionID, "ch_user", sess.ClickhouseUser, "session_id", sess.ID)
	now := time.Now()
	// Rate limit: skip the audit row if one was written within the last hour.
	if last, ok := s.lastBorrowLog.Load(connectionID); ok {
		if t, ok := last.(time.Time); ok && now.Sub(t) < time.Hour {
			return
		}
	}
	s.lastBorrowLog.Store(connectionID, now)
	details := fmt.Sprintf(`{"session_id":%q,"purpose":"background_sync"}`, sess.ID)
	// CreateAuditLog takes pointers, so copy into locals first.
	connID := connectionID
	user := sess.ClickhouseUser
	if err := s.db.CreateAuditLog(database.AuditLogParams{
		Action:       "governance.credential_borrow",
		Username:     &user,
		ConnectionID: &connID,
		Details:      &details,
	}); err != nil {
		slog.Warn("Failed to write credential borrow audit log", "connection", connectionID, "error", err)
	}
}

// executeQuery sends a SQL query through the tunnel and returns parsed rows.
// The ClickHouse JSON format returns data as an array of objects:
//
//	{"data": [{"col1": "val1", "col2": "val2"}, ...], "meta": [...], ...}
//
// The tunnel's QueryResult.Data contains this "data" array as json.RawMessage.
// We first try to unmarshal as []map[string]interface{} (JSON format).
// If that fails we fall back to [][]interface{} (JSONCompact) and combine with meta.
func (s *Syncer) executeQuery(creds CHCredentials, sql string) ([]map[string]interface{}, error) {
	result, err := s.gateway.ExecuteQuery(creds.ConnectionID, sql, creds.User, creds.Password, queryTimeout)
	if err != nil {
		return nil, fmt.Errorf("query execution failed: %w", err)
	}
	// An empty payload is not an error — it simply means no rows.
	if result == nil || len(result.Data) == 0 {
		return nil, nil
	}
	// Try JSON format: array of objects
	var rows []map[string]interface{}
	if err := json.Unmarshal(result.Data, &rows); err == nil {
		return rows, nil
	}
	// Fallback: JSONCompact format — array of arrays + meta
	var arrays [][]interface{}
	if err := json.Unmarshal(result.Data, &arrays); err != nil {
		return nil, fmt.Errorf("failed to parse query result data: %w", err)
	}
	// Parse meta for column names
	type metaCol struct {
		Name string `json:"name"`
		Type string `json:"type"`
	}
	var meta []metaCol
	if result.Meta != nil {
		if err := json.Unmarshal(result.Meta, &meta); err != nil {
			return nil, fmt.Errorf("failed to parse query result meta: %w", err)
		}
	}
	if len(meta) == 0 && len(arrays) > 0 {
		// Generate placeholder column names ("col0", "col1", …) so rows can
		// still be keyed when no meta was returned.
		for i := range arrays[0] {
			meta = append(meta, metaCol{Name: fmt.Sprintf("col%d", i), Type: "String"})
		}
	}
	rows = make([]map[string]interface{}, 0, len(arrays))
	for _, row := range arrays {
		m := make(map[string]interface{}, len(meta))
		for i, col := range meta {
			// Guard against rows shorter than meta; extra cells are dropped.
			if i < len(row) {
				m[col.Name] = row[i]
			}
		}
		rows = append(rows, m)
	}
	return rows, nil
}

================================================
FILE: internal/governance/types.go
================================================
package governance

// ── Sensitivity tag constants
// ─────────────────────────────────────────────────────────────────────────────

// SensitivityTag classifies how sensitive a catalog object is.
type SensitivityTag string

const (
	TagPII       SensitivityTag = "PII"
	TagFinancial SensitivityTag = "FINANCIAL"
	TagInternal  SensitivityTag = "INTERNAL"
	TagPublic    SensitivityTag = "PUBLIC"
	TagCritical  SensitivityTag = "CRITICAL"
)

// ValidTags is the allow-list used to validate sensitivity tags.
var ValidTags = map[SensitivityTag]bool{
	TagPII:       true,
	TagFinancial: true,
	TagInternal:  true,
	TagPublic:    true,
	TagCritical:  true,
}

// ── Sync types ───────────────────────────────────────────────────────────────

// SyncType identifies one of the three governance sync phases.
type SyncType string

const (
	SyncMetadata SyncType = "metadata"
	SyncQueryLog SyncType = "query_log"
	SyncAccess   SyncType = "access"
)

// ── Schema change types ──────────────────────────────────────────────────────

// SchemaChangeType names the kind of schema drift detected between syncs.
type SchemaChangeType string

const (
	ChangeDatabaseAdded     SchemaChangeType = "database_added"
	ChangeDatabaseRemoved   SchemaChangeType = "database_removed"
	ChangeTableAdded        SchemaChangeType = "table_added"
	ChangeTableRemoved      SchemaChangeType = "table_removed"
	ChangeColumnAdded       SchemaChangeType = "column_added"
	ChangeColumnRemoved     SchemaChangeType = "column_removed"
	ChangeColumnTypeChanged SchemaChangeType = "column_type_changed"
)

// ── Edge types ───────────────────────────────────────────────────────────────

// EdgeType classifies how a lineage edge was derived from a query.
type EdgeType string

const (
	EdgeSelectFrom     EdgeType = "select_from"
	EdgeInsertSelect   EdgeType = "insert_select"
	EdgeCreateAsSelect EdgeType = "create_as_select"
)

// ── Model structs ────────────────────────────────────────────────────────────

// SyncState tracks progress and status of one sync type for one connection.
type SyncState struct {
	ID           string  `json:"id"`
	ConnectionID string  `json:"connection_id"`
	SyncType     string  `json:"sync_type"`
	LastSyncedAt *string `json:"last_synced_at"`
	Watermark    *string `json:"watermark"`
	Status       string  `json:"status"`
	LastError    *string `json:"last_error"`
	RowCount     int     `json:"row_count"`
	CreatedAt    string  `json:"created_at"`
	UpdatedAt    string  `json:"updated_at"`
}

// GovDatabase mirrors a ClickHouse database discovered during metadata sync.
type GovDatabase struct {
	ID           string `json:"id"`
	ConnectionID string `json:"connection_id"`
	Name         string `json:"name"`
	Engine       string `json:"engine"`
	FirstSeen    string `json:"first_seen"`
	LastUpdated  string `json:"last_updated"`
	IsDeleted    bool   `json:"is_deleted"`
}

// GovTable mirrors a ClickHouse table, with size stats and sensitivity tags.
type GovTable struct {
	ID             string   `json:"id"`
	ConnectionID   string   `json:"connection_id"`
	DatabaseName   string   `json:"database_name"`
	TableName      string   `json:"table_name"`
	Engine         string   `json:"engine"`
	TableUUID      string   `json:"table_uuid"`
	TotalRows      int64    `json:"total_rows"`
	TotalBytes     int64    `json:"total_bytes"`
	PartitionCount int      `json:"partition_count"`
	FirstSeen      string   `json:"first_seen"`
	LastUpdated    string   `json:"last_updated"`
	IsDeleted      bool     `json:"is_deleted"`
	Tags           []string `json:"tags,omitempty"`
}

// GovColumn mirrors a ClickHouse column definition.
type GovColumn struct {
	ID                string   `json:"id"`
	ConnectionID      string   `json:"connection_id"`
	DatabaseName      string   `json:"database_name"`
	TableName         string   `json:"table_name"`
	ColumnName        string   `json:"column_name"`
	ColumnType        string   `json:"column_type"`
	ColumnPosition    int      `json:"column_position"`
	DefaultKind       *string  `json:"default_kind"`
	DefaultExpression *string  `json:"default_expression"`
	Comment           *string  `json:"comment"`
	FirstSeen         string   `json:"first_seen"`
	LastUpdated       string   `json:"last_updated"`
	IsDeleted         bool     `json:"is_deleted"`
	Tags              []string `json:"tags,omitempty"`
}

// SchemaChange records one detected schema drift event.
type SchemaChange struct {
	ID           string `json:"id"`
	ConnectionID string `json:"connection_id"`
	ChangeType   string `json:"change_type"`
	DatabaseName string `json:"database_name"`
	TableName    string `json:"table_name"`
	ColumnName   string `json:"column_name"`
	OldValue     string `json:"old_value"`
	NewValue     string `json:"new_value"`
	DetectedAt   string `json:"detected_at"`
	CreatedAt    string `json:"created_at"`
}

// QueryLogEntry is one ingested row from the ClickHouse query log.
type QueryLogEntry struct {
	ID             string  `json:"id"`
	ConnectionID   string  `json:"connection_id"`
	QueryID        string  `json:"query_id"`
	User           string  `json:"ch_user"`
	QueryText      string  `json:"query_text"`
	NormalizedHash string  `json:"normalized_hash"`
	QueryKind      string  `json:"query_kind"`
	EventTime      string  `json:"event_time"`
	DurationMs     int64   `json:"duration_ms"`
	ReadRows       int64   `json:"read_rows"`
	ReadBytes      int64   `json:"read_bytes"`
	ResultRows     int64   `json:"result_rows"`
	WrittenRows    int64   `json:"written_rows"`
	WrittenBytes   int64   `json:"written_bytes"`
	MemoryUsage    int64   `json:"memory_usage"`
	TablesUsed     string  `json:"tables_used"`
	IsError        bool    `json:"is_error"`
	ErrorMessage   *string `json:"error_message"`
	CreatedAt      string  `json:"created_at"`
}

// LineageEdge is a table-level data-flow edge derived from a query.
type LineageEdge struct {
	ID             string              `json:"id"`
	ConnectionID   string              `json:"connection_id"`
	SourceDatabase string              `json:"source_database"`
	SourceTable    string              `json:"source_table"`
	TargetDatabase string              `json:"target_database"`
	TargetTable    string              `json:"target_table"`
	QueryID        string              `json:"query_id"`
	User           string              `json:"ch_user"`
	EdgeType       string              `json:"edge_type"`
	DetectedAt     string              `json:"detected_at"`
	ColumnEdges    []ColumnLineageEdge `json:"column_edges,omitempty"`
}

// ColumnLineageEdge refines a LineageEdge down to a column-to-column mapping.
type ColumnLineageEdge struct {
	ID            string `json:"id"`
	LineageEdgeID string `json:"lineage_edge_id"`
	ConnectionID  string `json:"connection_id"`
	SourceColumn  string `json:"source_column"`
	TargetColumn  string `json:"target_column"`
}

// TagEntry records a sensitivity tag applied to a database object.
type TagEntry struct {
	ID           string `json:"id"`
	ConnectionID string `json:"connection_id"`
	ObjectType   string `json:"object_type"`
	DatabaseName string `json:"database_name"`
	TableName    string `json:"table_name"`
	ColumnName   string `json:"column_name"`
	Tag          string `json:"tag"`
	TaggedBy     string `json:"tagged_by"`
	CreatedAt    string `json:"created_at"`
}

// ChUser mirrors a ClickHouse user discovered during access sync.
type ChUser struct {
	ID           string  `json:"id"`
	ConnectionID string  `json:"connection_id"`
	Name         string  `json:"name"`
	AuthType     *string `json:"auth_type"`
	HostIP       *string `json:"host_ip"`
	DefaultRoles *string `json:"default_roles"`
	FirstSeen    string  `json:"first_seen"`
	LastUpdated  string  `json:"last_updated"`
}

// ChRole mirrors a ClickHouse role.
type ChRole struct {
	ID           string `json:"id"`
	ConnectionID string `json:"connection_id"`
	Name         string `json:"name"`
	FirstSeen    string `json:"first_seen"`
	LastUpdated  string `json:"last_updated"`
}

// RoleGrant links a ClickHouse user to a granted role.
type RoleGrant struct {
	ID              string `json:"id"`
	ConnectionID    string `json:"connection_id"`
	UserName        string `json:"user_name"`
	GrantedRoleName string `json:"granted_role_name"`
	IsDefault       bool   `json:"is_default"`
	WithAdminOption bool   `json:"with_admin_option"`
	FirstSeen       string `json:"first_seen"`
	LastUpdated     string `json:"last_updated"`
}

// Grant is a raw ClickHouse privilege grant for a user or role.
type Grant struct {
	ID              string  `json:"id"`
	ConnectionID    string  `json:"connection_id"`
	UserName        *string `json:"user_name"`
	RoleName        *string `json:"role_name"`
	AccessType      string  `json:"access_type"`
	GrantDatabase   *string `json:"grant_database"`
	GrantTable      *string `json:"grant_table"`
	GrantColumn     *string `json:"grant_column"`
	IsPartialRevoke bool    `json:"is_partial_revoke"`
	GrantOption     bool    `json:"grant_option"`
	FirstSeen       string  `json:"first_seen"`
	LastUpdated     string  `json:"last_updated"`
}

// AccessMatrixEntry is one resolved (user, privilege, object) row of the
// flattened access matrix.
type AccessMatrixEntry struct {
	ID            string  `json:"id"`
	ConnectionID  string  `json:"connection_id"`
	UserName      string  `json:"user_name"`
	RoleName      *string `json:"role_name"`
	DatabaseName  *string `json:"database_name"`
	TableName     *string `json:"table_name"`
	Privilege     string  `json:"privilege"`
	IsDirectGrant bool    `json:"is_direct_grant"`
	LastQueryTime *string `json:"last_query_time"`
}

// Policy is a governance rule requiring a role to access an object.
type Policy struct {
	ID              string  `json:"id"`
	ConnectionID    string  `json:"connection_id"`
	Name            string  `json:"name"`
	Description     *string `json:"description"`
	ObjectType      string  `json:"object_type"`
	ObjectDatabase  *string `json:"object_database"`
	ObjectTable     *string `json:"object_table"`
	ObjectColumn    *string `json:"object_column"`
	RequiredRole    string  `json:"required_role"`
	Severity        string  `json:"severity"`
	EnforcementMode string  `json:"enforcement_mode"`
	Enabled         bool    `json:"enabled"`
	CreatedBy       *string `json:"created_by"`
	CreatedAt       string  `json:"created_at"`
	UpdatedAt       string  `json:"updated_at"`
}

// PolicyViolation records a detected breach of a Policy.
type PolicyViolation struct {
	ID              string  `json:"id"`
	ConnectionID    string  `json:"connection_id"`
	PolicyID        string  `json:"policy_id"`
	QueryLogID      string  `json:"query_log_id"`
	User            string  `json:"ch_user"`
	ViolationDetail string  `json:"violation_detail"`
	Severity        string  `json:"severity"`
	DetectionPhase  string  `json:"detection_phase"`
	RequestEndpoint *string `json:"request_endpoint"`
	DetectedAt      string  `json:"detected_at"`
	CreatedAt       string  `json:"created_at"`
	// Joined fields
	PolicyName string `json:"policy_name,omitempty"`
}

// ObjectComment is a free-text annotation on a database object.
type ObjectComment struct {
	ID           string  `json:"id"`
	ConnectionID string  `json:"connection_id"`
	ObjectType   string  `json:"object_type"`
	DatabaseName string  `json:"database_name"`
	TableName    string  `json:"table_name"`
	ColumnName   string  `json:"column_name"`
	CommentText  string  `json:"comment_text"`
	CreatedBy    *string `json:"created_by"`
	CreatedAt    string  `json:"created_at"`
	UpdatedAt    string  `json:"updated_at"`
}

// Incident is a deduplicated governance incident with workflow state.
type Incident struct {
	ID              string  `json:"id"`
	ConnectionID    string  `json:"connection_id"`
	SourceType      string  `json:"source_type"`
	SourceRef       *string `json:"source_ref"`
	DedupeKey       *string `json:"dedupe_key"`
	Title           string  `json:"title"`
	Severity        string  `json:"severity"`
	Status          string  `json:"status"`
	Assignee        *string `json:"assignee"`
	Details         *string `json:"details"`
	ResolutionNote  *string `json:"resolution_note"`
	OccurrenceCount int     `json:"occurrence_count"`
	FirstSeenAt     string  `json:"first_seen_at"`
	LastSeenAt      string  `json:"last_seen_at"`
	ResolvedAt      *string `json:"resolved_at"`
	CreatedBy       *string `json:"created_by"`
	CreatedAt       string  `json:"created_at"`
	UpdatedAt       string  `json:"updated_at"`
}

// IncidentComment is a discussion entry attached to an Incident.
type IncidentComment struct {
	ID          string  `json:"id"`
	IncidentID  string  `json:"incident_id"`
	CommentText string  `json:"comment_text"`
	CreatedBy   *string `json:"created_by"`
	CreatedAt   string  `json:"created_at"`
}

// ── Summary types ────────────────────────────────────────────────────────────

// GovernanceOverview aggregates counts and recent activity for one connection.
type GovernanceOverview struct {
	DatabaseCount     int               `json:"database_count"`
	TableCount        int               `json:"table_count"`
	ColumnCount       int               `json:"column_count"`
	TaggedTableCount  int               `json:"tagged_table_count"`
	UserCount         int               `json:"user_count"`
	RoleCount         int               `json:"role_count"`
	QueryCount24h     int               `json:"query_count_24h"`
	LineageEdgeCount  int               `json:"lineage_edge_count"`
	PolicyCount       int               `json:"policy_count"`
	ViolationCount    int               `json:"violation_count"`
	IncidentCount     int               `json:"incident_count"`
	SchemaChangeCount int               `json:"schema_change_count"`
	SyncStates        []SyncState       `json:"sync_states"`
	RecentChanges     []SchemaChange    `json:"recent_changes"`
	RecentViolations  []PolicyViolation `json:"recent_violations"`
}

// OverPermission flags an access-matrix entry that appears unused.
type OverPermission struct {
	UserName       string  `json:"user_name"`
	RoleName       *string `json:"role_name"`
	DatabaseName   *string `json:"database_name"`
	TableName      *string `json:"table_name"`
	Privilege      string  `json:"privilege"`
	LastQueryTime  *string `json:"last_query_time"`
	DaysSinceQuery *int    `json:"days_since_query"`
	Reason         string  `json:"reason"`
}

// ── Sync result types ────────────────────────────────────────────────────────

// SyncResult aggregates per-phase results and errors from a full sync.
type SyncResult struct {
	MetadataResult *MetadataSyncResult `json:"metadata,omitempty"`
	MetadataError  string              `json:"metadata_error,omitempty"`
	QueryLogResult *QueryLogSyncResult `json:"query_log,omitempty"`
	QueryLogError  string              `json:"query_log_error,omitempty"`
	AccessResult   *AccessSyncResult   `json:"access,omitempty"`
	AccessError    string              `json:"access_error,omitempty"`
}

// MetadataSyncResult counts the objects processed by the metadata phase.
type MetadataSyncResult struct {
	DatabasesSynced int `json:"databases_synced"`
	TablesSynced    int `json:"tables_synced"`
	ColumnsSynced   int `json:"columns_synced"`
	SchemaChanges   int `json:"schema_changes"`
}

// QueryLogSyncResult counts the rows processed by the query-log phase.
type QueryLogSyncResult struct {
	QueriesIngested   int    `json:"queries_ingested"`
	LineageEdgesFound int    `json:"lineage_edges_found"`
	ViolationsFound   int    `json:"violations_found"`
	NewWatermark      string `json:"new_watermark"`
}

// AccessSyncResult counts the entities processed by the access phase.
type AccessSyncResult struct {
	UsersSynced     int `json:"users_synced"`
	RolesSynced     int `json:"roles_synced"`
	GrantsSynced    int `json:"grants_synced"`
	MatrixEntries   int `json:"matrix_entries"`
	OverPermissions int `json:"over_permissions"`
}

// ── Credentials holder ───────────────────────────────────────────────────────

// CHCredentials carries ClickHouse login material for one connection.
// No JSON tags: this type is never serialized.
type CHCredentials struct {
	ConnectionID string
	User         string
	Password     string
}

// ── Lineage graph (for API response)
───────────────────────────────────────── type LineageNode struct { ID string `json:"id"` Database string `json:"database"` Table string `json:"table"` Type string `json:"type"` // "source", "target", "current" Columns []GovColumn `json:"columns,omitempty"` } type LineageGraph struct { Nodes []LineageNode `json:"nodes"` Edges []LineageEdge `json:"edges"` } // ── Helpers ────────────────────────────────────────────────────────────────── func StrPtr(s string) *string { if s == "" { return nil } return &s } ================================================ FILE: internal/langfuse/langfuse.go ================================================ package langfuse import ( "bytes" "encoding/json" "log/slog" "net/http" "strings" "sync" "time" "github.com/google/uuid" ) // Config holds Langfuse connection settings. type Config struct { PublicKey string SecretKey string BaseURL string } // Enabled returns true when both keys are set. func (c Config) Enabled() bool { return c.PublicKey != "" && c.SecretKey != "" } // NormalizeBaseURL ensures BaseURL has a sensible default and no trailing slash. func (c *Config) NormalizeBaseURL() { c.BaseURL = strings.TrimRight(strings.TrimSpace(c.BaseURL), "/") if c.BaseURL == "" { c.BaseURL = "https://cloud.langfuse.com" } } // Usage holds token counts for a generation. type Usage struct { Input int `json:"input"` Output int `json:"output"` Total int `json:"total"` } // TraceParams captures trace-level data. type TraceParams struct { ID string Name string UserID string SessionID string Input interface{} Output interface{} Release string Metadata map[string]string Tags []string } // GenerationParams captures one LLM generation. type GenerationParams struct { ID string TraceID string Name string Model string ModelParameters map[string]interface{} Input interface{} Output interface{} StartTime time.Time EndTime time.Time Usage *Usage Level string // "DEFAULT" or "ERROR" } // ScoreParams captures a score attached to a trace. 
type ScoreParams struct { TraceID string Name string Value float64 Comment string DataType string // "NUMERIC" or "BOOLEAN" } // EventParams captures a point-in-time event within a trace. type EventParams struct { TraceID string Name string Input interface{} Level string } type event struct { ID string `json:"id"` Type string `json:"type"` Timestamp string `json:"timestamp"` Body interface{} `json:"body"` } // Client sends observability events to Langfuse asynchronously. // Always non-nil — inactive when config is not enabled. type Client struct { mu sync.RWMutex cfg Config http *http.Client events chan event stopCh chan struct{} wg sync.WaitGroup } // New creates a Client. Always returns a valid pointer. // The client is inactive until Reconfigure is called with valid credentials. func New() *Client { return &Client{ http: &http.Client{Timeout: 10 * time.Second}, events: make(chan event, 256), stopCh: make(chan struct{}), } } // Reconfigure swaps the config at runtime. Safe to call while the client is running. func (c *Client) Reconfigure(cfg Config) { cfg.NormalizeBaseURL() c.mu.Lock() c.cfg = cfg c.mu.Unlock() if cfg.Enabled() { slog.Info("Langfuse observability configured", "base_url", cfg.BaseURL) } else { slog.Info("Langfuse observability disabled") } } // IsEnabled returns true if the client has valid credentials. func (c *Client) IsEnabled() bool { c.mu.RLock() defer c.mu.RUnlock() return c.cfg.Enabled() } func (c *Client) getConfig() Config { c.mu.RLock() defer c.mu.RUnlock() return c.cfg } // Start spawns the background flush goroutine. func (c *Client) Start() { c.wg.Add(1) go c.loop() } // Stop drains pending events and shuts down. 
func (c *Client) Stop() { close(c.stopCh) c.wg.Wait() slog.Info("Langfuse client stopped") } func (c *Client) loop() { defer c.wg.Done() ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() var buf []event for { select { case e := <-c.events: buf = append(buf, e) if len(buf) >= 10 { c.flush(buf) buf = buf[:0] } case <-ticker.C: if len(buf) > 0 { c.flush(buf) buf = buf[:0] } case <-c.stopCh: for { select { case e := <-c.events: buf = append(buf, e) default: c.flush(buf) return } } } } } func (c *Client) enqueue(e event) { if !c.IsEnabled() { return } select { case c.events <- e: default: slog.Warn("langfuse event dropped, channel full") } } func now() string { return time.Now().UTC().Format(time.RFC3339Nano) } func newID() string { return uuid.NewString() } // LogTrace enqueues a trace-create event. func (c *Client) LogTrace(p TraceParams) { id := p.ID if id == "" { id = newID() } body := map[string]interface{}{ "id": id, "name": p.Name, } if p.UserID != "" { body["userId"] = p.UserID } if p.SessionID != "" { body["sessionId"] = p.SessionID } if p.Input != nil { body["input"] = p.Input } if p.Output != nil { body["output"] = p.Output } if p.Release != "" { body["release"] = p.Release } if len(p.Metadata) > 0 { body["metadata"] = p.Metadata } if len(p.Tags) > 0 { body["tags"] = p.Tags } c.enqueue(event{ ID: id, Type: "trace-create", Timestamp: now(), Body: body, }) } // LogGeneration enqueues a generation-create event. 
func (c *Client) LogGeneration(p GenerationParams) { id := p.ID if id == "" { id = newID() } body := map[string]interface{}{ "id": id, "traceId": p.TraceID, "name": p.Name, "type": "GENERATION", "model": p.Model, "startTime": p.StartTime.UTC().Format(time.RFC3339Nano), "endTime": p.EndTime.UTC().Format(time.RFC3339Nano), } if len(p.ModelParameters) > 0 { body["modelParameters"] = p.ModelParameters } if p.Input != nil { body["input"] = p.Input } if p.Output != nil { body["output"] = p.Output } if p.Usage != nil { body["usage"] = p.Usage } if p.Level != "" { body["level"] = p.Level } c.enqueue(event{ ID: id, Type: "generation-create", Timestamp: now(), Body: body, }) } // LogScore enqueues a score-create event. func (c *Client) LogScore(p ScoreParams) { dataType := p.DataType if dataType == "" { dataType = "NUMERIC" } body := map[string]interface{}{ "traceId": p.TraceID, "name": p.Name, "value": p.Value, "dataType": dataType, } if p.Comment != "" { body["comment"] = p.Comment } c.enqueue(event{ ID: newID(), Type: "score-create", Timestamp: now(), Body: body, }) } // LogEvent enqueues an event-create for notable occurrences within a trace. 
func (c *Client) LogEvent(p EventParams) { body := map[string]interface{}{ "traceId": p.TraceID, "name": p.Name, } if p.Input != nil { body["input"] = p.Input } if p.Level != "" { body["level"] = p.Level } c.enqueue(event{ ID: newID(), Type: "event-create", Timestamp: now(), Body: body, }) } func (c *Client) flush(batch []event) { if len(batch) == 0 { return } cfg := c.getConfig() if !cfg.Enabled() { return } payload := map[string]interface{}{ "batch": batch, } body, err := json.Marshal(payload) if err != nil { slog.Warn("langfuse: failed to marshal batch", "error", err) return } req, err := http.NewRequest(http.MethodPost, cfg.BaseURL+"/api/public/ingestion", bytes.NewReader(body)) if err != nil { slog.Warn("langfuse: failed to create request", "error", err) return } req.SetBasicAuth(cfg.PublicKey, cfg.SecretKey) req.Header.Set("Content-Type", "application/json") resp, err := c.http.Do(req) if err != nil { slog.Warn("langfuse: flush failed", "error", err) return } defer resp.Body.Close() if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusMultiStatus { slog.Warn("langfuse: unexpected status", "status", resp.StatusCode) } } // TestConnection verifies credentials by calling the Langfuse API. // Returns nil on success, error with details on failure. func (c *Client) TestConnection(cfg Config) error { cfg.NormalizeBaseURL() req, err := http.NewRequest(http.MethodGet, cfg.BaseURL+"/api/public/projects", nil) if err != nil { return err } req.SetBasicAuth(cfg.PublicKey, cfg.SecretKey) resp, err := c.http.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode == http.StatusOK { return nil } return &ConnectionError{StatusCode: resp.StatusCode} } // ConnectionError represents a Langfuse API error. 
type ConnectionError struct { StatusCode int } func (e *ConnectionError) Error() string { switch e.StatusCode { case 401: return "invalid credentials" case 403: return "access denied" default: return "unexpected status: " + http.StatusText(e.StatusCode) } } ================================================ FILE: internal/license/license.go ================================================ package license import ( "crypto/ed25519" "crypto/x509" "encoding/base64" "encoding/json" "encoding/pem" "fmt" "log/slog" "sort" "strings" "time" ) // LicenseFile is the on-disk JSON format for a signed license. type LicenseFile struct { LicenseID string `json:"license_id"` Edition string `json:"edition"` Customer string `json:"customer"` Features []string `json:"features"` MaxConnections int `json:"max_connections"` IssuedAt string `json:"issued_at"` ExpiresAt string `json:"expires_at"` Signature string `json:"signature"` } // LicenseInfo is the public-facing license status returned by the API. type LicenseInfo struct { Edition string `json:"edition"` Valid bool `json:"valid"` Customer string `json:"customer,omitempty"` ExpiresAt string `json:"expires_at,omitempty"` LicenseID string `json:"license_id,omitempty"` } // CommunityLicense returns the default community license info. func CommunityLicense() *LicenseInfo { return &LicenseInfo{ Edition: "community", Valid: false, } } // ValidateLicense parses and verifies a signed license JSON string. // Returns a LicenseInfo with Valid=true on success, or CommunityLicense() on any failure. 
func ValidateLicense(licenseJSON string) *LicenseInfo { if licenseJSON == "" { return CommunityLicense() } var lf LicenseFile if err := json.Unmarshal([]byte(licenseJSON), &lf); err != nil { slog.Warn("License parse error", "error", err) return CommunityLicense() } // Decode the embedded public key pub, err := parsePublicKey(publicKeyPEM) if err != nil { slog.Error("Failed to parse embedded public key", "error", err) return CommunityLicense() } // Rebuild the signable payload (all fields except signature) payload := SignablePayload(lf) // Decode and verify the signature sig, err := base64.StdEncoding.DecodeString(lf.Signature) if err != nil { slog.Warn("License signature decode error", "error", err) return CommunityLicense() } if !ed25519.Verify(pub, payload, sig) { slog.Warn("License signature verification failed") return CommunityLicense() } // Check expiry expires, err := time.Parse(time.RFC3339, lf.ExpiresAt) if err != nil { slog.Warn("License expiry parse error", "error", err) return CommunityLicense() } if expires.Before(time.Now()) { slog.Warn("License expired", "expires_at", lf.ExpiresAt) return &LicenseInfo{ Edition: strings.ToLower(strings.TrimSpace(lf.Edition)), Valid: false, Customer: lf.Customer, ExpiresAt: lf.ExpiresAt, LicenseID: lf.LicenseID, } } edition := strings.ToLower(strings.TrimSpace(lf.Edition)) slog.Debug("Pro license validated", "customer", lf.Customer, "expires", lf.ExpiresAt) return &LicenseInfo{ Edition: edition, Valid: true, Customer: lf.Customer, ExpiresAt: lf.ExpiresAt, LicenseID: lf.LicenseID, } } // SignablePayload returns the canonical JSON bytes for signature verification. // All fields except "signature", sorted by key, compact encoding. 
func SignablePayload(lf LicenseFile) []byte { m := map[string]interface{}{ "license_id": lf.LicenseID, "edition": lf.Edition, "customer": lf.Customer, "features": lf.Features, "max_connections": lf.MaxConnections, "issued_at": lf.IssuedAt, "expires_at": lf.ExpiresAt, } keys := make([]string, 0, len(m)) for k := range m { keys = append(keys, k) } sort.Strings(keys) buf := []byte("{") for i, k := range keys { if i > 0 { buf = append(buf, ',') } kb, _ := json.Marshal(k) vb, _ := json.Marshal(m[k]) buf = append(buf, kb...) buf = append(buf, ':') buf = append(buf, vb...) } buf = append(buf, '}') return buf } func parsePublicKey(pemData []byte) (ed25519.PublicKey, error) { block, _ := pem.Decode(pemData) if block == nil { return nil, fmt.Errorf("no PEM block found") } key, err := x509.ParsePKIXPublicKey(block.Bytes) if err != nil { return nil, err } pub, ok := key.(ed25519.PublicKey) if !ok { return nil, fmt.Errorf("not an Ed25519 public key") } return pub, nil } ================================================ FILE: internal/license/pubkey.go ================================================ package license import _ "embed" //go:embed public.pem var publicKeyPEM []byte ================================================ FILE: internal/license/public.pem ================================================ -----BEGIN PUBLIC KEY----- MCowBQYDK2VwAyEA62CBTMWey4wS4Fknr/5Sfk7k1J7+4MYpBfxBPvKXRFg= -----END PUBLIC KEY----- ================================================ FILE: internal/license/tokens.go ================================================ package license import ( "crypto/rand" "encoding/hex" "regexp" "strings" "github.com/google/uuid" ) // GenerateTunnelToken generates a tunnel token with prefix 'cht_' func GenerateTunnelToken() string { b := make([]byte, 16) rand.Read(b) return "cht_" + hex.EncodeToString(b) } // GenerateSessionToken generates a session token func GenerateSessionToken() string { u1 := uuid.New().String() u2 := strings.ReplaceAll(uuid.New().String(), "-", 
"") return u1 + u2 } var tunnelTokenRegex = regexp.MustCompile(`^cht_[a-f0-9]{32}$`) // IsValidTunnelToken validates tunnel token format func IsValidTunnelToken(token string) bool { return tunnelTokenRegex.MatchString(token) } ================================================ FILE: internal/models/dag.go ================================================ package models import "fmt" // DepGraph is the resolved DAG of model dependencies. type DepGraph struct { // Order is the topological execution order (model IDs). Order []string // Deps maps model_id -> [dependency model_ids] (upstream). Deps map[string][]string // RevDeps maps model_id -> [dependent model_ids] (downstream). RevDeps map[string][]string } // BuildDAG constructs the dependency graph and returns topological order. // modelIDs: all model IDs. // refsByID: model_id -> [referenced model names from $ref()]. // nameToID: model_name -> model_id. func BuildDAG(modelIDs []string, refsByID map[string][]string, nameToID map[string]string) (*DepGraph, error) { g := &DepGraph{ Deps: make(map[string][]string), RevDeps: make(map[string][]string), } // Build in-degree map inDegree := make(map[string]int, len(modelIDs)) for _, id := range modelIDs { inDegree[id] = 0 } for id, refNames := range refsByID { for _, refName := range refNames { depID, ok := nameToID[refName] if !ok { return nil, fmt.Errorf("model references unknown model %q via $ref()", refName) } if depID == id { return nil, fmt.Errorf("model cannot reference itself via $ref(%s)", refName) } g.Deps[id] = append(g.Deps[id], depID) g.RevDeps[depID] = append(g.RevDeps[depID], id) inDegree[id]++ } } // Kahn's algorithm for topological sort var queue []string for _, id := range modelIDs { if inDegree[id] == 0 { queue = append(queue, id) } } var order []string for len(queue) > 0 { curr := queue[0] queue = queue[1:] order = append(order, curr) for _, downstream := range g.RevDeps[curr] { inDegree[downstream]-- if inDegree[downstream] == 0 { queue = append(queue, 
downstream) } } } if len(order) != len(modelIDs) { return nil, fmt.Errorf("cycle detected in model dependencies") } g.Order = order return g, nil } // ConnectedComponents returns groups of model IDs (independent pipelines). // Each group preserves topological order from g.Order. func (g *DepGraph) ConnectedComponents() [][]string { all := make(map[string]bool, len(g.Order)) for _, id := range g.Order { all[id] = true } visited := make(map[string]bool, len(g.Order)) var components [][]string for _, id := range g.Order { if visited[id] { continue } // BFS on undirected edges component := make(map[string]bool) queue := []string{id} for len(queue) > 0 { cur := queue[0] queue = queue[1:] if visited[cur] { continue } visited[cur] = true component[cur] = true for _, dep := range g.Deps[cur] { if !visited[dep] && all[dep] { queue = append(queue, dep) } } for _, rev := range g.RevDeps[cur] { if !visited[rev] && all[rev] { queue = append(queue, rev) } } } // Filter g.Order to preserve topological order var ordered []string for _, oid := range g.Order { if component[oid] { ordered = append(ordered, oid) } } components = append(components, ordered) } return components } // ComponentContaining returns the component that includes modelID, // preserving topological order from g.Order. func (g *DepGraph) ComponentContaining(modelID string) []string { for _, comp := range g.ConnectedComponents() { for _, id := range comp { if id == modelID { return comp } } } return nil } // GetUpstreamDeps returns the transitive upstream dependencies for a model ID. 
func GetUpstreamDeps(modelID string, deps map[string][]string) map[string]bool { visited := make(map[string]bool) var walk func(id string) walk = func(id string) { for _, depID := range deps[id] { if !visited[depID] { visited[depID] = true walk(depID) } } } walk(modelID) return visited } ================================================ FILE: internal/models/ref.go ================================================ package models import ( "fmt" "regexp" "strings" ) // stripSQLComments removes single-line (-- ...) and block (/* ... */) comments // so that $ref() inside comments is not treated as a real reference. func stripSQLComments(sql string) string { // Remove block comments first (non-greedy, handles multiline) blockRe := regexp.MustCompile(`(?s)/\*.*?\*/`) sql = blockRe.ReplaceAllString(sql, "") // Remove single-line comments lineRe := regexp.MustCompile(`--[^\n]*`) sql = lineRe.ReplaceAllString(sql, "") return sql } // refPattern matches $ref(model_name) in SQL. // Model names follow ClickHouse identifier rules: [a-zA-Z_][a-zA-Z0-9_]* var refPattern = regexp.MustCompile(`\$ref\(\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\)`) // modelNamePattern validates model names as valid ClickHouse identifiers. var modelNamePattern = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`) // ValidateModelName checks if a name is a valid ClickHouse identifier. func ValidateModelName(name string) error { if name == "" { return fmt.Errorf("model name cannot be empty") } if !modelNamePattern.MatchString(name) { return fmt.Errorf("model name %q must be a valid identifier (letters, digits, underscores, starting with letter or underscore)", name) } return nil } // ExtractRefs returns all model names referenced via $ref() in the SQL body. // $ref() occurrences inside SQL comments are ignored. 
func ExtractRefs(sqlBody string) []string { matches := refPattern.FindAllStringSubmatch(stripSQLComments(sqlBody), -1) seen := make(map[string]bool) var refs []string for _, m := range matches { name := strings.TrimSpace(m[1]) if !seen[name] { seen[name] = true refs = append(refs, name) } } return refs } // ResolveRefs replaces all $ref(model_name) with `target_database`.`model_name`. // modelTargets maps model_name -> target_database. // $ref() occurrences inside SQL comments are ignored (comments are stripped first). func ResolveRefs(sqlBody string, modelTargets map[string]string) (string, error) { sqlBody = stripSQLComments(sqlBody) var resolveErr error resolved := refPattern.ReplaceAllStringFunc(sqlBody, func(match string) string { sub := refPattern.FindStringSubmatch(match) if len(sub) < 2 { return match } name := strings.TrimSpace(sub[1]) db, ok := modelTargets[name] if !ok { resolveErr = fmt.Errorf("unresolved reference: $ref(%s)", name) return match } return fmt.Sprintf("`%s`.`%s`", db, name) }) return resolved, resolveErr } ================================================ FILE: internal/models/runner.go ================================================ package models import ( "fmt" "log/slog" "sync" "time" "github.com/caioricciuti/ch-ui/internal/crypto" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/tunnel" ) // Runner executes model builds against ClickHouse. type Runner struct { db *database.DB gateway *tunnel.Gateway secret string mu sync.Mutex // prevents concurrent runs per connection running map[string]bool } // NewRunner creates a new model runner. func NewRunner(db *database.DB, gw *tunnel.Gateway, secret string) *Runner { return &Runner{ db: db, gateway: gw, secret: secret, running: make(map[string]bool), } } // RunAll executes all models for a connection in dependency order. 
func (r *Runner) RunAll(connectionID, triggeredBy string) (string, error) { if err := r.acquireLock(connectionID); err != nil { return "", err } defer r.releaseLock(connectionID) if !r.gateway.IsTunnelOnline(connectionID) { return "", fmt.Errorf("tunnel not connected") } user, password, err := r.findCredentials(connectionID) if err != nil { return "", fmt.Errorf("no credentials: %w", err) } allModels, err := r.db.GetModelsByConnection(connectionID) if err != nil { return "", fmt.Errorf("load models: %w", err) } if len(allModels) == 0 { return "", fmt.Errorf("no models defined") } dag, idToModel, modelTargets, err := r.buildDAG(allModels) if err != nil { return "", err } return r.execute(connectionID, triggeredBy, dag, idToModel, modelTargets, user, password) } // RunPipeline executes only the connected component containing anchorModelID. func (r *Runner) RunPipeline(connectionID, anchorModelID, triggeredBy string) (string, error) { if err := r.acquireLock(connectionID); err != nil { return "", err } defer r.releaseLock(connectionID) if !r.gateway.IsTunnelOnline(connectionID) { return "", fmt.Errorf("tunnel not connected") } user, password, err := r.findCredentials(connectionID) if err != nil { return "", fmt.Errorf("no credentials: %w", err) } allModels, err := r.db.GetModelsByConnection(connectionID) if err != nil { return "", fmt.Errorf("load models: %w", err) } if len(allModels) == 0 { return "", fmt.Errorf("no models defined") } dag, idToModel, modelTargets, err := r.buildDAG(allModels) if err != nil { return "", err } component := dag.ComponentContaining(anchorModelID) if len(component) == 0 { return "", fmt.Errorf("anchor model not found in DAG") } dag.Order = component return r.execute(connectionID, triggeredBy, dag, idToModel, modelTargets, user, password) } // RunSingle executes a single model and its upstream dependencies. 
func (r *Runner) RunSingle(connectionID, modelID, triggeredBy string) (string, error) { if err := r.acquireLock(connectionID); err != nil { return "", err } defer r.releaseLock(connectionID) if !r.gateway.IsTunnelOnline(connectionID) { return "", fmt.Errorf("tunnel not connected") } user, password, err := r.findCredentials(connectionID) if err != nil { return "", fmt.Errorf("no credentials: %w", err) } allModels, err := r.db.GetModelsByConnection(connectionID) if err != nil { return "", fmt.Errorf("load models: %w", err) } dag, idToModel, modelTargets, err := r.buildDAG(allModels) if err != nil { return "", err } // Filter to only the target model and its upstream deps upstream := GetUpstreamDeps(modelID, dag.Deps) upstream[modelID] = true var filteredIDs []string for _, id := range dag.Order { if upstream[id] { filteredIDs = append(filteredIDs, id) } } dag.Order = filteredIDs return r.execute(connectionID, triggeredBy, dag, idToModel, modelTargets, user, password) } // Validate checks all models for reference errors and cycles. 
func (r *Runner) Validate(connectionID string) ([]ValidationError, error) { allModels, err := r.db.GetModelsByConnection(connectionID) if err != nil { return nil, fmt.Errorf("load models: %w", err) } if len(allModels) == 0 { return nil, nil } nameToID := make(map[string]string) for _, m := range allModels { nameToID[m.Name] = m.ID } var errors []ValidationError refsByID := make(map[string][]string) for _, m := range allModels { refs := ExtractRefs(m.SQLBody) refsByID[m.ID] = refs for _, ref := range refs { if _, ok := nameToID[ref]; !ok { errors = append(errors, ValidationError{ ModelID: m.ID, ModelName: m.Name, Error: fmt.Sprintf("references unknown model %q via $ref()", ref), }) } if nameToID[ref] == m.ID { errors = append(errors, ValidationError{ ModelID: m.ID, ModelName: m.Name, Error: fmt.Sprintf("cannot reference itself via $ref(%s)", ref), }) } } } if len(errors) > 0 { return errors, nil } // Check for cycles var modelIDs []string for _, m := range allModels { modelIDs = append(modelIDs, m.ID) } _, dagErr := BuildDAG(modelIDs, refsByID, nameToID) if dagErr != nil { errors = append(errors, ValidationError{ Error: dagErr.Error(), }) } return errors, nil } // ValidationError represents a validation problem. 
type ValidationError struct { ModelID string `json:"model_id,omitempty"` ModelName string `json:"model_name,omitempty"` Error string `json:"error"` } // ── Internal helpers ──────────────────────────────────────────────── func (r *Runner) buildDAG(allModels []database.Model) (*DepGraph, map[string]database.Model, map[string]string, error) { nameToID := make(map[string]string) idToModel := make(map[string]database.Model) modelTargets := make(map[string]string) var modelIDs []string refsByID := make(map[string][]string) for _, m := range allModels { nameToID[m.Name] = m.ID idToModel[m.ID] = m modelTargets[m.Name] = m.TargetDatabase modelIDs = append(modelIDs, m.ID) refsByID[m.ID] = ExtractRefs(m.SQLBody) } dag, err := BuildDAG(modelIDs, refsByID, nameToID) if err != nil { return nil, nil, nil, fmt.Errorf("build DAG: %w", err) } return dag, idToModel, modelTargets, nil } func (r *Runner) execute(connectionID, triggeredBy string, dag *DepGraph, idToModel map[string]database.Model, modelTargets map[string]string, user, password string) (string, error) { runID, err := r.db.CreateModelRun(connectionID, len(dag.Order), triggeredBy) if err != nil { return "", fmt.Errorf("create run: %w", err) } // Create pending result records for _, id := range dag.Order { m := idToModel[id] if _, err := r.db.CreateModelRunResult(runID, m.ID, m.Name); err != nil { slog.Error("Failed to create run result", "model", m.Name, "error", err) } } // Execute in topological order failed := make(map[string]bool) var succeeded, failedCount, skipped int for _, id := range dag.Order { m := idToModel[id] // Skip if any upstream dependency failed shouldSkip := false for _, depID := range dag.Deps[id] { if failed[depID] { shouldSkip = true break } } if shouldSkip { skipped++ failed[id] = true r.db.UpdateModelRunResult(runID, id, "skipped", "", 0, "upstream dependency failed") r.db.UpdateModelStatus(id, "error", "upstream dependency failed") continue } // Resolve $ref() resolvedSQL, resolveErr := 
ResolveRefs(m.SQLBody, modelTargets) if resolveErr != nil { failedCount++ failed[id] = true r.db.UpdateModelRunResult(runID, id, "error", resolvedSQL, 0, resolveErr.Error()) r.db.UpdateModelStatus(id, "error", resolveErr.Error()) continue } // Mark as running r.db.UpdateModelRunResult(runID, id, "running", "", 0, "") // Build and execute DDL stmts := buildDDL(m, resolvedSQL) start := time.Now() var execErr error for _, stmt := range stmts { _, execErr = r.gateway.ExecuteQuery(connectionID, stmt, user, password, 5*time.Minute) if execErr != nil { break } } elapsed := time.Since(start).Milliseconds() ddlForLog := stmts[len(stmts)-1] // log the main statement if execErr != nil { failedCount++ failed[id] = true r.db.UpdateModelRunResult(runID, id, "error", ddlForLog, elapsed, execErr.Error()) r.db.UpdateModelStatus(id, "error", execErr.Error()) slog.Error("Model execution failed", "model", m.Name, "error", execErr) } else { succeeded++ r.db.UpdateModelRunResult(runID, id, "success", ddlForLog, elapsed, "") r.db.UpdateModelStatus(id, "success", "") } } // Finalize run runStatus := "success" if failedCount > 0 && succeeded > 0 { runStatus = "partial" } else if failedCount > 0 || skipped == len(dag.Order) { runStatus = "error" } r.db.FinalizeModelRun(runID, runStatus, succeeded, failedCount, skipped) return runID, nil } // buildDDL generates the DDL statement(s) for a model. // Returns a slice because TABLE needs DROP + CREATE as separate statements. 
func buildDDL(m database.Model, resolvedSQL string) []string { switch m.Materialization { case "table": drop := fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", m.TargetDatabase, m.Name) create := fmt.Sprintf("CREATE TABLE `%s`.`%s` ENGINE = %s ORDER BY %s AS %s", m.TargetDatabase, m.Name, m.TableEngine, m.OrderBy, resolvedSQL) return []string{drop, create} default: // view return []string{ fmt.Sprintf("CREATE OR REPLACE VIEW `%s`.`%s` AS %s", m.TargetDatabase, m.Name, resolvedSQL), } } } func (r *Runner) acquireLock(connectionID string) error { r.mu.Lock() defer r.mu.Unlock() if r.running[connectionID] { return fmt.Errorf("a model run is already in progress for this connection") } r.running[connectionID] = true return nil } func (r *Runner) releaseLock(connectionID string) { r.mu.Lock() defer r.mu.Unlock() delete(r.running, connectionID) } func (r *Runner) findCredentials(connectionID string) (string, string, error) { sessions, err := r.db.GetActiveSessionsByConnection(connectionID, 3) if err != nil { return "", "", fmt.Errorf("failed to load sessions: %w", err) } for _, s := range sessions { password, err := crypto.Decrypt(s.EncryptedPassword, r.secret) if err != nil { continue } return s.ClickhouseUser, password, nil } return "", "", fmt.Errorf("no active sessions with valid credentials for connection %s", connectionID) } ================================================ FILE: internal/models/scheduler.go ================================================ package models import ( "log/slog" "time" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/scheduler" ) const modelTickInterval = 30 * time.Second // Scheduler checks for due model schedules and triggers RunAll. type Scheduler struct { db *database.DB runner *Runner stopCh chan struct{} } // NewScheduler creates a new model scheduler. 
func NewScheduler(db *database.DB, runner *Runner) *Scheduler {
	return &Scheduler{
		db:     db,
		runner: runner,
		stopCh: make(chan struct{}),
	}
}

// Start begins the scheduler goroutine. It ticks every modelTickInterval
// until Stop is called.
func (s *Scheduler) Start() {
	go func() {
		slog.Info("Model scheduler started", "interval", modelTickInterval)
		ticker := time.NewTicker(modelTickInterval)
		defer ticker.Stop()
		for {
			select {
			case <-s.stopCh:
				slog.Info("Model scheduler stopped")
				return
			case <-ticker.C:
				s.tick()
			}
		}
	}()
}

// Stop signals the scheduler goroutine to stop.
// NOTE(review): calling Stop twice panics (double close of stopCh) —
// confirm callers invoke it exactly once.
func (s *Scheduler) Stop() {
	close(s.stopCh)
}

// tick runs one scheduling pass: every enabled schedule whose next_run_at is
// due (<= now, UTC) triggers a synchronous RunPipeline, then the schedule's
// status and next run time are persisted. Runs execute sequentially within
// one tick, so a slow run delays later schedules in the same pass.
func (s *Scheduler) tick() {
	schedules, err := s.db.GetEnabledModelSchedules()
	if err != nil {
		slog.Error("Failed to load enabled model schedules", "error", err)
		return
	}
	now := time.Now().UTC()
	for _, sched := range schedules {
		// Schedules without a due time or anchor model are skipped.
		if sched.NextRunAt == nil || sched.AnchorModelID == nil {
			continue
		}
		// NOTE(review): an unparseable next_run_at is skipped silently —
		// such a schedule never fires and nothing is logged; consider
		// logging here.
		nextRun, err := time.Parse(time.RFC3339, *sched.NextRunAt)
		if err != nil {
			continue
		}
		if nextRun.After(now) {
			continue
		}
		slog.Info("Model schedule triggered", "connection_id", sched.ConnectionID, "anchor_model_id", *sched.AnchorModelID, "cron", sched.Cron)
		status := "success"
		var runError string
		_, runErr := s.runner.RunPipeline(sched.ConnectionID, *sched.AnchorModelID, "scheduler")
		if runErr != nil {
			status = "error"
			runError = runErr.Error()
			slog.Error("Scheduled model run failed", "connection_id", sched.ConnectionID, "anchor_model_id", *sched.AnchorModelID, "error", runErr)
		}
		// Compute next run and update status by schedule ID. The next run is
		// computed from the time AFTER the run finished, so long runs push
		// the following run back rather than stacking up.
		var nextRunAt *string
		if next := scheduler.ComputeNextRun(sched.Cron, time.Now().UTC()); next != nil {
			formatted := next.Format(time.RFC3339)
			nextRunAt = &formatted
		}
		if err := s.db.UpdateModelScheduleStatusByID(sched.ID, status, runError, nextRunAt); err != nil {
			slog.Error("Failed to update model schedule status", "schedule_id", sched.ID, "error", err)
		}
	}
}

================================================
FILE: internal/pipelines/clickhouse_sink.go
================================================ package pipelines import ( "context" "encoding/json" "fmt" "log/slog" "sort" "strings" "sync" "time" "github.com/caioricciuti/ch-ui/internal/crypto" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/tunnel" ) // ClickHouseSink writes batches to a ClickHouse table via the tunnel gateway. type ClickHouseSink struct { gateway *tunnel.Gateway db *database.DB secretKey string tableOnce sync.Once tableErr error } // NewClickHouseSink creates a new ClickHouse sink connector. func NewClickHouseSink(gw *tunnel.Gateway, db *database.DB, secretKey string) *ClickHouseSink { return &ClickHouseSink{ gateway: gw, db: db, secretKey: secretKey, } } func (s *ClickHouseSink) Type() string { return "sink_clickhouse" } // Validate checks the sink configuration. func (s *ClickHouseSink) Validate(cfg ConnectorConfig) error { db, _ := cfg.Fields["database"].(string) table, _ := cfg.Fields["table"].(string) if db == "" { return fmt.Errorf("database is required") } if table == "" { return fmt.Errorf("table is required") } return nil } // WriteBatch inserts a batch of records into the ClickHouse table using INSERT FORMAT JSONEachRow. 
func (s *ClickHouseSink) WriteBatch(ctx context.Context, cfg ConnectorConfig, batch Batch) (int, error) { if len(batch.Records) == 0 { return 0, nil } // Auto-create table on first batch if configured if boolField(cfg.Fields, "create_table", false) { s.tableOnce.Do(func() { s.tableErr = s.ensureTable(ctx, cfg, batch) }) if s.tableErr != nil { return 0, fmt.Errorf("ensure table: %w", s.tableErr) } } db, _ := cfg.Fields["database"].(string) table, _ := cfg.Fields["table"].(string) // Build JSONEachRow payload var sb strings.Builder for _, rec := range batch.Records { if len(rec.RawJSON) > 0 { sb.Write(rec.RawJSON) } else { raw, err := json.Marshal(rec.Data) if err != nil { return 0, fmt.Errorf("marshal record: %w", err) } sb.Write(raw) } sb.WriteByte('\n') } query := fmt.Sprintf("INSERT INTO `%s`.`%s` FORMAT JSONEachRow\n%s", db, table, sb.String()) // Find credentials from the pipeline's connection connectionID, _ := cfg.Fields["connection_id"].(string) if connectionID == "" { return 0, fmt.Errorf("no connection_id in sink config") } user, password, err := s.findCredentials(connectionID) if err != nil { return 0, fmt.Errorf("find credentials: %w", err) } _, execErr := s.gateway.ExecuteQuery(connectionID, query, user, password, 30*time.Second) if execErr != nil { return 0, fmt.Errorf("execute insert: %w", execErr) } return len(batch.Records), nil } // ensureTable creates the target table if it doesn't exist, inferring schema from the first batch. 
func (s *ClickHouseSink) ensureTable(ctx context.Context, cfg ConnectorConfig, batch Batch) error { db := stringField(cfg.Fields, "database", "default") table := stringField(cfg.Fields, "table", "") engine := stringField(cfg.Fields, "create_table_engine", "MergeTree") orderBy := stringField(cfg.Fields, "create_table_order_by", "tuple()") if table == "" { return fmt.Errorf("table name is required for auto-creation") } if orderBy == "" { orderBy = "tuple()" } // Infer columns from first record if len(batch.Records) == 0 { return fmt.Errorf("cannot infer schema from empty batch") } data := batch.Records[0].Data if len(data) == 0 { return fmt.Errorf("cannot infer schema from empty record") } // Collect column names sorted for deterministic output colNames := make([]string, 0, len(data)) for k := range data { colNames = append(colNames, k) } sort.Strings(colNames) // Build column definitions var cols []string for _, name := range colNames { chType := inferClickHouseType(data[name]) cols = append(cols, fmt.Sprintf("`%s` %s", name, chType)) } ddl := fmt.Sprintf("CREATE TABLE IF NOT EXISTS `%s`.`%s` (\n %s\n) ENGINE = %s\nORDER BY %s", db, table, strings.Join(cols, ",\n "), engine, orderBy) connectionID, _ := cfg.Fields["connection_id"].(string) if connectionID == "" { return fmt.Errorf("no connection_id in sink config") } user, password, err := s.findCredentials(connectionID) if err != nil { return fmt.Errorf("find credentials: %w", err) } _, execErr := s.gateway.ExecuteQuery(connectionID, ddl, user, password, 30*time.Second) if execErr != nil { return fmt.Errorf("execute CREATE TABLE: %w", execErr) } slog.Info("Auto-created ClickHouse table", "database", db, "table", table, "engine", engine, "columns", len(cols)) return nil } // inferClickHouseType maps a Go/JSON value to a ClickHouse column type. 
func inferClickHouseType(v interface{}) string { switch v.(type) { case string: return "String" case float64: return "Float64" case bool: return "UInt8" case nil: return "Nullable(String)" default: return "String" } } // findCredentials retrieves ClickHouse credentials from active sessions. func (s *ClickHouseSink) findCredentials(connectionID string) (string, string, error) { sessions, err := s.db.GetActiveSessionsByConnection(connectionID, 3) if err != nil { return "", "", fmt.Errorf("failed to load sessions: %w", err) } for _, sess := range sessions { password, err := crypto.Decrypt(sess.EncryptedPassword, s.secretKey) if err != nil { continue } return sess.ClickhouseUser, password, nil } return "", "", fmt.Errorf("no active sessions with valid credentials for connection %s", connectionID) } ================================================ FILE: internal/pipelines/database_source.go ================================================ package pipelines import ( "context" "database/sql" "encoding/json" "fmt" "log/slog" "time" _ "github.com/go-sql-driver/mysql" _ "github.com/lib/pq" _ "modernc.org/sqlite" ) // DatabaseSource polls a PostgreSQL or MySQL database for new rows. type DatabaseSource struct{} func (d *DatabaseSource) Type() string { return "source_database" } // Validate checks database source configuration. 
func (d *DatabaseSource) Validate(cfg ConnectorConfig) error { dbType := stringField(cfg.Fields, "db_type", "") connStr := stringField(cfg.Fields, "connection_string", "") query := stringField(cfg.Fields, "query", "") if dbType == "" { return fmt.Errorf("db_type is required (postgres, mysql, or sqlite)") } if dbType != "postgres" && dbType != "mysql" && dbType != "sqlite" { return fmt.Errorf("db_type must be 'postgres', 'mysql', or 'sqlite'") } if connStr == "" { return fmt.Errorf("connection_string is required") } if query == "" { return fmt.Errorf("query is required") } return nil } // Start begins polling the source database and sends batches to the output channel. func (d *DatabaseSource) Start(ctx context.Context, cfg ConnectorConfig, out chan<- Batch) error { dbType := stringField(cfg.Fields, "db_type", "") connStr := stringField(cfg.Fields, "connection_string", "") query := stringField(cfg.Fields, "query", "") pollIntervalSec := intField(cfg.Fields, "poll_interval", 60) watermarkCol := stringField(cfg.Fields, "watermark_column", "") batchSize := intField(cfg.Fields, "batch_size", 1000) // Map db_type to driver name driver := dbType if dbType == "postgres" { driver = "postgres" } db, err := sql.Open(driver, connStr) if err != nil { return fmt.Errorf("open database: %w", err) } defer db.Close() db.SetMaxOpenConns(2) db.SetMaxIdleConns(1) db.SetConnMaxLifetime(5 * time.Minute) if err := db.PingContext(ctx); err != nil { return fmt.Errorf("ping database: %w", err) } slog.Info("Database source started", "type", dbType, "poll_interval", pollIntervalSec) var watermark interface{} ticker := time.NewTicker(time.Duration(pollIntervalSec) * time.Second) defer ticker.Stop() poll := func() error { var rows *sql.Rows var queryErr error if watermarkCol != "" && watermark != nil { rows, queryErr = db.QueryContext(ctx, query, watermark) } else { rows, queryErr = db.QueryContext(ctx, query) } if queryErr != nil { return fmt.Errorf("query: %w", queryErr) } defer rows.Close() 
columns, err := rows.Columns() if err != nil { return fmt.Errorf("get columns: %w", err) } var buf []Record for rows.Next() { values := make([]interface{}, len(columns)) valuePtrs := make([]interface{}, len(columns)) for i := range values { valuePtrs[i] = &values[i] } if err := rows.Scan(valuePtrs...); err != nil { slog.Warn("Database source row scan error", "error", err) continue } data := make(map[string]interface{}) for i, col := range columns { val := values[i] // Convert []byte to string for JSON compatibility if b, ok := val.([]byte); ok { data[col] = string(b) } else { data[col] = val } } // Update watermark if watermarkCol != "" { if wv, ok := data[watermarkCol]; ok { watermark = wv } } raw, _ := json.Marshal(data) buf = append(buf, Record{ Data: data, RawJSON: raw, }) if len(buf) >= batchSize { select { case out <- Batch{Records: buf, SourceTS: time.Now()}: case <-ctx.Done(): return nil } buf = nil } } if err := rows.Err(); err != nil { return fmt.Errorf("rows iteration: %w", err) } // Flush remaining if len(buf) > 0 { select { case out <- Batch{Records: buf, SourceTS: time.Now()}: case <-ctx.Done(): return nil } } return nil } // First poll if err := poll(); err != nil { slog.Error("Database source poll error", "error", err) } for { select { case <-ctx.Done(): return nil case <-ticker.C: if err := poll(); err != nil { slog.Error("Database source poll error", "error", err) } } } } ================================================ FILE: internal/pipelines/helpers.go ================================================ package pipelines // intField extracts an int from a config map with a default fallback. func intField(fields map[string]interface{}, key string, def int) int { v, ok := fields[key] if !ok { return def } switch n := v.(type) { case float64: return int(n) case int: return n case int64: return int(n) default: return def } } // stringField extracts a string from a config map with a default fallback. 
func stringField(fields map[string]interface{}, key, def string) string { v, ok := fields[key] if !ok { return def } s, ok := v.(string) if !ok { return def } return s } // boolField extracts a bool from a config map with a default fallback. func boolField(fields map[string]interface{}, key string, def bool) bool { v, ok := fields[key] if !ok { return def } b, ok := v.(bool) if !ok { return def } return b } ================================================ FILE: internal/pipelines/kafka.go ================================================ package pipelines import ( "context" "crypto/tls" "encoding/json" "fmt" "log/slog" "strings" "time" "github.com/IBM/sarama" ) // KafkaSource consumes messages from a Kafka topic using a consumer group. type KafkaSource struct{} func (k *KafkaSource) Type() string { return "source_kafka" } // Validate checks Kafka configuration. func (k *KafkaSource) Validate(cfg ConnectorConfig) error { brokers, _ := cfg.Fields["brokers"].(string) topic, _ := cfg.Fields["topic"].(string) if brokers == "" { return fmt.Errorf("brokers is required") } if topic == "" { return fmt.Errorf("topic is required") } return nil } // Start begins consuming messages from Kafka and sends batches to the output channel. 
// Start joins the consumer group and blocks until ctx is cancelled or
// consumption fails. Consume returns on every rebalance, so it is called in
// a loop.
func (k *KafkaSource) Start(ctx context.Context, cfg ConnectorConfig, out chan<- Batch) error {
	brokers := strings.Split(stringField(cfg.Fields, "brokers", ""), ",")
	topic := stringField(cfg.Fields, "topic", "")
	group := stringField(cfg.Fields, "consumer_group", "ch-ui-pipeline")
	batchSize := intField(cfg.Fields, "batch_size", 500)
	batchTimeoutMs := intField(cfg.Fields, "batch_timeout_ms", 5000)

	config := sarama.NewConfig()
	config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRoundRobin()}
	// Only new messages are consumed when the group has no committed offset.
	config.Consumer.Offsets.Initial = sarama.OffsetNewest
	config.Version = sarama.V2_6_0_0

	// SASL configuration (optional): PLAIN or SCRAM-SHA-256/512.
	saslMechanism := stringField(cfg.Fields, "sasl_mechanism", "")
	if saslMechanism != "" {
		config.Net.SASL.Enable = true
		config.Net.SASL.User = stringField(cfg.Fields, "sasl_username", "")
		config.Net.SASL.Password = stringField(cfg.Fields, "sasl_password", "")
		switch strings.ToUpper(saslMechanism) {
		case "PLAIN":
			config.Net.SASL.Mechanism = sarama.SASLTypePlaintext
		case "SCRAM-SHA-256":
			config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256
			config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &scramClient{HashGeneratorFcn: SHA256} }
		case "SCRAM-SHA-512":
			config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
			config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &scramClient{HashGeneratorFcn: SHA512} }
		}
	}

	// TLS (optional); server certs are verified against system roots.
	if boolField(cfg.Fields, "use_tls", false) {
		config.Net.TLS.Enable = true
		config.Net.TLS.Config = &tls.Config{
			MinVersion: tls.VersionTLS12,
		}
	}

	// Create consumer group
	client, err := sarama.NewConsumerGroup(brokers, group, config)
	if err != nil {
		return fmt.Errorf("create kafka consumer group: %w", err)
	}
	defer client.Close()

	handler := &kafkaGroupHandler{
		batchSize:      batchSize,
		batchTimeoutMs: batchTimeoutMs,
		out:            out,
	}

	slog.Info("Kafka source started", "brokers", brokers, "topic", topic, "group", group)

	for {
		// Consume returns nil on rebalance; re-enter unless cancelled.
		if ctx.Err() != nil {
			return nil
		}
		if err := client.Consume(ctx, []string{topic}, handler); err != nil {
			return fmt.Errorf("kafka consume: %w", err)
		}
	}
}

// kafkaGroupHandler implements sarama.ConsumerGroupHandler. It accumulates
// messages into batches, flushing on size or on a timeout tick.
type kafkaGroupHandler struct {
	batchSize      int
	batchTimeoutMs int
	out            chan<- Batch
}

func (h *kafkaGroupHandler) Setup(_ sarama.ConsumerGroupSession) error   { return nil }
func (h *kafkaGroupHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil }

// ConsumeClaim drains one partition claim. Messages that fail JSON parsing
// are wrapped as a raw record instead of being dropped.
// NOTE(review): MarkMessage is called before the batch is written to the
// sink, so delivery is at-most-once — a crash between mark and sink write
// loses those messages. Confirm this trade-off is intended.
func (h *kafkaGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	var buf []Record
	ticker := time.NewTicker(time.Duration(h.batchTimeoutMs) * time.Millisecond)
	defer ticker.Stop()

	flush := func() {
		if len(buf) == 0 {
			return
		}
		batch := Batch{
			Records:  buf,
			SourceTS: time.Now(),
		}
		select {
		case h.out <- batch:
		case <-session.Context().Done():
			return
		}
		buf = nil
	}

	for {
		select {
		case <-session.Context().Done():
			flush()
			return nil
		case msg, ok := <-claim.Messages():
			if !ok {
				flush()
				return nil
			}
			var data map[string]interface{}
			if err := json.Unmarshal(msg.Value, &data); err != nil {
				slog.Warn("Kafka message parse error, wrapping as raw", "error", err, "offset", msg.Offset)
				data = map[string]interface{}{
					"_raw":       string(msg.Value),
					"_topic":     msg.Topic,
					"_partition": msg.Partition,
					"_offset":    msg.Offset,
					"_timestamp": msg.Timestamp.UTC().Format(time.RFC3339),
				}
			}
			buf = append(buf, Record{
				Data:    data,
				RawJSON: msg.Value,
			})
			session.MarkMessage(msg, "")
			if len(buf) >= h.batchSize {
				flush()
			}
		case <-ticker.C:
			flush()
		}
	}
}

================================================
FILE: internal/pipelines/kafka_scram.go
================================================
package pipelines

import (
	"crypto/sha256"
	"crypto/sha512"
	"hash"

	"github.com/xdg-go/scram"
)

// SHA256 and SHA512 hash generators for SCRAM authentication.
var (
	SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() }
	SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() }
)

// scramClient implements sarama.SCRAMClient using xdg-go/scram. A fresh
// instance is created per SASL handshake via the generator func in kafka.go.
type scramClient struct {
	*scram.ClientConversation
	scram.HashGeneratorFcn
}

// Begin starts a new SCRAM conversation for the given credentials.
func (c *scramClient) Begin(userName, password, authzID string) (err error) {
	client, err := c.HashGeneratorFcn.NewClient(userName, password, authzID)
	if err != nil {
		return err
	}
	c.ClientConversation = client.NewConversation()
	return nil
}

// Step advances the SCRAM challenge/response exchange by one message.
func (c *scramClient) Step(challenge string) (string, error) {
	return c.ClientConversation.Step(challenge)
}

// Done reports whether the SCRAM conversation has completed.
func (c *scramClient) Done() bool {
	return c.ClientConversation.Done()
}

================================================
FILE: internal/pipelines/registry.go
================================================
package pipelines

import "fmt"

// NewSource returns a SourceConnector for the given node type.
func NewSource(nodeType string) (SourceConnector, error) {
	switch nodeType {
	case "source_kafka":
		return &KafkaSource{}, nil
	case "source_webhook":
		return &WebhookSource{}, nil
	case "source_database":
		return &DatabaseSource{}, nil
	case "source_s3":
		return &S3Source{}, nil
	default:
		return nil, fmt.Errorf("unknown source type: %s", nodeType)
	}
}

================================================
FILE: internal/pipelines/runner.go
================================================
package pipelines

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"sync"
	"time"

	"github.com/caioricciuti/ch-ui/internal/config"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/tunnel"
)

// RunningPipeline is the runtime state of a single active pipeline.
type RunningPipeline struct {
	PipelineID string
	RunID      string
	Cancel     context.CancelFunc
	Metrics    *Metrics
	StartedAt  time.Time
	Done       chan struct{} // closed when the pipeline goroutine exits
}

// Runner manages the lifecycle of all running pipelines.
type Runner struct { db *database.DB gateway *tunnel.Gateway cfg *config.Config mu sync.RWMutex pipelines map[string]*RunningPipeline stopCh chan struct{} } // NewRunner creates a new pipeline runner. func NewRunner(db *database.DB, gw *tunnel.Gateway, cfg *config.Config) *Runner { return &Runner{ db: db, gateway: gw, cfg: cfg, pipelines: make(map[string]*RunningPipeline), stopCh: make(chan struct{}), } } // Start resumes any pipelines that were in "running" status (crash recovery). func (r *Runner) Start() { go func() { pipelines, err := r.db.GetPipelinesByStatus("running") if err != nil { slog.Error("Failed to load running pipelines for recovery", "error", err) return } for _, p := range pipelines { if err := r.StartPipeline(p.ID); err != nil { slog.Error("Failed to resume pipeline", "error", err, "pipeline", p.ID) r.db.UpdatePipelineStatus(p.ID, "error", err.Error()) } } if len(pipelines) > 0 { slog.Info("Pipeline runner started", "resumed", len(pipelines)) } }() } // Stop gracefully stops all running pipelines. func (r *Runner) Stop() { close(r.stopCh) r.mu.RLock() for _, rp := range r.pipelines { rp.Cancel() } r.mu.RUnlock() // Wait for all to finish with timeout timer := time.NewTimer(30 * time.Second) defer timer.Stop() r.mu.RLock() for _, rp := range r.pipelines { select { case <-rp.Done: case <-timer.C: slog.Warn("Timeout waiting for pipeline to stop", "pipeline", rp.PipelineID) } } r.mu.RUnlock() } // StartPipeline starts a single pipeline by ID. 
func (r *Runner) StartPipeline(pipelineID string) error { r.mu.Lock() if _, exists := r.pipelines[pipelineID]; exists { r.mu.Unlock() return fmt.Errorf("pipeline %s is already running", pipelineID) } r.mu.Unlock() // Load pipeline + graph from DB pipeline, err := r.db.GetPipelineByID(pipelineID) if err != nil || pipeline == nil { return fmt.Errorf("pipeline not found: %s", pipelineID) } nodes, edges, err := r.db.GetPipelineGraph(pipelineID) if err != nil { return fmt.Errorf("load pipeline graph: %w", err) } // Find source and sink nodes var sourceNode *database.PipelineNode var sinkNode *database.PipelineNode for i := range nodes { switch { case isSourceType(nodes[i].NodeType): if sourceNode != nil { return fmt.Errorf("pipeline has multiple source nodes") } sourceNode = &nodes[i] case nodes[i].NodeType == "sink_clickhouse": if sinkNode != nil { return fmt.Errorf("pipeline has multiple sink nodes") } sinkNode = &nodes[i] } } if sourceNode == nil { return fmt.Errorf("pipeline has no source node") } if sinkNode == nil { return fmt.Errorf("pipeline has no sink node") } // Validate that source connects to sink connected := false for _, e := range edges { if e.SourceNodeID == sourceNode.ID && e.TargetNodeID == sinkNode.ID { connected = true break } } if !connected { return fmt.Errorf("source node is not connected to sink node") } // Parse node configs sourceCfg, err := parseNodeConfig(sourceNode) if err != nil { return fmt.Errorf("parse source config: %w", err) } sinkCfg, err := parseNodeConfig(sinkNode) if err != nil { return fmt.Errorf("parse sink config: %w", err) } // Inject runtime fields into configs sinkCfg.Fields["connection_id"] = pipeline.ConnectionID sourceCfg.Fields["pipeline_id"] = pipelineID // Instantiate connectors source, err := NewSource(sourceCfg.NodeType) if err != nil { return fmt.Errorf("create source connector: %w", err) } sink := NewClickHouseSink(r.gateway, r.db, r.cfg.AppSecretKey) // Validate configs if err := source.Validate(sourceCfg); err != 
nil { return fmt.Errorf("validate source config: %w", err) } if err := sink.Validate(sinkCfg); err != nil { return fmt.Errorf("validate sink config: %w", err) } // Create run record runID, err := r.db.CreatePipelineRun(pipelineID, "running") if err != nil { return fmt.Errorf("create pipeline run: %w", err) } // Update pipeline status r.db.UpdatePipelineStatus(pipelineID, "running", "") // Launch goroutine ctx, cancel := context.WithCancel(context.Background()) metrics := &Metrics{} done := make(chan struct{}) rp := &RunningPipeline{ PipelineID: pipelineID, RunID: runID, Cancel: cancel, Metrics: metrics, StartedAt: time.Now(), Done: done, } r.mu.Lock() r.pipelines[pipelineID] = rp r.mu.Unlock() go r.runPipeline(ctx, rp, source, sink, sourceCfg, sinkCfg, pipeline.ConnectionID) r.db.CreatePipelineRunLog(runID, "info", "Pipeline started") slog.Info("Pipeline started", "pipeline", pipelineID, "source", sourceCfg.NodeType, "run", runID) return nil } // StopPipeline stops a running pipeline. func (r *Runner) StopPipeline(pipelineID string) error { r.mu.RLock() rp, exists := r.pipelines[pipelineID] r.mu.RUnlock() if !exists { // Pipeline might not be running in-memory, just update DB status r.db.UpdatePipelineStatus(pipelineID, "stopped", "") return nil } rp.Cancel() // Wait for goroutine to finish with timeout select { case <-rp.Done: case <-time.After(15 * time.Second): slog.Warn("Timeout waiting for pipeline to stop", "pipeline", pipelineID) } return nil } // GetRunningMetrics returns metrics for a running pipeline. func (r *Runner) GetRunningMetrics(pipelineID string) *Metrics { r.mu.RLock() rp, exists := r.pipelines[pipelineID] r.mu.RUnlock() if !exists { return nil } return rp.Metrics } // runPipeline is the main execution loop for a single pipeline. 
// runPipeline drives one pipeline: the source goroutine produces batches on
// batchCh, this goroutine writes them to the sink, and final status is
// persisted on exit. Done is closed last (defers run LIFO) so waiters only
// unblock after the map entry has been removed.
func (r *Runner) runPipeline(ctx context.Context, rp *RunningPipeline, source SourceConnector, sink SinkConnector, sourceCfg, sinkCfg ConnectorConfig, connectionID string) {
	defer close(rp.Done)
	defer func() {
		r.mu.Lock()
		delete(r.pipelines, rp.PipelineID)
		r.mu.Unlock()
	}()

	batchCh := make(chan Batch, 10)
	var sourceErr error

	// Start source in a goroutine. sourceErr is written before close(batchCh),
	// so on the normal exit path (range loop ends after the close) the read
	// below is properly ordered.
	// NOTE(review): on the `goto done` path (ctx cancelled) the source
	// goroutine may still be writing sourceErr concurrently — run under
	// -race to confirm; a channel or atomic would close this gap.
	go func() {
		sourceErr = source.Start(ctx, sourceCfg, batchCh)
		close(batchCh)
	}()

	// Consume batches and write to sink. Failed batches are counted and
	// logged but do not stop the pipeline.
	for batch := range batchCh {
		select {
		case <-ctx.Done():
			goto done
		default:
		}
		rows, err := sink.WriteBatch(ctx, sinkCfg, batch)
		if err != nil {
			rp.Metrics.ErrorsCount.Add(1)
			r.db.CreatePipelineRunLog(rp.RunID, "error", fmt.Sprintf("Write batch failed: %v", err))
			slog.Error("Pipeline batch write failed", "pipeline", rp.PipelineID, "error", err)
			continue
		}
		rp.Metrics.RowsIngested.Add(int64(rows))
		rp.Metrics.BatchesSent.Add(1)
		rp.Metrics.LastBatchAt.Store(time.Now())
		// Estimate bytes from raw JSON
		for _, rec := range batch.Records {
			rp.Metrics.BytesIngested.Add(int64(len(rec.RawJSON)))
		}
	}
done:
	// Finalize: cancellation maps to "stopped", a source error (without
	// cancellation) to "error", otherwise "success".
	status := "success"
	errMsg := ""
	if sourceErr != nil && ctx.Err() == nil {
		status = "error"
		errMsg = sourceErr.Error()
	} else if ctx.Err() != nil {
		status = "stopped"
	}
	r.db.UpdatePipelineRun(
		rp.RunID,
		status,
		rp.Metrics.RowsIngested.Load(),
		rp.Metrics.BytesIngested.Load(),
		rp.Metrics.ErrorsCount.Load(),
		errMsg,
		"{}",
	)
	r.db.UpdatePipelineStatus(rp.PipelineID, status, errMsg)
	r.db.CreatePipelineRunLog(rp.RunID, "info", fmt.Sprintf("Pipeline %s (rows: %d, errors: %d)", status, rp.Metrics.RowsIngested.Load(), rp.Metrics.ErrorsCount.Load()))
	slog.Info("Pipeline finished", "pipeline", rp.PipelineID, "status", status, "rows", rp.Metrics.RowsIngested.Load())
}

// Helper functions

// isSourceType reports whether a node type is one of the known sources.
func isSourceType(nodeType string) bool {
	switch nodeType {
	case "source_kafka", "source_webhook", "source_database", "source_s3":
		return true
	}
	return false
}

// parseNodeConfig unmarshals a node's stored config into a ConnectorConfig.
// NOTE(review): the field is named ConfigEncrypted but is unmarshalled as
// plain JSON here — if the column really holds ciphertext, decryption must
// happen before this point; confirm against the write path.
func parseNodeConfig(node *database.PipelineNode) (ConnectorConfig, error) {
	var fields map[string]interface{}
	if err := json.Unmarshal([]byte(node.ConfigEncrypted), &fields); err != nil {
		return ConnectorConfig{}, fmt.Errorf("unmarshal node config: %w", err)
	}
	return ConnectorConfig{
		NodeType: node.NodeType,
		Fields:   fields,
	}, nil
}

================================================
FILE: internal/pipelines/s3_source.go
================================================
package pipelines

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log/slog"
	"strings"
	"sync"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

// S3Source polls an S3-compatible bucket for new files and parses them.
type S3Source struct{}

func (s *S3Source) Type() string { return "source_s3" }

// Validate checks S3 configuration: endpoint, bucket and both credential
// fields are required.
func (s *S3Source) Validate(cfg ConnectorConfig) error {
	endpoint := stringField(cfg.Fields, "endpoint", "")
	bucket := stringField(cfg.Fields, "bucket", "")
	accessKey := stringField(cfg.Fields, "access_key", "")
	secretKey := stringField(cfg.Fields, "secret_key", "")
	if endpoint == "" {
		return fmt.Errorf("endpoint is required")
	}
	if bucket == "" {
		return fmt.Errorf("bucket is required")
	}
	if accessKey == "" {
		return fmt.Errorf("access_key is required")
	}
	if secretKey == "" {
		return fmt.Errorf("secret_key is required")
	}
	return nil
}

// Start begins polling S3 for new files and sends batches to the output channel.
// Start lists the bucket on every poll interval and processes objects not
// seen before. Blocks until ctx is cancelled.
//
// NOTE(review): the processed set is in-memory only and never pruned — it
// grows without bound for large buckets, and after a process restart every
// object is reprocessed. Confirm whether dedup state should be persisted.
func (s *S3Source) Start(ctx context.Context, cfg ConnectorConfig, out chan<- Batch) error {
	endpoint := stringField(cfg.Fields, "endpoint", "")
	bucket := stringField(cfg.Fields, "bucket", "")
	prefix := stringField(cfg.Fields, "prefix", "")
	accessKey := stringField(cfg.Fields, "access_key", "")
	secretKey := stringField(cfg.Fields, "secret_key", "")
	region := stringField(cfg.Fields, "region", "us-east-1")
	format := stringField(cfg.Fields, "format", "json")
	pollIntervalSec := intField(cfg.Fields, "poll_interval", 300)
	useSSL := boolField(cfg.Fields, "use_ssl", true)
	batchSize := intField(cfg.Fields, "batch_size", 1000)

	client, err := minio.New(endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(accessKey, secretKey, ""),
		Secure: useSSL,
		Region: region,
	})
	if err != nil {
		return fmt.Errorf("create S3 client: %w", err)
	}

	slog.Info("S3 source started", "endpoint", endpoint, "bucket", bucket, "prefix", prefix, "format", format)

	// Track processed files to avoid reprocessing
	var processed sync.Map

	ticker := time.NewTicker(time.Duration(pollIntervalSec) * time.Second)
	defer ticker.Stop()

	poll := func() error {
		objectCh := client.ListObjects(ctx, bucket, minio.ListObjectsOptions{
			Prefix:    prefix,
			Recursive: true,
		})
		for obj := range objectCh {
			if obj.Err != nil {
				slog.Warn("S3 list error", "error", obj.Err)
				continue
			}
			// Skip already processed. LoadOrStore also claims the key so a
			// concurrent poll cannot pick up the same object.
			if _, seen := processed.LoadOrStore(obj.Key, true); seen {
				continue
			}
			if err := s.processFile(ctx, client, bucket, obj.Key, format, batchSize, out); err != nil {
				slog.Error("S3 file processing error", "key", obj.Key, "error", err)
				processed.Delete(obj.Key) // Allow retry
			}
		}
		return nil
	}

	// First poll runs immediately; subsequent polls follow the ticker.
	if err := poll(); err != nil {
		slog.Error("S3 source poll error", "error", err)
	}
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C:
			if err := poll(); err != nil {
				slog.Error("S3 source poll error", "error", err)
			}
		}
	}
}

// processFile reads and parses a single S3 object.
// processFile streams one S3 object through the parser matching the
// configured format ("json"/"ndjson"/"jsonl" or "csv").
func (s *S3Source) processFile(ctx context.Context, client *minio.Client, bucket, key, format string, batchSize int, out chan<- Batch) error {
	obj, err := client.GetObject(ctx, bucket, key, minio.GetObjectOptions{})
	if err != nil {
		return fmt.Errorf("get object: %w", err)
	}
	defer obj.Close()

	slog.Info("Processing S3 file", "key", key, "format", format)

	switch strings.ToLower(format) {
	case "json", "ndjson", "jsonl":
		return s.parseNDJSON(ctx, obj, batchSize, out)
	case "csv":
		return s.parseCSV(ctx, obj, batchSize, out)
	default:
		return fmt.Errorf("unsupported format: %s", format)
	}
}

// parseNDJSON reads newline-delimited JSON, emitting a batch every batchSize
// records plus a final partial batch. Unparseable lines are logged and
// skipped; blank lines are ignored.
func (s *S3Source) parseNDJSON(ctx context.Context, r io.Reader, batchSize int, out chan<- Batch) error {
	scanner := bufio.NewScanner(r)
	scanner.Buffer(make([]byte, 1024*1024), 10*1024*1024) // 10MB max line
	var buf []Record
	for scanner.Scan() {
		if ctx.Err() != nil {
			return nil
		}
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		var data map[string]interface{}
		if err := json.Unmarshal([]byte(line), &data); err != nil {
			slog.Warn("S3 JSON parse error, skipping line", "error", err)
			continue
		}
		buf = append(buf, Record{
			Data:    data,
			RawJSON: []byte(line),
		})
		if len(buf) >= batchSize {
			select {
			case out <- Batch{Records: buf, SourceTS: time.Now()}:
			case <-ctx.Done():
				return nil
			}
			buf = nil
		}
	}
	// Flush the final partial batch before reporting any scan error.
	if len(buf) > 0 {
		select {
		case out <- Batch{Records: buf, SourceTS: time.Now()}:
		case <-ctx.Done():
		}
	}
	return scanner.Err()
}

// parseCSV reads CSV files (first row = headers).
// parseCSV reads CSV rows into records keyed by the header row. Rows shorter
// than the header get "" for the missing columns; extra values are dropped.
//
// NOTE(review): fields are split on raw commas — quoted fields containing
// commas, quotes, or newlines (RFC 4180) are mis-parsed. Consider
// encoding/csv if such files are expected.
func (s *S3Source) parseCSV(ctx context.Context, r io.Reader, batchSize int, out chan<- Batch) error {
	scanner := bufio.NewScanner(r)
	scanner.Buffer(make([]byte, 1024*1024), 10*1024*1024)

	// Read header
	if !scanner.Scan() {
		return fmt.Errorf("empty CSV file")
	}
	headers := strings.Split(scanner.Text(), ",")
	for i := range headers {
		headers[i] = strings.TrimSpace(headers[i])
	}

	var buf []Record
	for scanner.Scan() {
		if ctx.Err() != nil {
			return nil
		}
		line := scanner.Text()
		if strings.TrimSpace(line) == "" {
			continue
		}
		values := strings.Split(line, ",")
		data := make(map[string]interface{})
		for i, h := range headers {
			if i < len(values) {
				data[h] = strings.TrimSpace(values[i])
			} else {
				data[h] = ""
			}
		}
		raw, _ := json.Marshal(data)
		buf = append(buf, Record{
			Data:    data,
			RawJSON: raw,
		})
		if len(buf) >= batchSize {
			select {
			case out <- Batch{Records: buf, SourceTS: time.Now()}:
			case <-ctx.Done():
				return nil
			}
			buf = nil
		}
	}
	if len(buf) > 0 {
		select {
		case out <- Batch{Records: buf, SourceTS: time.Now()}:
		case <-ctx.Done():
		}
	}
	return scanner.Err()
}

================================================
FILE: internal/pipelines/types.go
================================================
package pipelines

import (
	"context"
	"sync/atomic"
	"time"
)

// Record represents a single data record flowing through the pipeline.
type Record struct {
	Data    map[string]interface{} // Column name -> value
	RawJSON []byte                 // Original bytes for pass-through
}

// Batch is a slice of records ready for INSERT.
type Batch struct {
	Records  []Record
	SourceTS time.Time
}

// ConnectorConfig is the parsed config for a connector node.
type ConnectorConfig struct {
	NodeType string                 `json:"node_type"`
	Fields   map[string]interface{} `json:"fields"`
}

// SourceConnector is the interface all source connectors implement.
type SourceConnector interface {
	// Validate checks configuration before pipeline start.
	Validate(cfg ConnectorConfig) error
	// Start begins reading data. It sends batches to the output channel.
	// It blocks until ctx is cancelled or an unrecoverable error occurs.
	Start(ctx context.Context, cfg ConnectorConfig, out chan<- Batch) error
	// Type returns the connector type identifier.
	Type() string
}

// SinkConnector writes batches to the destination.
type SinkConnector interface {
	Validate(cfg ConnectorConfig) error
	WriteBatch(ctx context.Context, cfg ConnectorConfig, batch Batch) (rowsWritten int, err error)
	Type() string
}

// Metrics tracks pipeline execution metrics (thread-safe via atomic).
type Metrics struct {
	RowsIngested  atomic.Int64
	BytesIngested atomic.Int64
	BatchesSent   atomic.Int64
	ErrorsCount   atomic.Int64
	LastBatchAt   atomic.Value // time.Time
}

================================================
FILE: internal/pipelines/webhook.go
================================================
package pipelines

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log/slog"
	"net/http"
	"strings"
	"sync"
	"time"
)

// WebhookSource receives data via HTTP POST requests.
// When started, it registers itself in a global registry so that an
// external HTTP handler can route incoming requests to the right pipeline.
type WebhookSource struct{}

func (w *WebhookSource) Type() string { return "source_webhook" }

// Validate checks webhook configuration.
func (w *WebhookSource) Validate(cfg ConnectorConfig) error {
	// auth_token is optional — when empty, the webhook accepts all requests
	return nil
}

// Start blocks until the context is cancelled, forwarding received batches to out.
func (w *WebhookSource) Start(ctx context.Context, cfg ConnectorConfig, out chan<- Batch) error { pipelineID, _ := cfg.Fields["pipeline_id"].(string) authToken, _ := cfg.Fields["auth_token"].(string) batchSize := intField(cfg.Fields, "batch_size", 100) batchTimeoutMs := intField(cfg.Fields, "batch_timeout_ms", 5000) // Create a receiver for this pipeline recv := &webhookReceiver{ authToken: authToken, incoming: make(chan Record, 1000), } // Register in global registry webhookRegistry.Store(pipelineID, recv) defer webhookRegistry.Delete(pipelineID) slog.Info("Webhook source started", "pipeline", pipelineID) // Batch accumulation loop var buf []Record ticker := time.NewTicker(time.Duration(batchTimeoutMs) * time.Millisecond) defer ticker.Stop() flush := func() { if len(buf) == 0 { return } batch := Batch{ Records: buf, SourceTS: time.Now(), } select { case out <- batch: case <-ctx.Done(): return } buf = nil } for { select { case <-ctx.Done(): flush() return nil case rec := <-recv.incoming: buf = append(buf, rec) if len(buf) >= batchSize { flush() } case <-ticker.C: flush() } } } // ── Webhook HTTP integration ─────────────────────────────────────── // webhookRegistry maps pipeline IDs to active webhook receivers. var webhookRegistry sync.Map // webhookReceiver holds the channel for a single pipeline's webhook endpoint. type webhookReceiver struct { authToken string incoming chan Record } // HandleWebhook is an HTTP handler that routes incoming webhook POSTs to the // correct running pipeline. 
Mount at: POST /api/pipelines/webhook/{pipelineID} func HandleWebhook(w http.ResponseWriter, r *http.Request) { // Extract pipeline ID from URL path (last segment) parts := strings.Split(strings.TrimRight(r.URL.Path, "/"), "/") if len(parts) == 0 { http.Error(w, "missing pipeline ID", http.StatusBadRequest) return } pipelineID := parts[len(parts)-1] val, ok := webhookRegistry.Load(pipelineID) if !ok { http.Error(w, "pipeline not running or not a webhook pipeline", http.StatusNotFound) return } recv := val.(*webhookReceiver) // Authenticate (skip if no auth token configured) if recv.authToken != "" { token := r.Header.Get("Authorization") token = strings.TrimPrefix(token, "Bearer ") if token == "" { token = r.URL.Query().Get("token") } if token != recv.authToken { http.Error(w, "unauthorized", http.StatusUnauthorized) return } } // Read body body, err := io.ReadAll(io.LimitReader(r.Body, 10*1024*1024)) // 10MB limit if err != nil { http.Error(w, "failed to read body", http.StatusBadRequest) return } contentType := r.Header.Get("Content-Type") records, parseErr := parseWebhookBody(body, contentType) if parseErr != nil { http.Error(w, fmt.Sprintf("parse error: %v", parseErr), http.StatusBadRequest) return } // Send records to pipeline (non-blocking with backpressure) accepted := 0 for _, rec := range records { select { case recv.incoming <- rec: accepted++ default: // Channel full, apply backpressure http.Error(w, "pipeline buffer full, try again later", http.StatusTooManyRequests) return } } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) fmt.Fprintf(w, `{"accepted":%d}`, accepted) } // parseWebhookBody parses JSON or NDJSON into records. 
func parseWebhookBody(body []byte, contentType string) ([]Record, error) { trimmed := strings.TrimSpace(string(body)) if trimmed == "" { return nil, fmt.Errorf("empty body") } // Try to detect if it's an array or single object if strings.HasPrefix(trimmed, "[") { // JSON array var arr []json.RawMessage if err := json.Unmarshal(body, &arr); err != nil { return nil, fmt.Errorf("invalid JSON array: %w", err) } var records []Record for _, raw := range arr { var data map[string]interface{} if err := json.Unmarshal(raw, &data); err != nil { return nil, fmt.Errorf("invalid JSON object in array: %w", err) } records = append(records, Record{ Data: data, RawJSON: raw, }) } return records, nil } // NDJSON or single object lines := strings.Split(trimmed, "\n") var records []Record for _, line := range lines { line = strings.TrimSpace(line) if line == "" { continue } var data map[string]interface{} if err := json.Unmarshal([]byte(line), &data); err != nil { return nil, fmt.Errorf("invalid JSON line: %w", err) } records = append(records, Record{ Data: data, RawJSON: []byte(line), }) } return records, nil } ================================================ FILE: internal/queryproc/variables.go ================================================ package queryproc import ( "fmt" "math" "regexp" "strconv" "strings" "time" ) // TimeRange represents a dashboard time range selection. type TimeRange struct { Type string `json:"type"` // "relative" or "absolute" From string `json:"from"` To string `json:"to"` } // ProcessorOptions contains all inputs for query variable interpolation. type ProcessorOptions struct { Query string TimeRange *TimeRange TimeField string TimeFieldUnit string // "ns", "us", "ms", "s" - defaults to "ms" MaxDataPoints int // defaults to 1000 Table string } // ProcessedResult contains the output of query variable interpolation. 
type ProcessedResult struct { Query string `json:"query"` HasTimeVariables bool `json:"has_time_variables"` InterpolatedVars map[string]any `json:"interpolated_vars"` Errors []string `json:"errors"` } // Variable patterns for detection and replacement. var ( patTimeFilterWithCol = regexp.MustCompile(`(?i)\$__timeFilter\(([^)]+)\)`) patTimeFilterSimple = regexp.MustCompile(`(?i)\$__timeFilter`) patTimestampFilter = regexp.MustCompile(`(?i)\$__timestamp\(([^)]+)\)`) patTimeField = regexp.MustCompile(`(?i)\$__timeField`) patTimeFrom = regexp.MustCompile(`(?i)\$__timeFrom`) patTimeTo = regexp.MustCompile(`(?i)\$__timeTo`) patInterval = regexp.MustCompile(`(?i)\$__interval`) patTable = regexp.MustCompile(`(?i)\$__table`) // Pattern for parsing relative time strings like "5m", "1h", "7d", "1M", "1y", // and Grafana-style forms like "now-5m" or "now-15min". patRelativeTime = regexp.MustCompile(`^(?:now-)?\s*(\d+)\s*([a-zA-Z]+)$`) ) // HasTimeVariables checks if the query contains any supported time or table variables. func HasTimeVariables(query string) bool { if query == "" { return false } return patTimeFilterWithCol.MatchString(query) || patTimeFilterSimple.MatchString(query) || patTimestampFilter.MatchString(query) || patTimeField.MatchString(query) || patTimeFrom.MatchString(query) || patTimeTo.MatchString(query) || patInterval.MatchString(query) || patTable.MatchString(query) } // ProcessQueryVariables interpolates all dashboard variables in the given query. func ProcessQueryVariables(opts ProcessorOptions) ProcessedResult { if opts.Query == "" { return ProcessedResult{ Query: "", HasTimeVariables: false, InterpolatedVars: map[string]any{}, Errors: []string{"Invalid query provided"}, } } // Apply defaults. timeFieldUnit := opts.TimeFieldUnit if timeFieldUnit == "" { timeFieldUnit = "ms" } maxDataPoints := opts.MaxDataPoints if maxDataPoints <= 0 { maxDataPoints = 1000 } processedQuery := opts.Query interpolatedVars := map[string]any{} var errors []string // 1. 
Handle $__table replacement. if patTable.MatchString(processedQuery) { if opts.Table != "" { processedQuery = patTable.ReplaceAllString(processedQuery, opts.Table) interpolatedVars["table"] = opts.Table } else { errors = append(errors, "$__table variable used but no table specified") } } // 2. Handle $__timeField replacement. if patTimeField.MatchString(processedQuery) { effectiveTimeField := opts.TimeField if effectiveTimeField == "" { effectiveTimeField = "timestamp" } processedQuery = patTimeField.ReplaceAllString(processedQuery, effectiveTimeField) interpolatedVars["timeField"] = effectiveTimeField } // 3. Handle time-range-related variables. if opts.TimeRange != nil { from, to, ok := getTimeBounds(opts.TimeRange) if ok { // 3a. Handle $__timeFilter(column) with column name in parentheses. if matches := patTimeFilterWithCol.FindStringSubmatch(processedQuery); matches != nil { columnName := strings.TrimSpace(matches[1]) timeFilter := generateTimeFilter(from, to, columnName, timeFieldUnit) processedQuery = patTimeFilterWithCol.ReplaceAllString(processedQuery, timeFilter) interpolatedVars["timeFilter"] = timeFilter interpolatedVars["timeFilterColumn"] = columnName } // 3b. Handle $__timeFilter without column (uses timeField or "timestamp"). // After the column variant is already replaced, only bare $__timeFilter remain. if patTimeFilterSimple.MatchString(processedQuery) { effectiveTimeField := opts.TimeField if effectiveTimeField == "" { effectiveTimeField = "timestamp" } timeFilter := generateTimeFilter(from, to, effectiveTimeField, timeFieldUnit) processedQuery = patTimeFilterSimple.ReplaceAllString(processedQuery, timeFilter) interpolatedVars["timeFilter"] = timeFilter } // 3c. Handle $__timeFrom. if patTimeFrom.MatchString(processedQuery) { fromValue := convertToEpoch(from, timeFieldUnit) processedQuery = patTimeFrom.ReplaceAllString(processedQuery, strconv.FormatInt(fromValue, 10)) interpolatedVars["timeFrom"] = fromValue } // 3d. Handle $__timeTo. 
if patTimeTo.MatchString(processedQuery) { toValue := convertToEpoch(to, timeFieldUnit) processedQuery = patTimeTo.ReplaceAllString(processedQuery, strconv.FormatInt(toValue, 10)) interpolatedVars["timeTo"] = toValue } // 3e. Handle $__interval. if patInterval.MatchString(processedQuery) { intervalSeconds := calculateInterval(from, to, maxDataPoints) processedQuery = patInterval.ReplaceAllString(processedQuery, strconv.Itoa(intervalSeconds)) interpolatedVars["interval"] = intervalSeconds } // 3f. Handle $__timestamp(column) as a DateTime range predicate. if patTimestampFilter.MatchString(processedQuery) { fromSec := from.Unix() toSec := to.Unix() processedQuery = patTimestampFilter.ReplaceAllStringFunc(processedQuery, func(match string) string { matches := patTimestampFilter.FindStringSubmatch(match) if len(matches) < 2 { return match } columnName := strings.TrimSpace(matches[1]) return fmt.Sprintf("(%s >= toDateTime(%d) AND %s <= toDateTime(%d))", columnName, fromSec, columnName, toSec) }) interpolatedVars["timestampFrom"] = fromSec interpolatedVars["timestampTo"] = toSec } } else { errors = append(errors, "Invalid time range provided") } } else if patTimeFilterWithCol.MatchString(processedQuery) || patTimeFilterSimple.MatchString(processedQuery) || patTimeFrom.MatchString(processedQuery) || patTimeTo.MatchString(processedQuery) || patInterval.MatchString(processedQuery) || patTimestampFilter.MatchString(processedQuery) { errors = append(errors, "Time-range variables found but no time range was provided") } return ProcessedResult{ Query: processedQuery, HasTimeVariables: HasTimeVariables(opts.Query), InterpolatedVars: interpolatedVars, Errors: errors, } } // InferTimeUnit infers the time unit from a column name suffix. // Returns "ns", "us", "ms", or "s". Defaults to "ms". 
func InferTimeUnit(columnName string) string { lower := strings.ToLower(columnName) if strings.HasSuffix(lower, "_ns") || strings.Contains(lower, "_ns_") { return "ns" } if strings.HasSuffix(lower, "_us") || strings.Contains(lower, "_us_") { return "us" } if strings.HasSuffix(lower, "_ms") || strings.Contains(lower, "_ms_") { return "ms" } if strings.HasSuffix(lower, "_s") && !strings.HasSuffix(lower, "_ms") { return "s" } return "ms" } // getTimeBounds parses a TimeRange into concrete from/to time.Time values. func getTimeBounds(tr *TimeRange) (from, to time.Time, ok bool) { now := time.Now() if tr.Type == "relative" { toToken := strings.TrimSpace(tr.To) if toToken == "" { toToken = "now" } fromToken := strings.TrimSpace(tr.From) if fromToken == "" { fromToken = "1h" } to = parseRelativeTime(toToken, now) from = parseRelativeTime(fromToken, now) } else { from = parseAbsoluteTime(tr.From) to = parseAbsoluteTime(tr.To) } if from.IsZero() || to.IsZero() { return time.Time{}, time.Time{}, false } if from.After(to) { from, to = to, from } return from, to, true } // parseRelativeTime parses a relative time string like "5m", "1h", "7d", "1M", "1y" // as an offset subtracted from the base time. "now" returns the base time unchanged. func parseRelativeTime(timeStr string, base time.Time) time.Time { trimmed := strings.ToLower(strings.TrimSpace(timeStr)) if trimmed == "" { return base.Add(-5 * time.Minute) } if trimmed == "now" { return base } matches := patRelativeTime.FindStringSubmatch(trimmed) if matches == nil { // Default to 5 minutes ago on invalid input. 
return base.Add(-5 * time.Minute) } value, _ := strconv.Atoi(matches[1]) unit := matches[2] switch unit { case "s", "sec", "secs", "second", "seconds": unit = "s" case "m", "min", "mins", "minute", "minutes": unit = "m" case "h", "hr", "hrs", "hour", "hours": unit = "h" case "d", "day", "days": unit = "d" case "w", "week", "weeks": unit = "w" case "mo", "mon", "month", "months", "mth": unit = "M" case "y", "yr", "yrs", "year", "years": unit = "y" } switch unit { case "s": return base.Add(-time.Duration(value) * time.Second) case "m": return base.Add(-time.Duration(value) * time.Minute) case "h": return base.Add(-time.Duration(value) * time.Hour) case "d": return base.Add(-time.Duration(value) * 24 * time.Hour) case "w": return base.Add(-time.Duration(value) * 7 * 24 * time.Hour) case "M": // Approximate month as 30 days. return base.Add(-time.Duration(value) * 30 * 24 * time.Hour) case "y": // Approximate year as 365 days. return base.Add(-time.Duration(value) * 365 * 24 * time.Hour) default: return base.Add(-5 * time.Minute) } } // parseAbsoluteTime attempts to parse a time string in common formats. func parseAbsoluteTime(s string) time.Time { // Try RFC3339 first (most common for API payloads). if t, err := time.Parse(time.RFC3339, s); err == nil { return t } // Try RFC3339Nano. if t, err := time.Parse(time.RFC3339Nano, s); err == nil { return t } // Try date-only format. if t, err := time.Parse("2006-01-02", s); err == nil { return t } // Try datetime without timezone. if t, err := time.Parse("2006-01-02 15:04:05", s); err == nil { return t } // Try datetime-local without timezone offset. if t, err := time.Parse("2006-01-02T15:04", s); err == nil { return t } if t, err := time.Parse("2006-01-02T15:04:05", s); err == nil { return t } return time.Time{} } // generateTimeFilter builds a SQL time filter condition for the given column. 
func generateTimeFilter(from, to time.Time, columnName, timeUnit string) string { fromValue := convertToEpoch(from, timeUnit) toValue := convertToEpoch(to, timeUnit) return fmt.Sprintf("%s >= %d AND %s <= %d", columnName, fromValue, columnName, toValue) } // convertToEpoch converts a time.Time to an epoch value in the specified unit. func convertToEpoch(t time.Time, unit string) int64 { switch unit { case "ns": return t.UnixNano() case "us": return t.UnixMicro() case "ms": return t.UnixMilli() case "s": return t.Unix() default: return t.UnixMilli() } } // calculateInterval computes the aggregation interval in seconds for a given // time span and maximum number of data points. func calculateInterval(from, to time.Time, maxDataPoints int) int { durationMs := to.Sub(from).Milliseconds() intervalMs := float64(durationMs) / float64(maxDataPoints) if intervalMs < 1000 { intervalMs = 1000 } return int(math.Floor(intervalMs / 1000)) } ================================================ FILE: internal/queryproc/variables_test.go ================================================ package queryproc import ( "strings" "testing" "time" ) func TestParseRelativeTime_NowMinusMinutes(t *testing.T) { base := time.Date(2026, 2, 12, 12, 0, 0, 0, time.UTC) cases := []string{"now-5m", "now-5min", "5m", "5minutes"} for _, tc := range cases { got := parseRelativeTime(tc, base) want := base.Add(-5 * time.Minute) if got.Unix() != want.Unix() { t.Fatalf("%s: expected %v, got %v", tc, want, got) } } } func TestGetTimeBounds_RelativeRangeWithCustomTo(t *testing.T) { from, to, ok := getTimeBounds(&TimeRange{ Type: "relative", From: "now-15m", To: "now-5m", }) if !ok { t.Fatalf("expected valid range") } if !from.Before(to) { t.Fatalf("expected from < to, got from=%v to=%v", from, to) } } func TestProcessQueryVariables_TimestampMacroWithRelativeExpression(t *testing.T) { out := ProcessQueryVariables(ProcessorOptions{ Query: "SELECT count() FROM x WHERE $__timestamp(event_time)", TimeRange: &TimeRange{ 
Type: "relative", From: "now-1h", To: "now", }, }) if len(out.Errors) > 0 { t.Fatalf("unexpected errors: %+v", out.Errors) } if strings.Contains(out.Query, "$__timestamp") { t.Fatalf("timestamp macro was not replaced: %s", out.Query) } } ================================================ FILE: internal/scheduler/cron.go ================================================ package scheduler import ( "strconv" "strings" "time" ) // parseField parses a single cron field (e.g. "*/5", "1-15", "1,5,10", "*") // and returns a set of matching integer values within [min, max]. func parseField(field string, min, max int) map[int]bool { values := make(map[int]bool) parts := strings.Split(field, ",") for _, part := range parts { rangePart := part step := 1 if idx := strings.Index(part, "/"); idx >= 0 { rangePart = part[:idx] s, err := strconv.Atoi(part[idx+1:]) if err != nil || s <= 0 { continue } step = s } switch { case rangePart == "*": // Every value from min to max, filtered by step. for v := min; v <= max; v++ { if (v-min)%step == 0 { values[v] = true } } case strings.Contains(rangePart, "-"): bounds := strings.SplitN(rangePart, "-", 2) s, err1 := strconv.Atoi(bounds[0]) e, err2 := strconv.Atoi(bounds[1]) if err1 != nil || err2 != nil { continue } for v := s; v <= e; v++ { if (v-s)%step == 0 { values[v] = true } } default: num, err := strconv.Atoi(rangePart) if err == nil { values[num] = true } } } return values } // ComputeNextRun parses a standard 5-field cron expression (minute hour dom month dow) // and returns the next matching UTC time after `from`, iterating minute by minute // up to 1 year ahead. Returns nil if no match is found. 
func ComputeNextRun(cron string, from time.Time) *time.Time { fields := strings.Fields(strings.TrimSpace(cron)) if len(fields) != 5 { return nil } minutes := parseField(fields[0], 0, 59) hours := parseField(fields[1], 0, 23) dom := parseField(fields[2], 1, 31) months := parseField(fields[3], 1, 12) dow := parseField(fields[4], 0, 6) if len(minutes) == 0 || len(hours) == 0 || len(dom) == 0 || len(months) == 0 || len(dow) == 0 { return nil } // Start from the next minute after `from`, truncated to the minute boundary. next := from.UTC().Truncate(time.Minute).Add(time.Minute) // 525600 minutes = 1 year for i := 0; i < 525600; i++ { m := next.Minute() h := next.Hour() d := next.Day() mo := int(next.Month()) dw := int(next.Weekday()) // Sunday=0 if minutes[m] && hours[h] && dom[d] && months[mo] && dow[dw] { result := next return &result } next = next.Add(time.Minute) } return nil } // ValidateCron returns true if the cron expression is syntactically valid // and can produce at least one future run time. func ValidateCron(cron string) bool { return ComputeNextRun(cron, time.Now()) != nil } ================================================ FILE: internal/scheduler/runner.go ================================================ package scheduler import ( "encoding/json" "fmt" "log/slog" "sync" "time" "github.com/caioricciuti/ch-ui/internal/alerts" "github.com/caioricciuti/ch-ui/internal/crypto" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/tunnel" ) const ( tickInterval = 30 * time.Second maxConcurrent = 3 ) // Runner executes due scheduled jobs on a 30-second tick interval. type Runner struct { db *database.DB gateway *tunnel.Gateway secret string stopCh chan struct{} } // NewRunner creates a new schedule runner. 
func NewRunner(db *database.DB, gw *tunnel.Gateway, secret string) *Runner {
	return &Runner{
		db:      db,
		gateway: gw,
		secret:  secret,
		stopCh:  make(chan struct{}),
	}
}

// Start begins the runner goroutine that ticks every 30 seconds.
func (r *Runner) Start() {
	go func() {
		slog.Info("Schedule runner started", "interval", tickInterval)
		ticker := time.NewTicker(tickInterval)
		defer ticker.Stop()
		for {
			select {
			case <-r.stopCh:
				slog.Info("Schedule runner stopped")
				return
			case <-ticker.C:
				r.tick()
			}
		}
	}()
}

// Stop signals the runner goroutine to stop.
// Must be called at most once: close panics on an already-closed channel.
func (r *Runner) Stop() {
	close(r.stopCh)
}

// tick fetches due jobs from SQLite and executes them concurrently.
func (r *Runner) tick() {
	schedules, err := r.db.GetEnabledSchedules()
	if err != nil {
		slog.Error("Failed to load enabled schedules", "error", err)
		return
	}
	now := time.Now().UTC()
	var due []database.Schedule
	for _, s := range schedules {
		// Schedules without a computed next-run time are skipped.
		if s.NextRunAt == nil {
			continue
		}
		nextRun, err := time.Parse(time.RFC3339, *s.NextRunAt)
		if err != nil {
			// Unparseable timestamps are silently skipped.
			continue
		}
		if nextRun.After(now) {
			continue
		}
		due = append(due, s)
	}
	if len(due) == 0 {
		return
	}
	slog.Info("Processing due scheduled jobs", "count", len(due))
	// Bounded fan-out: at most maxConcurrent jobs run at once; the
	// semaphore slot is acquired before spawning so the loop itself blocks.
	sem := make(chan struct{}, maxConcurrent)
	var wg sync.WaitGroup
	for _, schedule := range due {
		wg.Add(1)
		sem <- struct{}{}
		go func(s database.Schedule) {
			defer wg.Done()
			defer func() { <-sem }()
			r.runSchedule(s)
		}(schedule)
	}
	wg.Wait()
}

// runSchedule executes one scheduled saved query end to end, then (via the
// deferred closure) records the outcome: run row, schedule status with next
// run time, audit log entry, and failure/slow alert events.
func (r *Runner) runSchedule(schedule database.Schedule) {
	// Create a run record
	runID, err := r.db.CreateScheduleRun(schedule.ID, "running")
	if err != nil {
		// Execution proceeds anyway; the runID != "" guard below skips the
		// run-row update when creation failed.
		slog.Error("Failed to create schedule run", "error", err, "schedule", schedule.ID)
	}
	start := time.Now()
	status := "success"
	var runError string
	rowCount := 0
	connectionID := ""
	defer func() {
		elapsed := int(time.Since(start).Milliseconds())
		// Update run record
		if runID != "" {
			r.db.UpdateScheduleRun(runID, status, rowCount, elapsed, runError)
		}
		// Update schedule status
		var nextRun *time.Time
		if schedule.Enabled {
			nextRun = ComputeNextRun(schedule.Cron, time.Now().UTC())
		}
		r.db.UpdateScheduleStatus(schedule.ID, status, runError, nextRun)
		// Audit log
		details := fmt.Sprintf("schedule=%s status=%s elapsed=%dms", schedule.Name, status, elapsed)
		r.db.CreateAuditLog(database.AuditLogParams{
			Action:       "schedule.run",
			ConnectionID: schedule.ConnectionID,
			Details:      &details,
		})
		slog.Info("Scheduled job completed",
			"schedule", schedule.ID,
			"name", schedule.Name,
			"status", status,
			"elapsed_ms", elapsed,
		)
		if status == "error" {
			// Failure alert, fingerprinted per schedule so repeated failures
			// can be deduplicated downstream.
			fingerprint := fmt.Sprintf("schedule:%s:error", schedule.ID)
			payload := map[string]interface{}{
				"schedule_id":   schedule.ID,
				"schedule_name": schedule.Name,
				"run_id":        runID,
				"elapsed_ms":    elapsed,
				"error":         runError,
				"row_count":     rowCount,
			}
			connPtr := nullableConnectionID(connectionID)
			if _, alertErr := r.db.CreateAlertEvent(
				connPtr,
				alerts.EventTypeScheduleFailed,
				alerts.SeverityError,
				fmt.Sprintf("Scheduled query failed: %s", schedule.Name),
				runError,
				payload,
				fingerprint,
				runID,
			); alertErr != nil {
				slog.Warn("Failed to create schedule failure alert event", "schedule", schedule.ID, "error", alertErr)
			}
		} else if status == "success" {
			// Slow-run alert threshold: 80% of the timeout (with a 60s
			// baseline for small/unset timeouts) and a hard 5s floor.
			threshold := int(float64(maxInt(schedule.TimeoutMs, 60000)) * 0.8)
			if threshold < 5000 {
				threshold = 5000
			}
			if elapsed >= threshold {
				fingerprint := fmt.Sprintf("schedule:%s:slow", schedule.ID)
				payload := map[string]interface{}{
					"schedule_id":       schedule.ID,
					"schedule_name":     schedule.Name,
					"run_id":            runID,
					"elapsed_ms":        elapsed,
					"slow_threshold_ms": threshold,
					"timeout_ms":        schedule.TimeoutMs,
					"row_count":         rowCount,
				}
				connPtr := nullableConnectionID(connectionID)
				if _, alertErr := r.db.CreateAlertEvent(
					connPtr,
					alerts.EventTypeScheduleSlow,
					alerts.SeverityWarn,
					fmt.Sprintf("Scheduled query slow run: %s", schedule.Name),
					fmt.Sprintf("Run took %dms (threshold %dms)", elapsed, threshold),
					payload,
					fingerprint,
					runID,
				); alertErr != nil {
					slog.Warn("Failed to create schedule slow alert event", "schedule", schedule.ID, "error", alertErr)
				}
			}
		}
	}()
	// Fetch the saved query from SQLite
	savedQuery, err := r.db.GetSavedQueryByID(schedule.SavedQueryID)
	if err != nil {
		status = "error"
		runError = fmt.Sprintf("failed to fetch saved query: %v", err)
		return
	}
	if savedQuery == nil {
		status = "error"
		runError = "saved query not found"
		return
	}
	// Determine connection ID: the schedule's own connection wins over the
	// saved query's.
	if schedule.ConnectionID != nil && *schedule.ConnectionID != "" {
		connectionID = *schedule.ConnectionID
	} else if savedQuery.ConnectionID != nil && *savedQuery.ConnectionID != "" {
		connectionID = *savedQuery.ConnectionID
	}
	if connectionID == "" {
		status = "error"
		runError = "no connection ID configured for schedule or saved query"
		return
	}
	// Check that the tunnel is online
	if !r.gateway.IsTunnelOnline(connectionID) {
		status = "error"
		runError = "tunnel not connected"
		return
	}
	// Find credentials from an active session for this connection
	user, password, credErr := r.findCredentials(connectionID)
	if credErr != nil {
		status = "error"
		runError = fmt.Sprintf("no credentials available: %v", credErr)
		return
	}
	// Execute the query
	timeout := time.Duration(schedule.TimeoutMs) * time.Millisecond
	if timeout <= 0 {
		timeout = 60 * time.Second
	}
	result, execErr := r.gateway.ExecuteQuery(connectionID, savedQuery.Query, user, password, timeout)
	if execErr != nil {
		status = "error"
		runError = execErr.Error()
		return
	}
	rowCount = countRows(result)
}

// nullableConnectionID returns nil for an empty connection ID, otherwise a
// pointer to a copy of it.
func nullableConnectionID(connectionID string) *string {
	if connectionID == "" {
		return nil
	}
	id := connectionID
	return &id
}

// maxInt returns the larger of a and b.
func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// findCredentials looks for active session credentials for a connection.
func (r *Runner) findCredentials(connectionID string) (string, string, error) { sessions, err := r.db.GetActiveSessionsByConnection(connectionID, 3) if err != nil { return "", "", fmt.Errorf("failed to load sessions: %w", err) } for _, s := range sessions { password, err := crypto.Decrypt(s.EncryptedPassword, r.secret) if err != nil { continue } return s.ClickhouseUser, password, nil } return "", "", fmt.Errorf("no active sessions with valid credentials for connection %s", connectionID) } // countRows counts rows in a query result. func countRows(result *tunnel.QueryResult) int { if result == nil || len(result.Data) == 0 { return 0 } var rows []json.RawMessage if err := json.Unmarshal(result.Data, &rows); err != nil { return 0 } return len(rows) } ================================================ FILE: internal/server/handlers/admin.go ================================================ package handlers import ( "encoding/json" "fmt" "log/slog" "net/http" "strings" "time" "github.com/go-chi/chi/v5" "github.com/caioricciuti/ch-ui/internal/config" "github.com/caioricciuti/ch-ui/internal/crypto" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/governance" "github.com/caioricciuti/ch-ui/internal/langfuse" "github.com/caioricciuti/ch-ui/internal/server/middleware" "github.com/caioricciuti/ch-ui/internal/tunnel" ) // AdminHandler handles admin-only routes for ClickHouse management. // All routes require the admin role, enforced by middleware.RequireAdmin. type AdminHandler struct { DB *database.DB Gateway *tunnel.Gateway Config *config.Config Langfuse *langfuse.Client GovSyncer *governance.Syncer } // Routes registers all admin routes on the given chi.Router. 
func (h *AdminHandler) Routes(r chi.Router) { r.Use(middleware.RequireAdmin(h.DB)) r.Get("/users", h.GetUsers) r.Get("/user-roles", h.GetUserRoles) r.Put("/user-roles/{username}", h.SetUserRole) r.Delete("/user-roles/{username}", h.DeleteUserRole) r.Get("/connections", h.GetConnections) r.Get("/stats", h.GetStats) r.Get("/clickhouse-users", h.GetClickHouseUsers) r.Post("/clickhouse-users", h.CreateClickHouseUser) r.Put("/clickhouse-users/{username}/password", h.UpdateClickHouseUserPassword) r.Delete("/clickhouse-users/{username}", h.DeleteClickHouseUser) // Brain admin management r.Get("/brain/providers", h.ListBrainProviders) r.Post("/brain/providers", h.CreateBrainProvider) r.Put("/brain/providers/{id}", h.UpdateBrainProvider) r.Delete("/brain/providers/{id}", h.DeleteBrainProvider) r.Post("/brain/providers/{id}/sync-models", h.SyncBrainProviderModels) r.Get("/brain/models", h.ListBrainModels) r.Put("/brain/models/{id}", h.UpdateBrainModel) r.Post("/brain/models/bulk", h.BulkUpdateBrainModels) r.Get("/brain/skills", h.ListBrainSkills) r.Post("/brain/skills", h.CreateBrainSkill) r.Put("/brain/skills/{id}", h.UpdateBrainSkill) // Langfuse observability r.Get("/langfuse", h.GetLangfuseConfig) r.Put("/langfuse", h.UpdateLangfuseConfig) r.Delete("/langfuse", h.DeleteLangfuseConfig) r.Post("/langfuse/test", h.TestLangfuseConnection) // Governance feature toggle (Pro; stays admin-only, not Pro-gated at this // level so admins can inspect/disable the toggle even when the license // lapses — the syncer itself is Pro-gated at startup). 
r.Get("/governance/settings", h.GetGovernanceSettings) r.Put("/governance/settings", h.UpdateGovernanceSettings) } // ---------- GET /users ---------- func (h *AdminHandler) GetUsers(w http.ResponseWriter, r *http.Request) { users, err := h.DB.GetUsers() if err != nil { slog.Error("Failed to get users", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve users"}) return } roleOverrides, err := h.DB.GetAllUserRoles() if err != nil { slog.Error("Failed to get user role overrides", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve user role overrides"}) return } userMap := make(map[string]database.SessionUser, len(users)+len(roleOverrides)) for _, u := range users { userMap[u.Username] = u } for _, ov := range roleOverrides { if _, exists := userMap[ov.Username]; exists { continue } userMap[ov.Username] = database.SessionUser{ Username: ov.Username, UserRole: ov.Role, LastLogin: "", SessionCount: 0, } } appUsers := make([]database.SessionUser, 0, len(userMap)) for _, u := range userMap { appUsers = append(appUsers, u) } includeStale := false switch strings.ToLower(strings.TrimSpace(r.URL.Query().Get("include_stale"))) { case "1", "true", "yes": includeStale = true } existsMap, err := h.fetchCurrentClickHouseUsers(r) if err != nil { slog.Warn("Admin users: failed to compare with current ClickHouse users", "error", err) // Fallback to app-local users only if ClickHouse comparison fails. 
if appUsers == nil { appUsers = []database.SessionUser{} } writeJSON(w, http.StatusOK, map[string]interface{}{ "users": appUsers, "sync": map[string]bool{"clickhouse_user_check": false}, }) return } type responseUser struct { database.SessionUser ExistsInClickHouse bool `json:"exists_in_clickhouse"` } filtered := make([]responseUser, 0, len(appUsers)) for _, u := range appUsers { exists := existsMap[u.Username] if !includeStale && !exists { continue } filtered = append(filtered, responseUser{ SessionUser: u, ExistsInClickHouse: exists, }) } if filtered == nil { filtered = []responseUser{} } writeJSON(w, http.StatusOK, map[string]interface{}{ "users": filtered, "sync": map[string]bool{ "clickhouse_user_check": true, }, }) } func (h *AdminHandler) fetchCurrentClickHouseUsers(r *http.Request) (map[string]bool, error) { session := middleware.GetSession(r) if session == nil { return nil, fmt.Errorf("not authenticated") } if !h.Gateway.IsTunnelOnline(session.ConnectionID) { return nil, fmt.Errorf("tunnel offline") } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { return nil, fmt.Errorf("decrypt credentials: %w", err) } result, err := h.Gateway.ExecuteQuery( session.ConnectionID, "SELECT name FROM system.users ORDER BY name FORMAT JSON", session.ClickhouseUser, password, 20*time.Second, ) if err != nil { return nil, err } var rows []map[string]interface{} if err := json.Unmarshal(result.Data, &rows); err != nil { return nil, fmt.Errorf("parse system.users result: %w", err) } exists := make(map[string]bool, len(rows)) for _, row := range rows { if name, ok := row["name"].(string); ok && strings.TrimSpace(name) != "" { exists[name] = true } } return exists, nil } // ---------- GET /user-roles ---------- func (h *AdminHandler) GetUserRoles(w http.ResponseWriter, r *http.Request) { roles, err := h.DB.GetAllUserRoles() if err != nil { slog.Error("Failed to get user roles", "error", err) writeJSON(w, 
http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve user roles"}) return } if roles == nil { roles = []database.UserRole{} } writeJSON(w, http.StatusOK, roles) } // ---------- PUT /user-roles/{username} ---------- func (h *AdminHandler) SetUserRole(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) username := chi.URLParam(r, "username") if username == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Username is required"}) return } var body struct { Role string `json:"role"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } body.Role = strings.ToLower(strings.TrimSpace(body.Role)) validRoles := map[string]bool{"admin": true, "analyst": true, "viewer": true} if !validRoles[body.Role] { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Role must be one of: admin, analyst, viewer"}) return } isTargetAdmin, err := h.DB.IsUserRole(username, "admin") if err != nil { slog.Error("Failed checking current role assignment", "error", err, "user", username) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to validate current role"}) return } if isTargetAdmin && body.Role != "admin" { adminCount, err := h.DB.CountUsersWithRole("admin") if err != nil { slog.Error("Failed counting admins", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to validate admin safety rule"}) return } if adminCount <= 1 { writeJSON(w, http.StatusBadRequest, map[string]string{ "error": "Cannot remove the last admin. 
Assign another admin first.", }) return } } if err := h.DB.SetUserRole(username, body.Role); err != nil { slog.Error("Failed to set user role", "error", err, "user", username) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to set user role"}) return } if err := h.DB.SetSessionsUserRole(username, body.Role); err != nil { slog.Warn("Failed to refresh active session roles after role update", "error", err, "user", username) } var actorName *string if session != nil { actorName = strPtr(session.ClickhouseUser) } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "user_role.set", Username: actorName, Details: strPtr(fmt.Sprintf("Set role for %q to %s", username, body.Role)), IPAddress: strPtr(r.RemoteAddr), }) writeJSON(w, http.StatusOK, map[string]string{ "message": "User role updated", "username": username, "role": body.Role, }) } // ---------- DELETE /user-roles/{username} ---------- func (h *AdminHandler) DeleteUserRole(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) username := chi.URLParam(r, "username") if username == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Username is required"}) return } isTargetAdmin, err := h.DB.IsUserRole(username, "admin") if err != nil { slog.Error("Failed checking current role assignment", "error", err, "user", username) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to validate current role"}) return } if isTargetAdmin { adminCount, err := h.DB.CountUsersWithRole("admin") if err != nil { slog.Error("Failed counting admins", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to validate admin safety rule"}) return } if adminCount <= 1 { writeJSON(w, http.StatusBadRequest, map[string]string{ "error": "Cannot remove the last admin. 
Assign another admin first.", }) return } } if err := h.DB.DeleteUserRole(username); err != nil { slog.Error("Failed to delete user role", "error", err, "user", username) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete user role"}) return } if err := h.DB.SetSessionsUserRole(username, "viewer"); err != nil { slog.Warn("Failed to refresh active session roles after role override removal", "error", err, "user", username) } var actorName *string if session != nil { actorName = strPtr(session.ClickhouseUser) } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "user_role.deleted", Username: actorName, Details: strPtr(fmt.Sprintf("Removed role override for %q", username)), IPAddress: strPtr(r.RemoteAddr), }) writeJSON(w, http.StatusOK, map[string]string{ "message": "User role override removed", "username": username, }) } // ---------- GET /connections ---------- func (h *AdminHandler) GetConnections(w http.ResponseWriter, r *http.Request) { conns, err := h.DB.GetConnections() if err != nil { slog.Error("Failed to list connections", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve connections"}) return } type connInfo struct { ID string `json:"id"` Name string `json:"name"` Status string `json:"status"` Online bool `json:"online"` CreatedAt string `json:"created_at"` LastSeen *string `json:"last_seen_at,omitempty"` } results := make([]connInfo, 0, len(conns)) for _, c := range conns { results = append(results, connInfo{ ID: c.ID, Name: c.Name, Status: c.Status, Online: h.Gateway.IsTunnelOnline(c.ID), CreatedAt: c.CreatedAt, LastSeen: c.LastSeenAt, }) } writeJSON(w, http.StatusOK, results) } // ---------- GET /stats ---------- func (h *AdminHandler) GetStats(w http.ResponseWriter, r *http.Request) { users, err := h.DB.GetUsers() if err != nil { slog.Error("Failed to get users for stats", "error", err) users = []database.SessionUser{} } conns, err := h.DB.GetConnections() if 
err != nil { slog.Error("Failed to get connections for stats", "error", err) conns = []database.Connection{} } onlineCount := 0 for _, c := range conns { if h.Gateway.IsTunnelOnline(c.ID) { onlineCount++ } } auditLogs, err := h.DB.GetAuditLogs(1000) if err != nil { slog.Error("Failed to get audit logs for stats", "error", err) auditLogs = []database.AuditLog{} } loginCount := 0 queryCount := 0 for _, log := range auditLogs { switch log.Action { case "user.login": loginCount++ case "query.execute": queryCount++ } } writeJSON(w, http.StatusOK, map[string]interface{}{ "users_count": len(users), "connections": len(conns), "online": onlineCount, "login_count": loginCount, "query_count": queryCount, }) } // ---------- GET /clickhouse-users ---------- func (h *AdminHandler) GetClickHouseUsers(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } if !h.Gateway.IsTunnelOnline(session.ConnectionID) { writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "Tunnel is offline"}) return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to decrypt credentials"}) return } query := "SELECT name, storage, auth_type, host_ip, host_names, default_roles_all, default_roles_list, default_roles_except FROM system.users ORDER BY name FORMAT JSON" result, err := h.Gateway.ExecuteQuery( session.ConnectionID, query, session.ClickhouseUser, password, 30*time.Second, ) if err != nil { slog.Warn("Failed to query system.users", "error", err, "connection", session.ConnectionID) writeJSON(w, http.StatusBadGateway, map[string]string{"error": err.Error()}) return } writeJSON(w, http.StatusOK, map[string]interface{}{ "data": result.Data, "meta": result.Meta, }) } // 
---------- POST /clickhouse-users ---------- func (h *AdminHandler) CreateClickHouseUser(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } if !h.Gateway.IsTunnelOnline(session.ConnectionID) { writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "Tunnel is offline"}) return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to decrypt credentials"}) return } var body struct { Name string `json:"name"` Password string `json:"password"` AuthType string `json:"auth_type"` DefaultRoles []string `json:"default_roles"` IfNotExists *bool `json:"if_not_exists"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } name := strings.TrimSpace(body.Name) if name == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name is required"}) return } authType := strings.TrimSpace(strings.ToLower(body.AuthType)) if authType == "" { if strings.TrimSpace(body.Password) == "" { authType = "no_password" } else { authType = "sha256_password" } } switch authType { case "no_password", "plaintext_password", "sha256_password", "double_sha1_password": default: writeJSON(w, http.StatusBadRequest, map[string]string{"error": "auth_type must be one of: no_password, plaintext_password, sha256_password, double_sha1_password"}) return } if authType != "no_password" && strings.TrimSpace(body.Password) == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "password is required for selected auth_type"}) return } allRoles, roleNames, parseErr := parseDefaultRolesInput(body.DefaultRoles) if parseErr != nil { writeJSON(w, 
http.StatusBadRequest, map[string]string{"error": parseErr.Error()}) return } var createSQL strings.Builder createSQL.WriteString("CREATE USER ") if body.IfNotExists == nil || *body.IfNotExists { createSQL.WriteString("IF NOT EXISTS ") } createSQL.WriteString(escapeIdentifier(name)) createSQL.WriteString(buildClickHouseCreateAuthClause(authType, body.Password)) createSQLStr := createSQL.String() executedCommands := []string{createSQLStr} if _, err := h.Gateway.ExecuteQuery(session.ConnectionID, createSQLStr, session.ClickhouseUser, password, 30*time.Second); err != nil { slog.Warn("Failed to create ClickHouse user", "error", err, "connection", session.ConnectionID, "name", name) writeJSON(w, http.StatusBadGateway, map[string]string{"error": fmt.Sprintf("%s\n\nCommand:\n%s", err.Error(), createSQLStr)}) return } escapedRoles := make([]string, 0, len(roleNames)) for _, role := range roleNames { escapedRoles = append(escapedRoles, escapeIdentifier(role)) } // Apply role membership/default role as follow-up statements for broad ClickHouse compatibility. 
if len(escapedRoles) > 0 { grantSQL := "GRANT " + strings.Join(escapedRoles, ", ") + " TO " + escapeIdentifier(name) executedCommands = append(executedCommands, grantSQL) if _, err := h.Gateway.ExecuteQuery(session.ConnectionID, grantSQL, session.ClickhouseUser, password, 30*time.Second); err != nil { slog.Warn("ClickHouse user created but role grant failed", "error", err, "connection", session.ConnectionID, "name", name) writeJSON(w, http.StatusBadGateway, map[string]string{"error": fmt.Sprintf("user created but failed to grant roles: %v\n\nCommand:\n%s", err, grantSQL)}) return } } if allRoles || len(escapedRoles) > 0 { defaultRoleClause := "ALL" if !allRoles { defaultRoleClause = strings.Join(escapedRoles, ", ") } alterSQL := "ALTER USER " + escapeIdentifier(name) + " DEFAULT ROLE " + defaultRoleClause executedCommands = append(executedCommands, alterSQL) if _, err := h.Gateway.ExecuteQuery(session.ConnectionID, alterSQL, session.ClickhouseUser, password, 30*time.Second); err != nil { slog.Warn("ClickHouse user created but default role assignment failed", "error", err, "connection", session.ConnectionID, "name", name) writeJSON(w, http.StatusBadGateway, map[string]string{"error": fmt.Sprintf("user created but failed to set default role: %v\n\nCommand:\n%s", err, alterSQL)}) return } } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "admin.clickhouse_user.created", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(fmt.Sprintf("name=%s auth_type=%s", name, authType)), IPAddress: strPtr(r.RemoteAddr), }) writeJSON(w, http.StatusCreated, map[string]interface{}{ "success": true, "name": name, "command": createSQLStr, "commands": executedCommands, }) } // ---------- PUT /clickhouse-users/{username}/password ---------- func (h *AdminHandler) UpdateClickHouseUserPassword(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, 
map[string]string{"error": "Not authenticated"}) return } if !h.Gateway.IsTunnelOnline(session.ConnectionID) { writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "Tunnel is offline"}) return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to decrypt credentials"}) return } username := strings.TrimSpace(chi.URLParam(r, "username")) if username == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "username is required"}) return } var body struct { Password string `json:"password"` AuthType string `json:"auth_type"` IfExists *bool `json:"if_exists"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } authType := strings.TrimSpace(strings.ToLower(body.AuthType)) if authType == "" { authType = "sha256_password" } switch authType { case "no_password", "plaintext_password", "sha256_password", "double_sha1_password": default: writeJSON(w, http.StatusBadRequest, map[string]string{"error": "auth_type must be one of: no_password, plaintext_password, sha256_password, double_sha1_password"}) return } if authType != "no_password" && strings.TrimSpace(body.Password) == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "password is required for selected auth_type"}) return } ifExists := body.IfExists == nil || *body.IfExists var b strings.Builder b.WriteString("ALTER USER ") if ifExists { b.WriteString("IF EXISTS ") } b.WriteString(escapeIdentifier(username)) b.WriteString(buildClickHouseAlterAuthClause(authType, body.Password)) if _, err := h.Gateway.ExecuteQuery(session.ConnectionID, b.String(), session.ClickhouseUser, password, 30*time.Second); err != nil { slog.Warn("Failed to update ClickHouse user password", "error", err, 
"connection", session.ConnectionID, "name", username) writeJSON(w, http.StatusBadGateway, map[string]string{"error": fmt.Sprintf("%s\n\nCommand:\n%s", err.Error(), b.String())}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "admin.clickhouse_user.password_changed", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(fmt.Sprintf("name=%s auth_type=%s", username, authType)), IPAddress: strPtr(r.RemoteAddr), }) writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "command": b.String(), }) } func buildClickHouseCreateAuthClause(authType, password string) string { switch authType { case "no_password": return "" case "plaintext_password": return " IDENTIFIED BY '" + escapeLiteral(password) + "'" default: return " IDENTIFIED WITH " + authType + " BY '" + escapeLiteral(password) + "'" } } func buildClickHouseAlterAuthClause(authType, password string) string { switch authType { case "no_password": return " IDENTIFIED WITH no_password" case "plaintext_password": return " IDENTIFIED BY '" + escapeLiteral(password) + "'" default: return " IDENTIFIED WITH " + authType + " BY '" + escapeLiteral(password) + "'" } } func parseDefaultRolesInput(input []string) (all bool, roles []string, err error) { seen := make(map[string]struct{}, len(input)) for _, raw := range input { role := strings.TrimSpace(raw) if role == "" { continue } if strings.EqualFold(role, "ALL") { all = true continue } key := strings.ToLower(role) if _, exists := seen[key]; exists { continue } seen[key] = struct{}{} roles = append(roles, role) } if all && len(roles) > 0 { return false, nil, fmt.Errorf("default_roles cannot mix ALL with named roles") } return all, roles, nil } // ---------- DELETE /clickhouse-users/{username} ---------- func (h *AdminHandler) DeleteClickHouseUser(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, 
map[string]string{"error": "Not authenticated"}) return } if !h.Gateway.IsTunnelOnline(session.ConnectionID) { writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "Tunnel is offline"}) return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to decrypt credentials"}) return } username := strings.TrimSpace(chi.URLParam(r, "username")) if username == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "username is required"}) return } if username == session.ClickhouseUser { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "cannot delete current session user"}) return } ifExists := true if raw := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("if_exists"))); raw == "false" || raw == "0" { ifExists = false } sql := "DROP USER " if ifExists { sql += "IF EXISTS " } sql += escapeIdentifier(username) if _, err := h.Gateway.ExecuteQuery(session.ConnectionID, sql, session.ClickhouseUser, password, 30*time.Second); err != nil { slog.Warn("Failed to delete ClickHouse user", "error", err, "connection", session.ConnectionID, "name", username) writeJSON(w, http.StatusBadGateway, map[string]string{"error": fmt.Sprintf("%s\n\nCommand:\n%s", err.Error(), sql)}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "admin.clickhouse_user.deleted", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(fmt.Sprintf("name=%s", username)), IPAddress: strPtr(r.RemoteAddr), }) writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "command": sql, }) } // ---------- Helpers ---------- func escapeString(s string) string { s = strings.ReplaceAll(s, `\`, `\\`) s = strings.ReplaceAll(s, `'`, `''`) s = strings.ReplaceAll(s, `%`, `\%`) s = strings.ReplaceAll(s, `_`, `\_`) return s } 
================================================ FILE: internal/server/handlers/admin_brain.go ================================================ package handlers import ( "context" "encoding/json" "fmt" "log/slog" "net/http" "sort" "strings" "time" braincore "github.com/caioricciuti/ch-ui/internal/brain" "github.com/caioricciuti/ch-ui/internal/crypto" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/server/middleware" "github.com/go-chi/chi/v5" ) func normalizeProviderKind(kind string) (string, bool) { switch strings.ToLower(strings.TrimSpace(kind)) { case "openai": return "openai", true case "openai_compatible", "openai-compatible": return "openai_compatible", true case "ollama": return "ollama", true default: return "", false } } func modelDisplayName(m database.BrainModel) string { if m.DisplayName != nil && strings.TrimSpace(*m.DisplayName) != "" { return strings.TrimSpace(*m.DisplayName) } return m.Name } func scoreRecommendedModel(name string) int { n := strings.ToLower(strings.TrimSpace(name)) switch { case strings.Contains(n, "gpt-5"): return 100 case strings.Contains(n, "gpt-4.1"): return 95 case strings.Contains(n, "gpt-4o"): return 90 case strings.Contains(n, "gpt-4"): return 80 case strings.Contains(n, "o3"), strings.Contains(n, "o1"): return 70 case strings.Contains(n, "claude"): return 60 case strings.Contains(n, "llama"), strings.Contains(n, "qwen"), strings.Contains(n, "mistral"), strings.Contains(n, "gemma"): return 50 default: return 10 } } func pickRecommendedModel(models []database.BrainModel) *database.BrainModel { if len(models) == 0 { return nil } ordered := make([]database.BrainModel, 0, len(models)) ordered = append(ordered, models...) 
sort.SliceStable(ordered, func(i, j int) bool { a := ordered[i] b := ordered[j] sa := scoreRecommendedModel(a.Name) sb := scoreRecommendedModel(b.Name) if sa != sb { return sa > sb } return strings.ToLower(a.Name) < strings.ToLower(b.Name) }) return &ordered[0] } func applyModelBulkAction(db *database.DB, providerID, action string) (int, error) { models, err := db.GetBrainModels(providerID) if err != nil { return 0, err } if len(models) == 0 { return 0, nil } switch action { case "deactivate_all": if err := db.ClearDefaultBrainModelsByProvider(providerID); err != nil { return 0, err } for _, m := range models { if err := db.SetBrainModelActive(m.ID, false); err != nil { return 0, err } } return len(models), nil case "activate_all": var defaultModelID string for _, m := range models { if m.IsDefault { defaultModelID = m.ID break } } if defaultModelID == "" { rec := pickRecommendedModel(models) if rec != nil { defaultModelID = rec.ID } } for _, m := range models { display := modelDisplayName(m) isDefault := m.ID == defaultModelID if err := db.UpdateBrainModel(m.ID, display, true, isDefault); err != nil { return 0, err } } return len(models), nil case "activate_recommended": rec := pickRecommendedModel(models) if rec == nil { return 0, nil } if err := db.ClearDefaultBrainModelsByProvider(providerID); err != nil { return 0, err } for _, m := range models { display := modelDisplayName(m) isRec := m.ID == rec.ID if err := db.UpdateBrainModel(m.ID, display, isRec, isRec); err != nil { return 0, err } } return len(models), nil default: return 0, fmt.Errorf("unsupported action: %s", action) } } func (h *AdminHandler) ListBrainProviders(w http.ResponseWriter, r *http.Request) { providers, err := h.DB.GetBrainProviders() if err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list providers"}) return } if providers == nil { providers = []database.BrainProvider{} } writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, 
"providers": providers}) } func (h *AdminHandler) CreateBrainProvider(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) var body struct { Name string `json:"name"` Kind string `json:"kind"` BaseURL string `json:"baseUrl"` APIKey string `json:"apiKey"` IsActive *bool `json:"isActive"` IsDefault *bool `json:"isDefault"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } name := strings.TrimSpace(body.Name) if name == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Provider name is required"}) return } kind, ok := normalizeProviderKind(body.Kind) if !ok { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Provider kind must be openai, openai_compatible, or ollama"}) return } var encryptedKey *string if strings.TrimSpace(body.APIKey) != "" { encrypted, err := crypto.Encrypt(strings.TrimSpace(body.APIKey), h.Config.AppSecretKey) if err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to encrypt API key"}) return } encryptedKey = &encrypted } isActive := true if body.IsActive != nil { isActive = *body.IsActive } isDefault := false if body.IsDefault != nil { isDefault = *body.IsDefault } actor := "" if session != nil { actor = session.ClickhouseUser } providerID, err := h.DB.CreateBrainProvider(name, kind, body.BaseURL, encryptedKey, isActive, isDefault, actor) if err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create provider"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "brain.provider.created", Username: strPtr(actor), Details: strPtr(fmt.Sprintf("provider=%s kind=%s", name, kind)), IPAddress: strPtr(r.RemoteAddr), }) writeJSON(w, http.StatusCreated, map[string]interface{}{"success": true, "id": providerID}) } func (h *AdminHandler) UpdateBrainProvider(w http.ResponseWriter, r *http.Request) { 
session := middleware.GetSession(r) providerID := chi.URLParam(r, "id") if strings.TrimSpace(providerID) == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Provider ID is required"}) return } existing, err := h.DB.GetBrainProviderByID(providerID) if err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load provider"}) return } if existing == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Provider not found"}) return } var body struct { Name *string `json:"name"` Kind *string `json:"kind"` BaseURL *string `json:"baseUrl"` APIKey *string `json:"apiKey"` IsActive *bool `json:"isActive"` IsDefault *bool `json:"isDefault"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } name := existing.Name if body.Name != nil && strings.TrimSpace(*body.Name) != "" { name = strings.TrimSpace(*body.Name) } kind := existing.Kind if body.Kind != nil { n, ok := normalizeProviderKind(*body.Kind) if !ok { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Provider kind must be openai, openai_compatible, or ollama"}) return } kind = n } baseURL := "" if existing.BaseURL != nil { baseURL = *existing.BaseURL } if body.BaseURL != nil { baseURL = strings.TrimSpace(*body.BaseURL) } isActive := existing.IsActive if body.IsActive != nil { isActive = *body.IsActive } isDefault := existing.IsDefault if body.IsDefault != nil { isDefault = *body.IsDefault } updateAPIKey := false var encryptedKey *string if body.APIKey != nil { updateAPIKey = true if strings.TrimSpace(*body.APIKey) != "" { encrypted, encErr := crypto.Encrypt(strings.TrimSpace(*body.APIKey), h.Config.AppSecretKey) if encErr != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to encrypt API key"}) return } encryptedKey = &encrypted } } if err := h.DB.UpdateBrainProvider(providerID, name, kind, 
baseURL, encryptedKey, updateAPIKey, isActive, isDefault); err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update provider"}) return } actor := "" if session != nil { actor = session.ClickhouseUser } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "brain.provider.updated", Username: strPtr(actor), Details: strPtr(fmt.Sprintf("provider_id=%s", providerID)), IPAddress: strPtr(r.RemoteAddr), }) writeJSON(w, http.StatusOK, map[string]interface{}{"success": true}) } func (h *AdminHandler) DeleteBrainProvider(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) providerID := chi.URLParam(r, "id") if strings.TrimSpace(providerID) == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Provider ID is required"}) return } if err := h.DB.DeleteBrainProvider(providerID); err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete provider"}) return } actor := "" if session != nil { actor = session.ClickhouseUser } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "brain.provider.deleted", Username: strPtr(actor), Details: strPtr(fmt.Sprintf("provider_id=%s", providerID)), IPAddress: strPtr(r.RemoteAddr), }) writeJSON(w, http.StatusOK, map[string]interface{}{"success": true}) } func (h *AdminHandler) SyncBrainProviderModels(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) providerID := chi.URLParam(r, "id") if strings.TrimSpace(providerID) == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Provider ID is required"}) return } provider, err := h.DB.GetBrainProviderByID(providerID) if err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load provider"}) return } if provider == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Provider not found"}) return } adapter, err := braincore.NewProvider(provider.Kind) if err != nil { 
writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
		return
	}
	// Build the adapter config from stored settings, decrypting the API key
	// (kept encrypted at rest with the app secret) when present.
	cfg := braincore.ProviderConfig{Kind: provider.Kind}
	if provider.BaseURL != nil {
		cfg.BaseURL = *provider.BaseURL
	}
	if provider.EncryptedAPIKey != nil {
		decrypted, decErr := crypto.Decrypt(*provider.EncryptedAPIKey, h.Config.AppSecretKey)
		if decErr != nil {
			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to decrypt provider API key"})
			return
		}
		cfg.APIKey = decrypted
	}
	// Bound the upstream listing so a hung provider cannot pin the request.
	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
	defer cancel()
	modelNames, err := adapter.ListModels(ctx, cfg)
	if err != nil {
		writeJSON(w, http.StatusBadGateway, map[string]string{"error": err.Error()})
		return
	}
	if len(modelNames) == 0 {
		writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "models": []database.BrainModel{}})
		return
	}
	// Upsert each model. Remember the first model that actually synced so it
	// can serve as a fallback default below. Tracking the name alongside the
	// ID fixes a mismatch where modelNames[0] could be used as the display
	// name for a *different* model when the first upsert failed.
	var firstID, firstName string
	for _, name := range modelNames {
		id, ensureErr := h.DB.EnsureBrainModel(providerID, name, name)
		if ensureErr != nil {
			slog.Warn("Failed to sync model", "provider", providerID, "model", name, "error", ensureErr)
			continue
		}
		if firstID == "" {
			firstID = id
			firstName = name
		}
	}
	models, _ := h.DB.GetBrainModels(providerID)
	// Guarantee the provider ends up with at least one active + default model.
	hasDefault := false
	hasActive := false
	for _, m := range models {
		if m.IsDefault {
			hasDefault = true
		}
		if m.IsActive {
			hasActive = true
		}
		if hasDefault && hasActive {
			break
		}
	}
	if !hasDefault || !hasActive {
		rec := pickRecommendedModel(models)
		if rec != nil {
			_ = h.DB.ClearDefaultBrainModelsByProvider(providerID)
			_ = h.DB.UpdateBrainModel(rec.ID, modelDisplayName(*rec), true, true)
		} else if firstID != "" {
			// Fall back to the first successfully synced model, using its own
			// name (not modelNames[0], which may belong to a failed upsert).
			_ = h.DB.UpdateBrainModel(firstID, firstName, true, true)
		}
		models, _ = h.DB.GetBrainModels(providerID)
	}
	actor := ""
	if session != nil {
		actor = session.ClickhouseUser
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:    "brain.provider.models_synced",
		Username:  strPtr(actor),
		Details:   strPtr(fmt.Sprintf("provider_id=%s models=%d", providerID, len(modelNames))),
		IPAddress: strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "models": models})
}

// ListBrainModels returns every Brain model joined with its provider
// (inactive models included — the "false" argument does not filter them out).
func (h *AdminHandler) ListBrainModels(w http.ResponseWriter, r *http.Request) {
	models, err := h.DB.GetBrainModelsWithProvider(false)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list models"})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "models": models})
}

// UpdateBrainModel applies a partial update (display name / active / default)
// to a single model while keeping the flag invariants consistent.
func (h *AdminHandler) UpdateBrainModel(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	modelID := chi.URLParam(r, "id")
	if strings.TrimSpace(modelID) == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Model ID is required"})
		return
	}
	existing, err := h.DB.GetBrainModelByID(modelID)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load model"})
		return
	}
	if existing == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Model not found"})
		return
	}
	var body struct {
		DisplayName *string `json:"displayName"`
		IsActive    *bool   `json:"isActive"`
		IsDefault   *bool   `json:"isDefault"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	// Fall back to the raw model name when no display name is stored.
	displayName := existing.Name
	if existing.DisplayName != nil && strings.TrimSpace(*existing.DisplayName) != "" {
		displayName = *existing.DisplayName
	}
	if body.DisplayName != nil {
		displayName = strings.TrimSpace(*body.DisplayName)
	}
	isActive := existing.IsActive
	if body.IsActive != nil {
		isActive = *body.IsActive
	}
	isDefault := existing.IsDefault
	if body.IsDefault != nil {
		isDefault = *body.IsDefault
	}
	// Invariants: the default model must be active, and an inactive model can
	// never be the default.
	if isDefault {
		isActive = true
	}
	if !isActive {
		isDefault = false
	}
	if err := h.DB.UpdateBrainModel(modelID, displayName, isActive, isDefault); err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update model"})
		return
	}
	actor := ""
	if session != nil { actor
= session.ClickhouseUser }
	// Best-effort audit trail; the return value is intentionally ignored.
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:    "brain.model.updated",
		Username:  strPtr(actor),
		Details:   strPtr(fmt.Sprintf("model_id=%s", modelID)),
		IPAddress: strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
}

// BulkUpdateBrainModels applies one named bulk action to all models of a
// provider: deactivate_all, activate_all, or activate_recommended.
func (h *AdminHandler) BulkUpdateBrainModels(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	var body struct {
		ProviderID string `json:"providerId"`
		Action     string `json:"action"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	providerID := strings.TrimSpace(body.ProviderID)
	if providerID == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "providerId is required"})
		return
	}
	action := strings.TrimSpace(body.Action)
	if action != "deactivate_all" && action != "activate_all" && action != "activate_recommended" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "action must be one of: deactivate_all, activate_all, activate_recommended"})
		return
	}
	// applyModelBulkAction reports how many rows it touched.
	updated, err := applyModelBulkAction(h.DB, providerID, action)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to apply bulk model action"})
		return
	}
	actor := ""
	if session != nil {
		actor = session.ClickhouseUser
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:    "brain.model.bulk_updated",
		Username:  strPtr(actor),
		Details:   strPtr(fmt.Sprintf("provider_id=%s action=%s updated=%d", providerID, action, updated)),
		IPAddress: strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success": true,
		"updated": updated,
	})
}

// ListBrainSkills returns all stored Brain skills; a nil result is normalized
// to an empty slice so the JSON field is always an array.
func (h *AdminHandler) ListBrainSkills(w http.ResponseWriter, r *http.Request) {
	skills, err := h.DB.GetBrainSkills()
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list skills"})
		return
	}
	if skills == nil {
		skills = []database.BrainSkill{}
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "skills": skills})
}

// CreateBrainSkill stores a new skill; isActive defaults to true and
// isDefault to false when omitted from the request body.
func (h *AdminHandler) CreateBrainSkill(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	var body struct {
		Name      string `json:"name"`
		Content   string `json:"content"`
		IsActive  *bool  `json:"isActive"`
		IsDefault *bool  `json:"isDefault"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	name := strings.TrimSpace(body.Name)
	content := strings.TrimSpace(body.Content)
	if name == "" || content == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Name and content are required"})
		return
	}
	isActive := true
	if body.IsActive != nil {
		isActive = *body.IsActive
	}
	isDefault := false
	if body.IsDefault != nil {
		isDefault = *body.IsDefault
	}
	actor := ""
	if session != nil {
		actor = session.ClickhouseUser
	}
	// The acting user is recorded as the skill's creator.
	id, err := h.DB.CreateBrainSkill(name, content, actor, isActive, isDefault)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create skill"})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:    "brain.skill.created",
		Username:  strPtr(actor),
		Details:   strPtr(fmt.Sprintf("skill=%s", name)),
		IPAddress: strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusCreated, map[string]interface{}{"success": true, "id": id})
}

// UpdateBrainSkill applies a partial update to an existing skill.
func (h *AdminHandler) UpdateBrainSkill(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	skillID := chi.URLParam(r, "id")
	if strings.TrimSpace(skillID) == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Skill ID is required"})
		return
	}
	existing, err := h.DB.GetBrainSkillByID(skillID)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load skill"})
		return
	}
	if existing == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Skill not found"})
return } var body struct { Name *string `json:"name"` Content *string `json:"content"` IsActive *bool `json:"isActive"` IsDefault *bool `json:"isDefault"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } name := existing.Name if body.Name != nil && strings.TrimSpace(*body.Name) != "" { name = strings.TrimSpace(*body.Name) } content := existing.Content if body.Content != nil { content = strings.TrimSpace(*body.Content) } isActive := existing.IsActive if body.IsActive != nil { isActive = *body.IsActive } isDefault := existing.IsDefault if body.IsDefault != nil { isDefault = *body.IsDefault } if strings.TrimSpace(name) == "" || strings.TrimSpace(content) == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Name and content are required"}) return } if err := h.DB.UpdateBrainSkill(skillID, name, content, isActive, isDefault); err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update skill"}) return } actor := "" if session != nil { actor = session.ClickhouseUser } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "brain.skill.updated", Username: strPtr(actor), Details: strPtr(fmt.Sprintf("skill_id=%s", skillID)), IPAddress: strPtr(r.RemoteAddr), }) writeJSON(w, http.StatusOK, map[string]interface{}{"success": true}) } ================================================ FILE: internal/server/handlers/admin_governance.go ================================================ package handlers import ( "encoding/json" "fmt" "log/slog" "net/http" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/server/middleware" ) // governanceSettingsResponse is the shape returned by the governance settings // endpoints so the frontend can render the toggle state + last-change metadata. 
type governanceSettingsResponse struct { SyncEnabled bool `json:"sync_enabled"` UpdatedAt string `json:"updated_at"` UpdatedBy string `json:"updated_by"` BannerDismissed bool `json:"banner_dismissed"` SyncerRunning bool `json:"syncer_running"` } // GetGovernanceSettings returns the current governance sync toggle state. func (h *AdminHandler) GetGovernanceSettings(w http.ResponseWriter, r *http.Request) { resp := h.buildGovernanceSettingsResponse() writeJSON(w, http.StatusOK, resp) } // UpdateGovernanceSettings flips the governance sync toggle and, if the syncer // handle is wired in, starts or stops the background goroutine accordingly. func (h *AdminHandler) UpdateGovernanceSettings(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) var body struct { SyncEnabled *bool `json:"sync_enabled"` BannerDismissed *bool `json:"banner_dismissed"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeError(w, http.StatusBadRequest, "Invalid request body") return } actor := "unknown" if session != nil { actor = session.ClickhouseUser } if body.SyncEnabled != nil { if err := h.DB.SetGovernanceSyncEnabled(*body.SyncEnabled, actor); err != nil { slog.Error("Failed to persist governance sync setting", "error", err) writeError(w, http.StatusInternalServerError, "Failed to save setting") return } if h.GovSyncer != nil { if *body.SyncEnabled { h.GovSyncer.StartBackground() } else { h.GovSyncer.Stop() } } details := fmt.Sprintf(`{"sync_enabled":%t}`, *body.SyncEnabled) h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.sync_toggle", Username: strPtr(actor), Details: strPtr(details), IPAddress: strPtr(r.RemoteAddr), }) } if body.BannerDismissed != nil && *body.BannerDismissed { if err := h.DB.SetSetting(database.SettingGovernanceUpgradeBanner, "true"); err != nil { slog.Warn("Failed to persist governance banner dismissal", "error", err) } } writeJSON(w, http.StatusOK, h.buildGovernanceSettingsResponse()) } func (h *AdminHandler) 
buildGovernanceSettingsResponse() governanceSettingsResponse {
	// Assemble toggle state plus last-change metadata; setting-read errors
	// fall back to zero values.
	enabled := h.DB.GovernanceSyncEnabled()
	updatedAt, _ := h.DB.GetSetting(database.SettingGovernanceSyncUpdatedAt)
	updatedBy, _ := h.DB.GetSetting(database.SettingGovernanceSyncUpdatedBy)
	bannerDismissed, _ := h.DB.GetSetting(database.SettingGovernanceUpgradeBanner)
	running := false
	if h.GovSyncer != nil {
		running = h.GovSyncer.IsRunning()
	}
	return governanceSettingsResponse{
		SyncEnabled:     enabled,
		UpdatedAt:       updatedAt,
		UpdatedBy:       updatedBy,
		BannerDismissed: bannerDismissed == "true",
		SyncerRunning:   running,
	}
}

================================================
FILE: internal/server/handlers/admin_langfuse.go
================================================
package handlers

import (
	"encoding/json"
	"net/http"
	"strings"

	"github.com/caioricciuti/ch-ui/internal/crypto"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/langfuse"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
)

// GetLangfuseConfig returns the current Langfuse configuration (secret key masked).
func (h *AdminHandler) GetLangfuseConfig(w http.ResponseWriter, r *http.Request) {
	// The secret key itself is never returned — only whether one is stored.
	publicKey, _ := h.DB.GetSetting("langfuse.public_key")
	baseURL, _ := h.DB.GetSetting("langfuse.base_url")
	encryptedSecret, _ := h.DB.GetSetting("langfuse.secret_key")
	if baseURL == "" {
		baseURL = "https://cloud.langfuse.com"
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success":        true,
		"public_key":     publicKey,
		"base_url":       baseURL,
		"has_secret_key": encryptedSecret != "",
		"enabled":        h.Langfuse.IsEnabled(),
	})
}

// UpdateLangfuseConfig saves Langfuse configuration and reconfigures the client.
func (h *AdminHandler) UpdateLangfuseConfig(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	var body struct {
		PublicKey string `json:"publicKey"`
		SecretKey string `json:"secretKey"`
		BaseURL   string `json:"baseUrl"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body")
		return
	}
	publicKey := strings.TrimSpace(body.PublicKey)
	secretKey := strings.TrimSpace(body.SecretKey)
	baseURL := strings.TrimSpace(body.BaseURL)
	if publicKey == "" {
		writeError(w, http.StatusBadRequest, "Public key is required")
		return
	}
	// Save public key
	if err := h.DB.SetSetting("langfuse.public_key", publicKey); err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to save public key")
		return
	}
	// Save base URL
	if baseURL == "" {
		baseURL = "https://cloud.langfuse.com"
	}
	if err := h.DB.SetSetting("langfuse.base_url", baseURL); err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to save base URL")
		return
	}
	// Save secret key (only if provided — allows partial update)
	if secretKey != "" {
		encrypted, err := crypto.Encrypt(secretKey, h.Config.AppSecretKey)
		if err != nil {
			writeError(w, http.StatusInternalServerError, "Failed to encrypt secret key")
			return
		}
		if err := h.DB.SetSetting("langfuse.secret_key", encrypted); err != nil {
			writeError(w, http.StatusInternalServerError, "Failed to save secret key")
			return
		}
	}
	// Reconfigure the live client
	cfg, err := loadLangfuseConfig(h.DB, h.Config.AppSecretKey)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to reload config")
		return
	}
	h.Langfuse.Reconfigure(cfg)
	// Audit log
	actor := "unknown"
	if session != nil {
		actor = session.ClickhouseUser
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:   "langfuse.config.updated",
		Username: strPtr(actor),
		Details:  strPtr("base_url=" + baseURL),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success": true,
		"enabled": h.Langfuse.IsEnabled(),
	})
}

// DeleteLangfuseConfig removes all Langfuse settings and disables the client.
func (h *AdminHandler) DeleteLangfuseConfig(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	// Deletion is best-effort per key; a zero-value Reconfigure disables the client.
	_ = h.DB.DeleteSetting("langfuse.public_key")
	_ = h.DB.DeleteSetting("langfuse.secret_key")
	_ = h.DB.DeleteSetting("langfuse.base_url")
	h.Langfuse.Reconfigure(langfuse.Config{})
	actor := "unknown"
	if session != nil {
		actor = session.ClickhouseUser
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:   "langfuse.config.deleted",
		Username: strPtr(actor),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success": true,
		"enabled": false,
	})
}

// TestLangfuseConnection verifies credentials against the Langfuse API.
func (h *AdminHandler) TestLangfuseConnection(w http.ResponseWriter, r *http.Request) {
	var body struct {
		PublicKey string `json:"publicKey"`
		SecretKey string `json:"secretKey"`
		BaseURL   string `json:"baseUrl"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body")
		return
	}
	publicKey := strings.TrimSpace(body.PublicKey)
	secretKey := strings.TrimSpace(body.SecretKey)
	baseURL := strings.TrimSpace(body.BaseURL)
	// If secret key not provided, use stored one
	if secretKey == "" {
		encryptedSecret, _ := h.DB.GetSetting("langfuse.secret_key")
		if encryptedSecret != "" {
			decrypted, err := crypto.Decrypt(encryptedSecret, h.Config.AppSecretKey)
			if err == nil {
				secretKey = decrypted
			}
		}
	}
	if publicKey == "" || secretKey == "" {
		writeError(w, http.StatusBadRequest, "Public key and secret key are required")
		return
	}
	cfg := langfuse.Config{
		PublicKey: publicKey,
		SecretKey: secretKey,
		BaseURL:   baseURL,
	}
	// A failed probe is still HTTP 200: "success" means the test ran;
	// "connected" carries the actual result.
	if err := h.Langfuse.TestConnection(cfg); err != nil {
		writeJSON(w, http.StatusOK, map[string]interface{}{
			"success":   true,
			"connected": false,
			"error":     err.Error(),
		})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success":   true,
		"connected": true,
	})
}

// loadLangfuseConfig reads Langfuse configuration from the settings table.
func loadLangfuseConfig(db *database.DB, appSecretKey string) (langfuse.Config, error) {
	var cfg langfuse.Config
	publicKey, err := db.GetSetting("langfuse.public_key")
	if err != nil {
		return cfg, err
	}
	cfg.PublicKey = publicKey
	encryptedSecret, err := db.GetSetting("langfuse.secret_key")
	if err != nil {
		return cfg, err
	}
	if encryptedSecret != "" {
		decrypted, err := crypto.Decrypt(encryptedSecret, appSecretKey)
		if err != nil {
			return cfg, err
		}
		cfg.SecretKey = decrypted
	}
	baseURL, err := db.GetSetting("langfuse.base_url")
	if err != nil {
		return cfg, err
	}
	cfg.BaseURL = baseURL
	cfg.NormalizeBaseURL()
	return cfg, nil
}

================================================
FILE: internal/server/handlers/auth.go
================================================
package handlers

import (
	"encoding/json"
	"fmt"
	"log/slog"
	"net/http"
	"net/url"
	"strings"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/google/uuid"

	"github.com/caioricciuti/ch-ui/internal/config"
	"github.com/caioricciuti/ch-ui/internal/crypto"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
	"github.com/caioricciuti/ch-ui/internal/tunnel"
	"github.com/caioricciuti/ch-ui/internal/version"
)

// Session and rate-limit constants.
const (
	SessionCookie      = "chui_session"
	SessionDuration    = 7 * 24 * time.Hour
	RateLimitWindow    = 15 * time.Minute
	MaxAttemptsPerIP   = 5
	MaxAttemptsPerUser = 3
)

// AuthHandler implements the authentication HTTP endpoints.
type AuthHandler struct {
	DB          *database.DB
	Gateway     *tunnel.Gateway
	RateLimiter *middleware.RateLimiter
	Config      *config.Config
}

// Routes returns a chi.Router with all auth routes mounted.
func (h *AuthHandler) Routes(r chi.Router) {
	r.Post("/login", h.Login)
	r.Post("/logout", h.Logout)
	r.Get("/session", h.Session)
	r.Get("/connections", h.Connections)
	r.Post("/switch-connection", h.SwitchConnection)
}

// ---------- request / response types ----------

// loginRequest accepts the connection id in both camelCase and snake_case for
// client compatibility; resolvedConnectionID picks whichever is set.
type loginRequest struct {
	Username          string `json:"username"`
	Password          string `json:"password"`
	ConnectionID      string `json:"connectionId"`
	ConnectionIDSnake string `json:"connection_id"`
}

type switchConnectionRequest struct {
	ConnectionID      string `json:"connectionId"`
	ConnectionIDSnake string `json:"connection_id"`
	Username          string `json:"username"`
	Password          string `json:"password"`
}

type connectionInfo struct {
	ID         string  `json:"id"`
	Name       string  `json:"name"`
	Status     string  `json:"status"`
	Online     bool    `json:"online"`
	LastSeenAt *string `json:"last_seen_at"`
	CreatedAt  string  `json:"created_at"`
}

// resolvedConnectionID prefers the camelCase field, falling back to snake_case.
func (r loginRequest) resolvedConnectionID() string {
	if id := strings.TrimSpace(r.ConnectionID); id != "" {
		return id
	}
	return strings.TrimSpace(r.ConnectionIDSnake)
}

func (r switchConnectionRequest) resolvedConnectionID() string {
	if id := strings.TrimSpace(r.ConnectionID); id != "" {
		return id
	}
	return strings.TrimSpace(r.ConnectionIDSnake)
}

// normalizeRateLimitUsername canonicalizes a username for rate-limit keying.
func normalizeRateLimitUsername(username string) string {
	return strings.ToLower(strings.TrimSpace(username))
}

// userRateLimitKey scopes per-user limits to a specific connection.
func userRateLimitKey(username, connectionID string) string {
	return fmt.Sprintf("user:%s:%s", normalizeRateLimitUsername(username), strings.TrimSpace(connectionID))
}

// sanitizeClickHouseAuthMessage maps raw ClickHouse/transport errors onto a
// small set of user-safe messages so internal details are never leaked.
func sanitizeClickHouseAuthMessage(raw string) string {
	msg := strings.ToLower(strings.TrimSpace(raw))
	if msg == "" {
		return "Invalid credentials"
	}
	if strings.Contains(msg, "auth") ||
		strings.Contains(msg, "credential") ||
		strings.Contains(msg, "password") ||
		strings.Contains(msg, "unauthorized") ||
		strings.Contains(msg, "access denied") {
		return "Invalid credentials"
	}
	if strings.Contains(msg, "timeout") ||
		strings.Contains(msg, "deadline") ||
		strings.Contains(msg, "refused") ||
		strings.Contains(msg, "no route") ||
		strings.Contains(msg, "connection reset") ||
		strings.Contains(msg, "network") ||
		strings.Contains(msg, "tls") {
		return "Connection to ClickHouse failed"
	}
	return "Authentication failed"
}

// shouldUseSecureCookie decides whether the session cookie gets the Secure
// flag, checking direct TLS, the reverse-proxy header, then the app URL.
func shouldUseSecureCookie(r *http.Request, cfg *config.Config) bool {
	// Direct TLS request (no proxy)
	if r != nil && r.TLS != nil {
		return true
	}
	// Reverse proxy forwarding HTTPS
	if r != nil && strings.EqualFold(strings.TrimSpace(r.Header.Get("X-Forwarded-Proto")), "https") {
		return true
	}
	// Fallback to configured public app URL scheme.
	if cfg != nil && strings.TrimSpace(cfg.AppURL) != "" {
		if parsed, err := url.Parse(cfg.AppURL); err == nil {
			return strings.EqualFold(parsed.Scheme, "https")
		}
	}
	return false
}

// ---------- POST /login ----------

func (h *AuthHandler) Login(w http.ResponseWriter, r *http.Request) {
	var req loginRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	req.Username = strings.TrimSpace(req.Username)
	if req.Username == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Username is required"})
		return
	}
	req.ConnectionID = req.resolvedConnectionID()
	// --- Rate limiting ---
	clientIP := getClientIP(r)
	ipKey := fmt.Sprintf("ip:%s", clientIP)
	ipResult := h.RateLimiter.CheckAuthRateLimit(ipKey, "ip", MaxAttemptsPerIP, RateLimitWindow)
	if !ipResult.Allowed {
		retrySeconds := int(ipResult.RetryAfter.Seconds())
		slog.Warn("IP rate limited", "ip", clientIP, "retryAfter", retrySeconds)
		writeJSON(w, http.StatusTooManyRequests, map[string]interface{}{
			"error":      "Too many login attempts from this IP",
			"retryAfter": retrySeconds,
		})
		return
	}
	// --- Resolve connection ---
	connections, err := h.DB.GetConnections()
	if err != nil {
		slog.Error("Failed to get connections", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve connections"})
		return
	}
	if
len(connections) == 0 {
		writeJSON(w, http.StatusBadRequest, map[string]string{
			"error":   "No connections available",
			"message": "No connections are configured. Please set up an agent first.",
		})
		return
	}
	// An explicit connection id must match a known connection; otherwise the
	// first configured connection is used.
	var conn *database.Connection
	if req.ConnectionID != "" {
		for i := range connections {
			if connections[i].ID == req.ConnectionID {
				conn = &connections[i]
				break
			}
		}
		if conn == nil {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Connection not found"})
			return
		}
	} else {
		conn = &connections[0]
	}
	userKey := userRateLimitKey(req.Username, conn.ID)
	userResult := h.RateLimiter.CheckAuthRateLimit(userKey, "user", MaxAttemptsPerUser, RateLimitWindow)
	if !userResult.Allowed {
		retrySeconds := int(userResult.RetryAfter.Seconds())
		slog.Warn("User rate limited", "user", req.Username, "connection", conn.ID, "retryAfter", retrySeconds)
		writeJSON(w, http.StatusTooManyRequests, map[string]interface{}{
			"error":      "Too many login attempts for this user",
			"retryAfter": retrySeconds,
		})
		return
	}
	// --- Check tunnel is online (retry up to 3 times) ---
	online := false
	for attempt := 0; attempt < 3; attempt++ {
		if h.Gateway.IsTunnelOnline(conn.ID) {
			online = true
			break
		}
		if attempt < 2 {
			time.Sleep(500 * time.Millisecond)
		}
	}
	if !online {
		writeJSON(w, http.StatusServiceUnavailable, map[string]string{
			"error":   "Connection offline",
			"message": "The tunnel agent for this connection is not online. Please check that the agent is running.",
		})
		return
	}
	// --- Test ClickHouse credentials ---
	testResult, err := h.Gateway.TestConnection(conn.ID, req.Username, req.Password, 15*time.Second)
	if err != nil {
		// Count the failure against both limiters before answering.
		h.RateLimiter.RecordAttempt(ipKey, "ip")
		h.RateLimiter.RecordAttempt(userKey, "user")
		slog.Info("Login failed: connection test error", "user", req.Username, "error", err)
		writeJSON(w, http.StatusUnauthorized, map[string]string{
			"error":   "Authentication failed",
			"message": sanitizeClickHouseAuthMessage(err.Error()),
		})
		return
	}
	if !testResult.Success {
		h.RateLimiter.RecordAttempt(ipKey, "ip")
		h.RateLimiter.RecordAttempt(userKey, "user")
		errMsg := testResult.Error
		if errMsg == "" {
			errMsg = "Invalid credentials"
		}
		slog.Info("Login failed: bad credentials", "user", req.Username)
		writeJSON(w, http.StatusUnauthorized, map[string]string{
			"error":   "Authentication failed",
			"message": sanitizeClickHouseAuthMessage(errMsg),
		})
		return
	}
	// --- Resolve CH-UI role ---
	role := h.resolveUserRole(conn.ID, req.Username, req.Password, clientIP)
	// --- Encrypt password and create session ---
	encryptedPwd, err := crypto.Encrypt(req.Password, h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to encrypt password", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Internal server error"})
		return
	}
	token := uuid.NewString()
	expiresAt := time.Now().UTC().Add(SessionDuration).Format(time.RFC3339)
	_, err = h.DB.CreateSession(database.CreateSessionParams{
		ConnectionID:      conn.ID,
		ClickhouseUser:    req.Username,
		EncryptedPassword: encryptedPwd,
		Token:             token,
		ExpiresAt:         expiresAt,
		UserRole:          role,
	})
	if err != nil {
		slog.Error("Failed to create session", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create session"})
		return
	}
	// --- Set cookie ---
	http.SetCookie(w, &http.Cookie{
		Name:     SessionCookie,
		Value:    token,
		Path:     "/",
		MaxAge:   int(SessionDuration.Seconds()),
		HttpOnly: true,
		Secure:   shouldUseSecureCookie(r, h.Config),
		SameSite: http.SameSiteLaxMode,
	})
	// --- Reset rate limits on success ---
	h.RateLimiter.ResetLimit(ipKey)
	h.RateLimiter.ResetLimit(userKey)
	// --- Audit log ---
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "user.login",
		Username:     strPtr(req.Username),
		ConnectionID: strPtr(conn.ID),
		Details:      strPtr(fmt.Sprintf("Login via connection %s (role: %s, version: %s)", conn.Name, role, testResult.Version)),
		IPAddress:    strPtr(clientIP),
	})
	slog.Info("User logged in", "user", req.Username, "role", role, "connection", conn.Name)
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success":            true,
		"user":               req.Username,
		"user_role":          role,
		"clickhouse_version": testResult.Version,
		"expires_at":         expiresAt,
		"connection": map[string]interface{}{
			"id":     conn.ID,
			"name":   conn.Name,
			"online": true,
		},
		"session": map[string]interface{}{
			"user":             req.Username,
			"role":             role,
			"connectionId":     conn.ID,
			"connectionName":   conn.Name,
			"connectionOnline": true,
			"expiresAt":        expiresAt,
			"version":          testResult.Version,
			"appVersion":       version.Version,
		},
	})
}

// ---------- POST /logout ----------

// Logout is idempotent: a missing cookie still yields success, and the
// cookie is expired client-side regardless of server-side state.
func (h *AuthHandler) Logout(w http.ResponseWriter, r *http.Request) {
	cookie, err := r.Cookie(SessionCookie)
	if err != nil || cookie.Value == "" {
		writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
		return
	}
	session, _ := h.DB.GetSession(cookie.Value)
	if err := h.DB.DeleteSession(cookie.Value); err != nil {
		slog.Error("Failed to delete session", "error", err)
	}
	http.SetCookie(w, &http.Cookie{
		Name:     SessionCookie,
		Value:    "",
		Path:     "/",
		MaxAge:   -1,
		HttpOnly: true,
		Secure:   shouldUseSecureCookie(r, h.Config),
		SameSite: http.SameSiteLaxMode,
	})
	if session != nil {
		clientIP := getClientIP(r)
		h.DB.CreateAuditLog(database.AuditLogParams{
			Action:    "user.logout",
			Username:  strPtr(session.ClickhouseUser),
			IPAddress: strPtr(clientIP),
		})
		slog.Info("User logged out", "user", session.ClickhouseUser)
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"success":
true})
}

// ---------- GET /session ----------

// Session reports whether the caller holds a valid session and, if so, the
// resolved role and connection status for the frontend.
func (h *AuthHandler) Session(w http.ResponseWriter, r *http.Request) {
	cookie, err := r.Cookie(SessionCookie)
	if err != nil || cookie.Value == "" {
		writeJSON(w, http.StatusOK, map[string]interface{}{
			"authenticated": false,
		})
		return
	}
	session, err := h.DB.GetSession(cookie.Value)
	if err != nil {
		slog.Error("Failed to get session", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Session lookup failed"})
		return
	}
	if session == nil {
		// Stale cookie: expire it client-side and report unauthenticated.
		http.SetCookie(w, &http.Cookie{
			Name:     SessionCookie,
			Value:    "",
			Path:     "/",
			MaxAge:   -1,
			HttpOnly: true,
			Secure:   shouldUseSecureCookie(r, h.Config),
			SameSite: http.SameSiteLaxMode,
		})
		writeJSON(w, http.StatusOK, map[string]interface{}{
			"authenticated": false,
		})
		return
	}
	connOnline := h.Gateway.IsTunnelOnline(session.ConnectionID)
	connName := ""
	tc, _ := h.DB.GetConnectionByID(session.ConnectionID)
	if tc != nil {
		connName = tc.Name
	}
	// Role resolution: an explicit per-user override wins, then the role
	// captured at login, defaulting to viewer.
	role := "viewer"
	overrideRole, roleErr := h.DB.GetUserRole(session.ClickhouseUser)
	if roleErr != nil {
		slog.Warn("Failed to resolve explicit role for session", "user", session.ClickhouseUser, "error", roleErr)
	} else if overrideRole != "" {
		role = overrideRole
	} else if session.UserRole != nil {
		role = *session.UserRole
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"authenticated": true,
		"user":          session.ClickhouseUser,
		"user_role":     role,
		"expires_at":    session.ExpiresAt,
		"connection": map[string]interface{}{
			"id":     session.ConnectionID,
			"name":   connName,
			"online": connOnline,
		},
		"session": map[string]interface{}{
			"user":             session.ClickhouseUser,
			"role":             role,
			"connectionId":     session.ConnectionID,
			"connectionName":   connName,
			"connectionOnline": connOnline,
			"expiresAt":        session.ExpiresAt,
			"appVersion":       version.Version,
		},
	})
}

// ---------- GET /connections ----------

// Connections lists configured connections with live tunnel status.
func (h *AuthHandler) Connections(w http.ResponseWriter, r *http.Request) {
	connections, err := h.DB.GetConnections()
	if err != nil {
		slog.Error("Failed to get connections", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve connections"})
		return
	}
	result := make([]connectionInfo, 0, len(connections))
	for _, c := range connections {
		result = append(result, connectionInfo{
			ID:         c.ID,
			Name:       c.Name,
			Status:     c.Status,
			Online:     h.Gateway.IsTunnelOnline(c.ID),
			LastSeenAt: c.LastSeenAt,
			CreatedAt:  c.CreatedAt,
		})
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"connections": result,
	})
}

// ---------- POST /switch-connection ----------

// SwitchConnection re-authenticates the current user against another
// connection and rotates the session token.
func (h *AuthHandler) SwitchConnection(w http.ResponseWriter, r *http.Request) {
	cookie, err := r.Cookie(SessionCookie)
	if err != nil || cookie.Value == "" {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	existingSession, err := h.DB.GetSession(cookie.Value)
	if err != nil || existingSession == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Session expired or invalid"})
		return
	}
	var req switchConnectionRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	req.ConnectionID = req.resolvedConnectionID()
	// Keep the current user when the body omits a username.
	if strings.TrimSpace(req.Username) == "" {
		req.Username = existingSession.ClickhouseUser
	}
	req.Username = strings.TrimSpace(req.Username)
	if req.ConnectionID == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "connection_id (or connectionId) is required"})
		return
	}
	newConn, err := h.DB.GetConnectionByID(req.ConnectionID)
	if err != nil {
		slog.Error("Failed to get connection", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve connection"})
		return
	}
	if newConn == nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Connection not found"})
		return
	}
	// Check tunnel is online (retry up to 3 times).
	online := false
	for attempt := 0; attempt < 3; attempt++ {
		if h.Gateway.IsTunnelOnline(newConn.ID) {
			online = true
			break
		}
		if attempt < 2 {
			time.Sleep(500 * time.Millisecond)
		}
	}
	if !online {
		writeJSON(w, http.StatusServiceUnavailable, map[string]string{
			"error":   "Connection offline",
			"message": "The tunnel agent for this connection is not online. Please check that the agent is running.",
		})
		return
	}
	testResult, err := h.Gateway.TestConnection(newConn.ID, req.Username, req.Password, 15*time.Second)
	if err != nil {
		slog.Info("Switch connection failed: test error", "user", req.Username, "error", err)
		writeJSON(w, http.StatusUnauthorized, map[string]string{
			"error":   "Authentication failed",
			"message": sanitizeClickHouseAuthMessage(err.Error()),
		})
		return
	}
	if !testResult.Success {
		errMsg := testResult.Error
		if errMsg == "" {
			errMsg = "Invalid credentials"
		}
		writeJSON(w, http.StatusUnauthorized, map[string]string{
			"error":   "Authentication failed",
			"message": sanitizeClickHouseAuthMessage(errMsg),
		})
		return
	}
	clientIP := getClientIP(r)
	role := h.resolveUserRole(newConn.ID, req.Username, req.Password, clientIP)
	encryptedPwd, err := crypto.Encrypt(req.Password, h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to encrypt password", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Internal server error"})
		return
	}
	// Rotate the session: drop the old token before issuing a new one.
	if err := h.DB.DeleteSession(cookie.Value); err != nil {
		slog.Error("Failed to delete old session", "error", err)
	}
	token := uuid.NewString()
	expiresAt := time.Now().UTC().Add(SessionDuration).Format(time.RFC3339)
	_, err = h.DB.CreateSession(database.CreateSessionParams{
		ConnectionID:      newConn.ID,
		ClickhouseUser:    req.Username,
		EncryptedPassword: encryptedPwd,
		Token:             token,
		ExpiresAt:         expiresAt,
		UserRole:          role,
	})
	if err != nil {
		slog.Error("Failed to create session", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create session"})
		return
	}
	http.SetCookie(w,
&http.Cookie{ Name: SessionCookie, Value: token, Path: "/", MaxAge: int(SessionDuration.Seconds()), HttpOnly: true, Secure: shouldUseSecureCookie(r, h.Config), SameSite: http.SameSiteLaxMode, }) h.DB.CreateAuditLog(database.AuditLogParams{ Action: "user.switch_connection", Username: strPtr(req.Username), ConnectionID: strPtr(newConn.ID), Details: strPtr(fmt.Sprintf("Switched to connection %s (role: %s)", newConn.Name, role)), IPAddress: strPtr(clientIP), }) slog.Info("User switched connection", "user", req.Username, "connection", newConn.Name, "role", role) writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "user": req.Username, "user_role": role, "clickhouse_version": testResult.Version, "expires_at": expiresAt, "connection": map[string]interface{}{ "id": newConn.ID, "name": newConn.Name, "online": true, }, "session": map[string]interface{}{ "user": req.Username, "role": role, "connectionId": newConn.ID, "connectionName": newConn.Name, "connectionOnline": true, "expiresAt": expiresAt, "version": testResult.Version, "appVersion": version.Version, }, }) } // ---------- ClickHouse role detection ---------- func (h *AuthHandler) detectClickHouseRole(connectionID, username, password string) string { var err error _, err = h.Gateway.ExecuteQuery( connectionID, "SELECT 1 FROM system.users LIMIT 1", username, password, 10*time.Second, ) if err == nil { slog.Debug("Role detected as admin (system.users accessible)", "user", username) return "admin" } errStr := err.Error() if !isPermissionError(errStr) { slog.Debug("Role defaulting to viewer (non-permission error from system.users)", "user", username, "error", errStr) return "viewer" } result, err := h.Gateway.ExecuteQuery( connectionID, fmt.Sprintf("SELECT access_type FROM system.grants WHERE user_name = '%s'", escapeSingleQuotes(username)), username, password, 10*time.Second, ) if err != nil { slog.Debug("Role defaulting to viewer (system.grants query failed)", "user", username, "error", err) return 
"viewer" } role := classifyGrants(result) slog.Debug("Role detected from grants", "user", username, "role", role) return role } func (h *AuthHandler) resolveUserRole(connectionID, username, password, clientIP string) string { manualRole, err := h.DB.GetUserRole(username) if err == nil && manualRole != "" { slog.Debug("Using manually assigned role", "user", username, "role", manualRole) return manualRole } if err != nil { slog.Warn("Failed to read manual role override", "user", username, "error", err) } detectedRole := h.detectClickHouseRole(connectionID, username, password) if detectedRole != "admin" { return detectedRole } adminCount, err := h.DB.CountUsersWithRole("admin") if err != nil { slog.Warn("Failed to count admin overrides; denying implicit admin", "user", username, "error", err) return "viewer" } if adminCount == 0 { if err := h.DB.SetUserRole(username, "admin"); err != nil { slog.Warn("Failed to bootstrap first admin role", "user", username, "error", err) return "viewer" } _ = h.DB.CreateAuditLog(database.AuditLogParams{ Action: "user_role.bootstrap_admin", Username: strPtr(username), Details: strPtr("Automatically granted first explicit admin role from ClickHouse admin login"), IPAddress: strPtr(clientIP), ConnectionID: strPtr(connectionID), }) slog.Info("Bootstrapped first explicit admin role", "user", username) return "admin" } slog.Info("Ignoring implicit ClickHouse admin privileges without explicit CH-UI admin override", "user", username) return "viewer" } func classifyGrants(result *tunnel.QueryResult) string { if result == nil || len(result.Data) == 0 { return "viewer" } var rows []map[string]interface{} if err := json.Unmarshal(result.Data, &rows); err != nil { return "viewer" } adminGrants := map[string]bool{ "ALL": true, "CREATE": true, "CREATE DATABASE": true, "CREATE TABLE": true, "ALTER": true, "DROP": true, "SYSTEM": true, } analystGrants := map[string]bool{ "INSERT": true, "DELETE": true, "ALTER TABLE": true, "CREATE TEMPORARY TABLE": 
true, } hasAdmin := false hasAnalyst := false for _, row := range rows { accessType, ok := row["access_type"].(string) if !ok { continue } upper := strings.ToUpper(strings.TrimSpace(accessType)) if adminGrants[upper] { hasAdmin = true } if analystGrants[upper] { hasAnalyst = true } } if hasAdmin { return "admin" } if hasAnalyst { return "analyst" } return "viewer" } func isPermissionError(errStr string) bool { lower := strings.ToLower(errStr) return strings.Contains(lower, "access_denied") || strings.Contains(lower, "access denied") || strings.Contains(lower, "not enough privileges") || strings.Contains(lower, "permission denied") || strings.Contains(lower, "code: 497") } func escapeSingleQuotes(s string) string { // ClickHouse uses '' (doubled single-quote) to escape, not \' // Also escape backslashes to prevent escape-sequence bypasses s = strings.ReplaceAll(s, "\\", "\\\\") return strings.ReplaceAll(s, "'", "''") } // ---------- helpers ---------- func getClientIP(r *http.Request) string { // Only trust proxy headers if the request appears to come through a reverse proxy. // Check X-Forwarded-For only when a proxy indicator is present (TLS termination or // the presence of X-Forwarded-Proto, which is typically set by trusted proxies). 
================================================ FILE: internal/server/handlers/auth_helpers_test.go ================================================

package handlers

import "testing"

// TestUserRateLimitKeyScopedByConnection verifies that the rate-limit key is
// normalized (lowercased) and scoped per connection, so the same username on
// two different connections never shares a bucket.
func TestUserRateLimitKeyScopedByConnection(t *testing.T) {
	k1 := userRateLimitKey("Default", "conn-a")
	k2 := userRateLimitKey("default", "conn-b")
	if k1 == k2 {
		t.Fatalf("user rate limit key must include connection scope")
	}
	if k1 != "user:default:conn-a" {
		t.Fatalf("unexpected normalized key: %s", k1)
	}
}

// TestSanitizeClickHouseAuthMessage checks that raw ClickHouse and network
// errors are mapped to safe user-facing messages instead of being leaked
// verbatim to the client.
func TestSanitizeClickHouseAuthMessage(t *testing.T) {
	tests := []struct {
		name string
		raw  string
		want string
	}{
		{name: "credentials", raw: "Code: 516. DB::Exception: Authentication failed", want: "Invalid credentials"},
		{name: "network", raw: "dial tcp 127.0.0.1:8123: connection refused", want: "Connection to ClickHouse failed"},
		{name: "empty", raw: "", want: "Invalid credentials"},
		{name: "fallback", raw: "unexpected upstream response", want: "Authentication failed"},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			got := sanitizeClickHouseAuthMessage(tc.raw)
			if got != tc.want {
				t.Fatalf("unexpected sanitized message: got %q want %q", got, tc.want)
			}
		})
	}
}

================================================ FILE: internal/server/handlers/brain.go ================================================

package handlers

import (
	"encoding/json"
	"fmt"
	"log/slog"
	"net/http"
	"regexp"
	"strings"
	"time"

	braincore "github.com/caioricciuti/ch-ui/internal/brain"
	"github.com/caioricciuti/ch-ui/internal/config"
	"github.com/caioricciuti/ch-ui/internal/crypto"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/langfuse"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
	"github.com/caioricciuti/ch-ui/internal/tunnel"
	"github.com/caioricciuti/ch-ui/internal/version"
	"github.com/go-chi/chi/v5"
	"github.com/google/uuid"
)

// baseBrainPrompt is the system prompt prepended to every Brain conversation
// (optionally combined elsewhere with the active skill and schema context).
const baseBrainPrompt = `You are Brain, an expert ClickHouse assistant for analytics teams. Core behavior: - Prioritize correctness over verbosity. - Provide SQL first when the user asks for query help. - Keep default queries safe: LIMIT 100 for exploratory SELECT queries. - Ask one concise clarification if schema/context is insufficient. - Reuse prior chat context and artifacts when relevant. Output style: 1) One sentence acknowledging intent. 2) SQL in a fenced sql block when applicable. 3) Short explanation and optional alternatives.`

// BrainHandler handles Brain chat, persistence, and artifacts.
type BrainHandler struct {
	DB       *database.DB
	Gateway  *tunnel.Gateway
	Config   *config.Config
	Langfuse *langfuse.Client // nil when disabled
}

// Routes mounts all Brain endpoints on the given chi router.
func (h *BrainHandler) Routes(r chi.Router) {
	r.Get("/models", h.ListModels)
	r.Get("/skills", h.GetSkill)
	r.Get("/chats", h.ListChats)
	r.Post("/chats", h.CreateChat)
	r.Get("/chats/{chatID}", h.GetChat)
	r.Put("/chats/{chatID}", h.UpdateChat)
	r.Delete("/chats/{chatID}", h.DeleteChat)
	r.Get("/chats/{chatID}/messages", h.ListMessages)
	r.Post("/chats/{chatID}/messages/stream", h.StreamMessage)
	r.Get("/chats/{chatID}/artifacts", h.ListArtifacts)
	r.Post("/chats/{chatID}/artifacts/query", h.RunQueryArtifact)
	// Legacy endpoint kept for compatibility with older UI.
	r.Post("/chat", h.LegacyChat)
}

// schemaColumn describes a single column in a schema-context payload.
type schemaColumn struct {
	Name string `json:"name"`
	Type string `json:"type"`
}

// schemaContext carries table schema (and optional sample rows) that the
// frontend attaches to a chat message to ground the model.
type schemaContext struct {
	Database   string         `json:"database"`
	Table      string         `json:"table"`
	Columns    []schemaColumn `json:"columns"`
	SampleData interface{}    `json:"sampleData"`
}

type createChatRequest struct {
	Title   string `json:"title"`
	ModelID string `json:"modelId"`
}

// updateChatRequest uses pointer fields so absent keys are distinguishable
// from explicitly empty values.
type updateChatRequest struct {
	Title           *string `json:"title"`
	Archived        *bool   `json:"archived"`
	ModelID         *string `json:"modelId"`
	ContextDatabase *string `json:"contextDatabase"`
	ContextTable    *string `json:"contextTable"`
	ContextTables   *string `json:"contextTables"`
}

type streamMessageRequest struct {
	Content        string          `json:"content"`
	ModelID        string          `json:"modelId"`
	SchemaContext  *schemaContext  `json:"schemaContext,omitempty"`
	SchemaContexts []schemaContext `json:"schemaContexts,omitempty"`
}

type runQueryArtifactRequest struct {
	Query     string `json:"query"`
	Title     string `json:"title"`
	MessageID string `json:"messageId"`
	Timeout   int    `json:"timeout"` // seconds
}

// ListModels returns the active AI models together with provider info.
func (h *BrainHandler) ListModels(w http.ResponseWriter, r *http.Request) {
	models, err := h.DB.GetBrainModelsWithProvider(true)
	if err != nil {
		slog.Error("Failed to list Brain models", "error", err)
		writeError(w, http.StatusInternalServerError, "Failed to load models")
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success": true,
		"models":  models,
	})
}

// GetSkill returns the currently active Brain skill, or null when none is set.
func (h *BrainHandler) GetSkill(w http.ResponseWriter, r *http.Request) {
	skill, err := h.DB.GetActiveBrainSkill()
	if err != nil {
		slog.Error("Failed to load active Brain skill", "error", err)
		writeError(w, http.StatusInternalServerError, "Failed to load active skill")
		return
	}
	if skill == nil {
		writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "skill": nil})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "skill": skill})
}

// ListChats lists the session user's chats for the current connection;
// archived chats are included only when ?includeArchived=true.
func (h *BrainHandler) ListChats(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeError(w, http.StatusUnauthorized, "Not authenticated")
		return
	}
	includeArchived := strings.EqualFold(strings.TrimSpace(r.URL.Query().Get("includeArchived")), "true")
	chats, err := h.DB.GetBrainChatsByUser(session.ClickhouseUser, session.ConnectionID, includeArchived)
	if err != nil {
		slog.Error("Failed to list Brain chats", "error", err)
		writeError(w, http.StatusInternalServerError, "Failed to load chats")
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success": true,
		"chats":   chats,
	})
}

// CreateChat creates a new chat, optionally pinned to a specific model; the
// model (and its provider) must exist and be active.
func (h *BrainHandler) CreateChat(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeError(w, http.StatusUnauthorized, "Not authenticated")
		return
	}
	var body createChatRequest
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body")
		return
	}
	title := strings.TrimSpace(body.Title)
	if title == "" {
		title = "New Chat"
	}
	providerID := ""
	modelID := strings.TrimSpace(body.ModelID)
	if modelID != "" {
		rt, err := h.DB.GetBrainModelRuntimeByID(modelID)
		if err != nil {
			writeError(w, http.StatusInternalServerError, "Failed to resolve model")
			return
		}
		if rt == nil || !rt.ModelActive || !rt.ProviderActive {
			writeError(w, http.StatusBadRequest, "Model is not available")
			return
		}
		providerID = rt.ProviderID
	}
	chatID, err := h.DB.CreateBrainChat(session.ClickhouseUser, session.ConnectionID, title, providerID, modelID, "", "", "")
	if err != nil {
		slog.Error("Failed to create Brain chat", "error", err)
		writeError(w, http.StatusInternalServerError, "Failed to create chat")
		return
	}
	chat, err := h.DB.GetBrainChatByIDForUser(chatID, session.ClickhouseUser)
	if err != nil || chat == nil {
		// Chat was created; reading it back is best-effort, fall back to the ID.
		writeJSON(w, http.StatusCreated, map[string]interface{}{"success": true, "id": chatID})
		return
	}
	writeJSON(w, http.StatusCreated, map[string]interface{}{"success": true, "chat": chat})
}
// GetChat returns a single chat owned by the session user.
func (h *BrainHandler) GetChat(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeError(w, http.StatusUnauthorized, "Not authenticated")
		return
	}
	chatID := chi.URLParam(r, "chatID")
	chat, err := h.DB.GetBrainChatByIDForUser(chatID, session.ClickhouseUser)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to load chat")
		return
	}
	if chat == nil {
		writeError(w, http.StatusNotFound, "Chat not found")
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "chat": chat})
}

// UpdateChat applies a partial update (title, archived flag, model, schema
// context) to a chat owned by the session user. Request fields left nil keep
// their current values.
func (h *BrainHandler) UpdateChat(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeError(w, http.StatusUnauthorized, "Not authenticated")
		return
	}
	chatID := chi.URLParam(r, "chatID")
	chat, err := h.DB.GetBrainChatByIDForUser(chatID, session.ClickhouseUser)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to load chat")
		return
	}
	if chat == nil {
		writeError(w, http.StatusNotFound, "Chat not found")
		return
	}
	var body updateChatRequest
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body")
		return
	}
	title := chat.Title
	if body.Title != nil {
		// A blank title is ignored rather than erasing the existing one.
		if strings.TrimSpace(*body.Title) != "" {
			title = strings.TrimSpace(*body.Title)
		}
	}
	archived := chat.Archived
	if body.Archived != nil {
		archived = *body.Archived
	}
	providerID := ""
	if chat.ProviderID != nil {
		providerID = *chat.ProviderID
	}
	modelID := ""
	if chat.ModelID != nil {
		modelID = *chat.ModelID
	}
	if body.ModelID != nil {
		// Changing the model re-derives the provider; an empty model clears both.
		modelID = strings.TrimSpace(*body.ModelID)
		providerID = ""
		if modelID != "" {
			rt, err := h.DB.GetBrainModelRuntimeByID(modelID)
			if err != nil {
				writeError(w, http.StatusInternalServerError, "Failed to resolve model")
				return
			}
			if rt == nil || !rt.ModelActive || !rt.ProviderActive {
				writeError(w, http.StatusBadRequest, "Model is not available")
				return
			}
			providerID = rt.ProviderID
		}
	}
	contextDatabase := ""
	if chat.ContextDatabase != nil {
		contextDatabase = *chat.ContextDatabase
	}
	contextTable := ""
	if chat.ContextTable != nil {
		contextTable = *chat.ContextTable
	}
	contextTables := ""
	if chat.ContextTables != nil {
		contextTables = *chat.ContextTables
	}
	if body.ContextDatabase != nil {
		contextDatabase = strings.TrimSpace(*body.ContextDatabase)
	}
	if body.ContextTable != nil {
		contextTable = strings.TrimSpace(*body.ContextTable)
	}
	// If database changes but table wasn't explicitly set, clear table
	if body.ContextDatabase != nil && body.ContextTable == nil {
		contextTable = ""
	}
	if body.ContextTables != nil {
		contextTables = strings.TrimSpace(*body.ContextTables)
		// When using new multi-context format, clear legacy fields
		if contextTables != "" {
			contextDatabase = ""
			contextTable = ""
		}
	}
	if err := h.DB.UpdateBrainChat(chatID, title, providerID, modelID, archived, contextDatabase, contextTable, contextTables); err != nil {
		slog.Error("Failed to update Brain chat", "error", err)
		writeError(w, http.StatusInternalServerError, "Failed to update chat")
		return
	}
	updated, err := h.DB.GetBrainChatByIDForUser(chatID, session.ClickhouseUser)
	if err != nil || updated == nil {
		// Update succeeded; reading the chat back is best-effort.
		writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "chat": updated})
}

// DeleteChat removes a chat after verifying the session user owns it.
func (h *BrainHandler) DeleteChat(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeError(w, http.StatusUnauthorized, "Not authenticated")
		return
	}
	chatID := chi.URLParam(r, "chatID")
	chat, err := h.DB.GetBrainChatByIDForUser(chatID, session.ClickhouseUser)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to load chat")
		return
	}
	if chat == nil {
		writeError(w, http.StatusNotFound, "Chat not found")
		return
	}
	if err := h.DB.DeleteBrainChat(chatID); err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to delete chat")
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
}
// ListMessages returns the full message history of a chat owned by the
// session user.
func (h *BrainHandler) ListMessages(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeError(w, http.StatusUnauthorized, "Not authenticated")
		return
	}
	chatID := chi.URLParam(r, "chatID")
	chat, err := h.DB.GetBrainChatByIDForUser(chatID, session.ClickhouseUser)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to load chat")
		return
	}
	if chat == nil {
		writeError(w, http.StatusNotFound, "Chat not found")
		return
	}
	messages, err := h.DB.GetBrainMessages(chatID)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to load messages")
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "messages": messages})
}

// ListArtifacts returns all artifacts attached to a chat owned by the
// session user.
func (h *BrainHandler) ListArtifacts(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeError(w, http.StatusUnauthorized, "Not authenticated")
		return
	}
	chatID := chi.URLParam(r, "chatID")
	chat, err := h.DB.GetBrainChatByIDForUser(chatID, session.ClickhouseUser)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to load chat")
		return
	}
	if chat == nil {
		writeError(w, http.StatusNotFound, "Chat not found")
		return
	}
	artifacts, err := h.DB.GetBrainArtifacts(chatID)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to load artifacts")
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "artifacts": artifacts})
}

// RunQueryArtifact executes a read-only query using the session user's own
// ClickHouse credentials, stores the result as a "query_result" artifact on
// the chat, optionally links it to a message as a tool call, and audit-logs
// the run. The caller-supplied timeout is in seconds, capped at 5 minutes.
func (h *BrainHandler) RunQueryArtifact(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeError(w, http.StatusUnauthorized, "Not authenticated")
		return
	}
	chatID := chi.URLParam(r, "chatID")
	chat, err := h.DB.GetBrainChatByIDForUser(chatID, session.ClickhouseUser)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to load chat")
		return
	}
	if chat == nil {
		writeError(w, http.StatusNotFound, "Chat not found")
		return
	}
	var body runQueryArtifactRequest
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body")
		return
	}
	query := strings.TrimSpace(body.Query)
	if query == "" {
		writeError(w, http.StatusBadRequest, "Query is required")
		return
	}
	if !isBrainReadOnlyQuery(query) {
		writeError(w, http.StatusBadRequest, "Only read-only queries are allowed in Brain chat artifacts")
		return
	}
	password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials")
		return
	}
	timeout := 30 * time.Second
	if body.Timeout > 0 {
		timeout = time.Duration(body.Timeout) * time.Second
	}
	if timeout > 5*time.Minute {
		timeout = 5 * time.Minute
	}
	result, err := h.Gateway.ExecuteQuery(session.ConnectionID, query, session.ClickhouseUser, password, timeout)
	if err != nil {
		writeError(w, http.StatusBadGateway, err.Error())
		return
	}
	artifactPayload, _ := json.Marshal(map[string]interface{}{
		"query":      query,
		"data":       json.RawMessage(result.Data),
		"meta":       json.RawMessage(result.Meta),
		"statistics": json.RawMessage(result.Stats),
	})
	title := strings.TrimSpace(body.Title)
	if title == "" {
		title = "Query Result"
	}
	artifactID, err := h.DB.CreateBrainArtifact(chatID, body.MessageID, "query_result", title, string(artifactPayload), session.ClickhouseUser)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to store artifact")
		return
	}
	toolInput, _ := json.Marshal(map[string]interface{}{"query": query})
	toolOutput, _ := json.Marshal(map[string]interface{}{"artifact_id": artifactID})
	if strings.TrimSpace(body.MessageID) != "" {
		// Best-effort: link the run to the originating message as a tool call.
		_, _ = h.DB.CreateBrainToolCall(chatID, body.MessageID, "run_readonly_query", string(toolInput), string(toolOutput), "success", "")
	}
	// NOTE(review): this uses r.RemoteAddr directly while the auth handlers
	// use getClientIP(r) — presumably intentional, but confirm.
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "brain.query.run",
		Username:     strPtr(session.ClickhouseUser),
		ConnectionID: strPtr(session.ConnectionID),
		Details:      strPtr(title),
		IPAddress:    strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success":     true,
		"artifact_id": artifactID,
		"result": map[string]interface{}{
			"data":  result.Data,
			"meta":  result.Meta,
			"stats": result.Stats,
		},
	})
}
// StreamMessage appends a user message to a chat and streams the assistant's
// reply back over Server-Sent Events (event types: "delta", "error", "done").
// The assistant message row is created up-front with status "streaming" and
// finalized to "complete" or "error" when the provider stream ends. Langfuse
// tracing and auto-scoring are emitted when the client is enabled.
func (h *BrainHandler) StreamMessage(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeError(w, http.StatusUnauthorized, "Not authenticated")
		return
	}
	chatID := chi.URLParam(r, "chatID")
	chat, err := h.DB.GetBrainChatByIDForUser(chatID, session.ClickhouseUser)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to load chat")
		return
	}
	if chat == nil {
		writeError(w, http.StatusNotFound, "Chat not found")
		return
	}
	var body streamMessageRequest
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body")
		return
	}
	prompt := strings.TrimSpace(body.Content)
	if prompt == "" {
		writeError(w, http.StatusBadRequest, "Message content is required")
		return
	}
	userMessageID, err := h.DB.CreateBrainMessage(chatID, "user", prompt, "complete", "")
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to persist user message")
		return
	}
	// Placeholder assistant row; updated in place as the stream progresses.
	assistantMessageID, err := h.DB.CreateBrainMessage(chatID, "assistant", "", "streaming", "")
	if err != nil {
		writeError(w, http.StatusInternalServerError, "Failed to persist assistant message")
		return
	}
	runtimeModel, err := h.resolveRuntimeModel(chat, strings.TrimSpace(body.ModelID))
	if err != nil {
		_ = h.DB.UpdateBrainMessage(assistantMessageID, "", "error", err.Error())
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	provider, err := braincore.NewProvider(runtimeModel.ProviderKind)
	if err != nil {
		_ = h.DB.UpdateBrainMessage(assistantMessageID, "", "error", err.Error())
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	providerCfg := braincore.ProviderConfig{
		Kind: runtimeModel.ProviderKind,
	}
	if runtimeModel.ProviderBaseURL != nil {
		providerCfg.BaseURL = *runtimeModel.ProviderBaseURL
	}
	if runtimeModel.ProviderEncryptedKey != nil {
		decrypted, decErr := crypto.Decrypt(*runtimeModel.ProviderEncryptedKey, h.Config.AppSecretKey)
		if decErr != nil {
			_ = h.DB.UpdateBrainMessage(assistantMessageID, "", "error", "Failed to decrypt provider API key")
			writeError(w, http.StatusInternalServerError, "Failed to decrypt provider API key")
			return
		}
		providerCfg.APIKey = decrypted
	}
	history, err := h.DB.GetBrainMessages(chatID)
	if err != nil {
		_ = h.DB.UpdateBrainMessage(assistantMessageID, "", "error", "Failed to load chat history")
		writeError(w, http.StatusInternalServerError, "Failed to load chat history")
		return
	}
	// Merge single SchemaContext (legacy) with SchemaContexts array
	var allContexts []schemaContext
	if body.SchemaContext != nil {
		allContexts = append(allContexts, *body.SchemaContext)
	}
	for _, sc := range body.SchemaContexts {
		// Dedupe: skip if already present from legacy field
		dup := false
		for _, existing := range allContexts {
			if existing.Database == sc.Database && existing.Table == sc.Table {
				dup = true
				break
			}
		}
		if !dup {
			allContexts = append(allContexts, sc)
		}
	}
	providerMessages := make([]braincore.Message, 0, len(history)+1)
	systemPrompt := h.buildSystemPrompt(allContexts)
	providerMessages = append(providerMessages, braincore.Message{Role: "system", Content: systemPrompt})
	// Replay only clean, non-empty user/assistant turns to the provider.
	for _, msg := range history {
		role := strings.TrimSpace(strings.ToLower(msg.Role))
		if role != "user" && role != "assistant" {
			continue
		}
		if strings.TrimSpace(msg.Content) == "" {
			continue
		}
		if msg.Status == "error" {
			continue
		}
		providerMessages = append(providerMessages, braincore.Message{Role: role, Content: msg.Content})
	}
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	flusher, ok := w.(http.Flusher)
	if !ok {
		_ = h.DB.UpdateBrainMessage(assistantMessageID, "", "error", "Streaming not supported")
		writeError(w, http.StatusInternalServerError, "Streaming not supported")
		return
	}
	var built strings.Builder
	streamStart := time.Now()
	chatResult, streamErr := provider.StreamChat(r.Context(), providerCfg, runtimeModel.ModelName, providerMessages, func(delta string) error {
		if delta == "" {
			return nil
		}
		built.WriteString(delta)
		return writeSSE(w, flusher, map[string]interface{}{"type": "delta", "delta": delta, "messageId": assistantMessageID})
	})
	streamEnd := time.Now()
	if streamErr != nil {
		errMessage := streamErr.Error()
		if errMessage == "" {
			errMessage = "Unknown provider error"
		}
		modelParameters := brainModelParameters(chatResult, runtimeModel.ProviderKind, runtimeModel.ModelName)
		// Persist whatever partial text streamed before the failure.
		_ = h.DB.UpdateBrainMessage(assistantMessageID, built.String(), "error", errMessage)
		_ = writeSSE(w, flusher, map[string]interface{}{"type": "error", "error": errMessage, "messageId": assistantMessageID})
		// Langfuse: trace error
		if h.Langfuse.IsEnabled() {
			traceID := uuid.NewString()
			h.Langfuse.LogTrace(langfuse.TraceParams{
				ID:        traceID,
				Name:      "brain.chat",
				UserID:    session.ClickhouseUser,
				SessionID: chatID,
				Input:     prompt,
				Release:   version.Version,
				Tags:      []string{runtimeModel.ProviderKind, runtimeModel.ModelName, "brain", "error"},
				Metadata:  map[string]string{"connection_id": session.ConnectionID},
			})
			h.Langfuse.LogGeneration(langfuse.GenerationParams{
				ID:              uuid.NewString(),
				TraceID:         traceID,
				Name:            "StreamChat",
				Model:           runtimeModel.ModelName,
				ModelParameters: modelParameters,
				Input:           providerMessages,
				Output:          errMessage,
				StartTime:       streamStart,
				EndTime:         streamEnd,
				Level:           "ERROR",
			})
			h.Langfuse.LogEvent(langfuse.EventParams{
				TraceID: traceID,
				Name:    "stream_error",
				Input:   errMessage,
				Level:   "ERROR",
			})
		}
		return
	}
	assistantText := built.String()
	if strings.TrimSpace(assistantText) == "" {
		assistantText = "I could not generate a response for that prompt."
	}
	if err := h.DB.UpdateBrainMessage(assistantMessageID, assistantText, "complete", ""); err != nil {
		slog.Warn("Failed to persist assistant message", "error", err)
	}
	if err := h.DB.TouchBrainChat(chatID); err != nil {
		slog.Warn("Failed to update chat activity", "error", err)
	}
	// Auto-title untitled chats from the first prompt.
	title := chat.Title
	if title == "New Chat" || strings.TrimSpace(title) == "" {
		title = autoTitle(prompt)
	}
	streamCtxDB := ""
	if chat.ContextDatabase != nil {
		streamCtxDB = *chat.ContextDatabase
	}
	streamCtxTable := ""
	if chat.ContextTable != nil {
		streamCtxTable = *chat.ContextTable
	}
	streamCtxTables := ""
	if chat.ContextTables != nil {
		streamCtxTables = *chat.ContextTables
	}
	if err := h.DB.UpdateBrainChat(chatID, title, runtimeModel.ProviderID, runtimeModel.ModelID, chat.Archived, streamCtxDB, streamCtxTable, streamCtxTables); err != nil {
		slog.Warn("Failed to update chat title/model", "error", err)
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "brain.chat",
		Username:     strPtr(session.ClickhouseUser),
		ConnectionID: strPtr(session.ConnectionID),
		Details:      strPtr(fmt.Sprintf("chat=%s user_msg=%s", chatID, userMessageID)),
		IPAddress:    strPtr(r.RemoteAddr),
	})
	// Langfuse: trace + generation + auto-scores
	if h.Langfuse.IsEnabled() {
		traceID := uuid.NewString()
		modelParameters := brainModelParameters(chatResult, runtimeModel.ProviderKind, runtimeModel.ModelName)
		metadata := map[string]string{
			"connection_id": session.ConnectionID,
			"provider_kind": runtimeModel.ProviderKind,
		}
		if streamCtxDB != "" {
			metadata["schema_database"] = streamCtxDB
		}
		if streamCtxTable != "" {
			metadata["schema_table"] = streamCtxTable
		}
		h.Langfuse.LogTrace(langfuse.TraceParams{
			ID:        traceID,
			Name:      "brain.chat",
			UserID:    session.ClickhouseUser,
			SessionID: chatID,
			Input:     prompt,
			Output:    assistantText,
			Release:   version.Version,
			Tags:      []string{runtimeModel.ProviderKind, runtimeModel.ModelName, "brain"},
			Metadata:  metadata,
		})
		genParams := langfuse.GenerationParams{
			ID:              uuid.NewString(),
			TraceID:         traceID,
			Name:            "StreamChat",
			Model:           runtimeModel.ModelName,
			ModelParameters: modelParameters,
			Input:           providerMessages,
			Output:          assistantText,
			StartTime:       streamStart,
			EndTime:         streamEnd,
		}
		if chatResult != nil && (chatResult.InputTokens > 0 || chatResult.OutputTokens > 0) {
			genParams.Usage = &langfuse.Usage{
				Input:  chatResult.InputTokens,
				Output: chatResult.OutputTokens,
				Total:  chatResult.InputTokens + chatResult.OutputTokens,
			}
		}
		h.Langfuse.LogGeneration(genParams)
		// Auto-scores
		latencyMs := float64(streamEnd.Sub(streamStart).Milliseconds())
		h.Langfuse.LogScore(langfuse.ScoreParams{
			TraceID:  traceID,
			Name:     "latency_ms",
			Value:    latencyMs,
			DataType: "NUMERIC",
		})
		hasSQLVal := 0.0
		if containsSQL(assistantText) {
			hasSQLVal = 1.0
		}
		h.Langfuse.LogScore(langfuse.ScoreParams{
			TraceID:  traceID,
			Name:     "has_sql",
			Value:    hasSQLVal,
			DataType: "BOOLEAN",
		})
		if chatResult != nil && chatResult.InputTokens > 0 {
			efficiency := float64(chatResult.OutputTokens) / float64(chatResult.InputTokens)
			h.Langfuse.LogScore(langfuse.ScoreParams{
				TraceID:  traceID,
				Name:     "token_efficiency",
				Value:    efficiency,
				DataType: "NUMERIC",
				Comment:  "output_tokens / input_tokens",
			})
		}
	}
	_ = writeSSE(w, flusher, map[string]interface{}{"type": "done", "messageId": assistantMessageID, "chatId": chatID})
}

func (h *BrainHandler) LegacyChat(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } var req struct { Messages []struct { Role string `json:"role"` Content string `json:"content"` } 
`json:"messages"` SchemaContext *schemaContext `json:"schemaContext,omitempty"` } if err := json.NewDecoder(r.Body).Decode(&req); err != nil { writeError(w, http.StatusBadRequest, "Invalid request body") return } if len(req.Messages) == 0 { writeError(w, http.StatusBadRequest, "Messages are required") return } rt, err := h.DB.GetDefaultBrainModelRuntime() if err != nil { writeError(w, http.StatusInternalServerError, "Failed to resolve model") return } if rt == nil { writeError(w, http.StatusBadRequest, "No active AI model configured by admin") return } provider, err := braincore.NewProvider(rt.ProviderKind) if err != nil { writeError(w, http.StatusBadRequest, err.Error()) return } cfg := braincore.ProviderConfig{Kind: rt.ProviderKind} if rt.ProviderBaseURL != nil { cfg.BaseURL = *rt.ProviderBaseURL } if rt.ProviderEncryptedKey != nil { decrypted, decErr := crypto.Decrypt(*rt.ProviderEncryptedKey, h.Config.AppSecretKey) if decErr != nil { writeError(w, http.StatusInternalServerError, "Failed to decrypt provider API key") return } cfg.APIKey = decrypted } var legacyContexts []schemaContext if req.SchemaContext != nil { legacyContexts = append(legacyContexts, *req.SchemaContext) } messages := make([]braincore.Message, 0, len(req.Messages)+1) messages = append(messages, braincore.Message{Role: "system", Content: h.buildSystemPrompt(legacyContexts)}) for _, msg := range req.Messages { role := strings.ToLower(strings.TrimSpace(msg.Role)) if role != "user" && role != "assistant" { continue } messages = append(messages, braincore.Message{Role: role, Content: msg.Content}) } w.Header().Set("Content-Type", "text/event-stream") w.Header().Set("Cache-Control", "no-cache") w.Header().Set("Connection", "keep-alive") flusher, ok := w.(http.Flusher) if !ok { writeError(w, http.StatusInternalServerError, "Streaming not supported") return } var built strings.Builder streamStart := time.Now() chatResult, streamErr := provider.StreamChat(r.Context(), cfg, rt.ModelName, messages, 
func(delta string) error { built.WriteString(delta) return writeSSE(w, flusher, map[string]interface{}{"type": "delta", "delta": delta}) }) streamEnd := time.Now() if streamErr != nil { modelParameters := brainModelParameters(chatResult, rt.ProviderKind, rt.ModelName) _ = writeSSE(w, flusher, map[string]interface{}{"type": "error", "error": streamErr.Error()}) // Langfuse: trace legacy error if h.Langfuse.IsEnabled() { traceID := uuid.NewString() h.Langfuse.LogTrace(langfuse.TraceParams{ ID: traceID, Name: "brain.legacy_chat", UserID: session.ClickhouseUser, Release: version.Version, Tags: []string{rt.ProviderKind, rt.ModelName, "brain", "legacy", "error"}, }) h.Langfuse.LogGeneration(langfuse.GenerationParams{ ID: uuid.NewString(), TraceID: traceID, Name: "StreamChat", Model: rt.ModelName, Input: messages, Output: streamErr.Error(), ModelParameters: modelParameters, StartTime: streamStart, EndTime: streamEnd, Level: "ERROR", }) } return } // Langfuse: trace legacy success if h.Langfuse.IsEnabled() { traceID := uuid.NewString() modelParameters := brainModelParameters(chatResult, rt.ProviderKind, rt.ModelName) h.Langfuse.LogTrace(langfuse.TraceParams{ ID: traceID, Name: "brain.legacy_chat", UserID: session.ClickhouseUser, Release: version.Version, Tags: []string{rt.ProviderKind, rt.ModelName, "brain", "legacy"}, }) genParams := langfuse.GenerationParams{ ID: uuid.NewString(), TraceID: traceID, Name: "StreamChat", Model: rt.ModelName, ModelParameters: modelParameters, Input: messages, Output: built.String(), StartTime: streamStart, EndTime: streamEnd, } if chatResult != nil && (chatResult.InputTokens > 0 || chatResult.OutputTokens > 0) { genParams.Usage = &langfuse.Usage{ Input: chatResult.InputTokens, Output: chatResult.OutputTokens, Total: chatResult.InputTokens + chatResult.OutputTokens, } } h.Langfuse.LogGeneration(genParams) latencyMs := float64(streamEnd.Sub(streamStart).Milliseconds()) h.Langfuse.LogScore(langfuse.ScoreParams{ TraceID: traceID, Name: 
"latency_ms", Value: latencyMs, DataType: "NUMERIC", }) } _ = writeSSE(w, flusher, map[string]interface{}{"type": "done"}) } func brainModelParameters(result *braincore.ChatResult, providerKind, model string) map[string]interface{} { if result != nil && result.ModelParameters != nil { return result.ModelParameters } return braincore.DefaultModelParameters(providerKind, model) } func (h *BrainHandler) resolveRuntimeModel(chat *database.BrainChat, requestedModelID string) (*database.BrainModelRuntime, error) { if requestedModelID != "" { rt, err := h.DB.GetBrainModelRuntimeByID(requestedModelID) if err != nil { return nil, fmt.Errorf("failed to resolve model") } if rt == nil || !rt.ModelActive || !rt.ProviderActive { return nil, fmt.Errorf("selected model is not active") } return rt, nil } if chat != nil && chat.ModelID != nil && strings.TrimSpace(*chat.ModelID) != "" { rt, err := h.DB.GetBrainModelRuntimeByID(*chat.ModelID) if err == nil && rt != nil && rt.ModelActive && rt.ProviderActive { return rt, nil } } rt, err := h.DB.GetDefaultBrainModelRuntime() if err != nil { return nil, fmt.Errorf("failed to load default model") } if rt == nil { return nil, fmt.Errorf("no active AI model configured by admin") } return rt, nil } func (h *BrainHandler) buildSystemPrompt(contexts []schemaContext) string { skillPrompt := "" skill, err := h.DB.GetActiveBrainSkill() if err == nil && skill != nil { skillPrompt = strings.TrimSpace(skill.Content) } prompt := baseBrainPrompt if skillPrompt != "" { prompt += "\n\nActive skills:\n" + skillPrompt } if len(contexts) > 0 { prompt += buildMultiSchemaPrompt(contexts) } return prompt } func buildMultiSchemaPrompt(contexts []schemaContext) string { var sb strings.Builder sb.WriteString("\n\nSchema context:\n") for i, sc := range contexts { if i > 0 { sb.WriteString("\n") } label := "" if sc.Database != "" && sc.Table != "" { label = sc.Database + "." 
+ sc.Table } else if sc.Database != "" { label = sc.Database } else if sc.Table != "" { label = sc.Table } sb.WriteString(fmt.Sprintf("Table %d: %s\n", i+1, label)) if len(sc.Columns) > 0 { sb.WriteString("Columns:\n") for _, col := range sc.Columns { sb.WriteString("- " + col.Name + " (" + col.Type + ")\n") } } } return sb.String() } func writeSSE(w http.ResponseWriter, flusher http.Flusher, payload map[string]interface{}) error { b, err := json.Marshal(payload) if err != nil { return err } if _, err := fmt.Fprintf(w, "data: %s\n\n", b); err != nil { return err } flusher.Flush() return nil } func autoTitle(prompt string) string { t := strings.TrimSpace(prompt) if t == "" { return "New Chat" } if len(t) <= 48 { return t } return strings.TrimSpace(t[:48]) + "..." } func isBrainReadOnlyQuery(query string) bool { re := regexp.MustCompile(`(?is)^\s*(SELECT|WITH|SHOW|DESC|DESCRIBE|EXPLAIN)\b`) return re.MatchString(query) } var sqlBlockPattern = regexp.MustCompile("(?i)```sql") func containsSQL(text string) bool { return sqlBlockPattern.MatchString(text) } ================================================ FILE: internal/server/handlers/connections.go ================================================ package handlers import ( "encoding/json" "fmt" "log/slog" "net/http" "strings" "time" "github.com/go-chi/chi/v5" "github.com/caioricciuti/ch-ui/internal/config" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/license" "github.com/caioricciuti/ch-ui/internal/server/middleware" "github.com/caioricciuti/ch-ui/internal/tunnel" ) // ConnectionsHandler handles connection management routes. type ConnectionsHandler struct { DB *database.DB Gateway *tunnel.Gateway Config *config.Config } // connectionResponse extends Connection with live status information. 
type connectionResponse struct { database.Connection Online bool `json:"online"` LastSeen *time.Time `json:"last_seen,omitempty"` HostInfo any `json:"host_info,omitempty"` } // List returns all connections. // GET / func (h *ConnectionsHandler) List(w http.ResponseWriter, r *http.Request) { conns, err := h.DB.GetConnections() if err != nil { slog.Error("Failed to list connections", "error", err) connJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve connections"}) return } results := make([]connectionResponse, 0, len(conns)) for _, c := range conns { results = append(results, h.buildConnectionResponse(c)) } connJSON(w, http.StatusOK, results) } // Get returns a single connection by ID. // GET /{id} func (h *ConnectionsHandler) Get(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") if id == "" { connJSON(w, http.StatusBadRequest, map[string]string{"error": "Connection ID is required"}) return } conn, err := h.DB.GetConnectionByID(id) if err != nil { slog.Error("Failed to get connection", "error", err, "id", id) connJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve connection"}) return } if conn == nil { connJSON(w, http.StatusNotFound, map[string]string{"error": "Connection not found"}) return } connJSON(w, http.StatusOK, h.buildConnectionResponse(*conn)) } // Create creates a new connection. 
// POST / func (h *ConnectionsHandler) Create(w http.ResponseWriter, r *http.Request) { var body struct { Name string `json:"name"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { connJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } name := strings.TrimSpace(body.Name) if name == "" { connJSON(w, http.StatusBadRequest, map[string]string{"error": "Connection name is required"}) return } token := license.GenerateTunnelToken() id, err := h.DB.CreateConnection(name, token, false) if err != nil { slog.Error("Failed to create connection", "error", err) connJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create connection"}) return } session := middleware.GetSession(r) var username *string if session != nil { username = strPtr(session.ClickhouseUser) } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "connection.created", Username: username, ConnectionID: strPtr(id), Details: strPtr(fmt.Sprintf("Created connection %q", name)), IPAddress: strPtr(r.RemoteAddr), }) conn, err := h.DB.GetConnectionByID(id) if err != nil || conn == nil { slog.Error("Failed to retrieve created connection", "error", err, "id", id) connJSON(w, http.StatusInternalServerError, map[string]string{"error": "Connection created but failed to retrieve"}) return } connJSON(w, http.StatusCreated, map[string]interface{}{ "connection": conn, "tunnel_token": token, "setup_instructions": getSetupInstructions(token), }) } // Delete deletes a connection by ID. 
// DELETE /{id} func (h *ConnectionsHandler) Delete(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") if id == "" { connJSON(w, http.StatusBadRequest, map[string]string{"error": "Connection ID is required"}) return } conn, err := h.DB.GetConnectionByID(id) if err != nil { slog.Error("Failed to get connection for deletion", "error", err, "id", id) connJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve connection"}) return } if conn == nil { connJSON(w, http.StatusNotFound, map[string]string{"error": "Connection not found"}) return } if err := h.DB.DeleteConnection(id); err != nil { slog.Error("Failed to delete connection", "error", err, "id", id) connJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete connection"}) return } session := middleware.GetSession(r) var username *string if session != nil { username = strPtr(session.ClickhouseUser) } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "connection.deleted", Username: username, ConnectionID: strPtr(id), Details: strPtr(fmt.Sprintf("Deleted connection %q", conn.Name)), IPAddress: strPtr(r.RemoteAddr), }) connJSON(w, http.StatusOK, map[string]string{"message": "Connection deleted successfully"}) } // TestConnection tests a ClickHouse connection through the tunnel. 
// POST /{id}/test func (h *ConnectionsHandler) TestConnection(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") if id == "" { connJSON(w, http.StatusBadRequest, map[string]string{"error": "Connection ID is required"}) return } conn, err := h.DB.GetConnectionByID(id) if err != nil { slog.Error("Failed to get connection for test", "error", err, "id", id) connJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve connection"}) return } if conn == nil { connJSON(w, http.StatusNotFound, map[string]string{"error": "Connection not found"}) return } if !h.Gateway.IsTunnelOnline(id) { connJSON(w, http.StatusOK, map[string]interface{}{ "success": false, "error": "Tunnel is not connected. Please ensure the agent is running.", }) return } var body struct { Username string `json:"username"` Password string `json:"password"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { connJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } username := strings.TrimSpace(body.Username) password := body.Password if username == "" { username = "default" } result, err := h.Gateway.TestConnection(id, username, password, 15*time.Second) if err != nil { slog.Warn("Connection test failed", "error", err, "id", id) connJSON(w, http.StatusOK, map[string]interface{}{ "success": false, "error": err.Error(), }) return } connJSON(w, http.StatusOK, result) } // GetToken returns the tunnel token for a connection. 
// GET /{id}/token func (h *ConnectionsHandler) GetToken(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") if id == "" { connJSON(w, http.StatusBadRequest, map[string]string{"error": "Connection ID is required"}) return } conn, err := h.DB.GetConnectionByID(id) if err != nil { slog.Error("Failed to get connection for token", "error", err, "id", id) connJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve connection"}) return } if conn == nil { connJSON(w, http.StatusNotFound, map[string]string{"error": "Connection not found"}) return } connJSON(w, http.StatusOK, map[string]interface{}{ "tunnel_token": conn.TunnelToken, "setup_instructions": getSetupInstructions(conn.TunnelToken), }) } // RegenerateToken generates a new tunnel token for a connection. // POST /{id}/regenerate-token func (h *ConnectionsHandler) RegenerateToken(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") if id == "" { connJSON(w, http.StatusBadRequest, map[string]string{"error": "Connection ID is required"}) return } conn, err := h.DB.GetConnectionByID(id) if err != nil { slog.Error("Failed to get connection for token regeneration", "error", err, "id", id) connJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve connection"}) return } if conn == nil { connJSON(w, http.StatusNotFound, map[string]string{"error": "Connection not found"}) return } newToken := license.GenerateTunnelToken() if err := h.DB.UpdateConnectionToken(id, newToken); err != nil { slog.Error("Failed to regenerate token", "error", err, "id", id) connJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to regenerate token"}) return } session := middleware.GetSession(r) var username *string if session != nil { username = strPtr(session.ClickhouseUser) } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "connection.token_regenerated", Username: username, ConnectionID: strPtr(id), Details: 
strPtr(fmt.Sprintf("Regenerated token for connection %q", conn.Name)), IPAddress: strPtr(r.RemoteAddr), }) connJSON(w, http.StatusOK, map[string]interface{}{ "tunnel_token": newToken, "setup_instructions": getSetupInstructions(newToken), "message": "Token regenerated successfully. The previous token is now invalid.", }) } // buildConnectionResponse enriches a Connection with live status from the gateway. func (h *ConnectionsHandler) buildConnectionResponse(c database.Connection) connectionResponse { resp := connectionResponse{ Connection: c, } online, lastSeen := h.Gateway.GetTunnelStatus(c.ID) resp.Online = online if online && !lastSeen.IsZero() { resp.LastSeen = &lastSeen } if c.HostInfoJSON != nil && *c.HostInfoJSON != "" { var hostInfo database.HostInfo if err := json.Unmarshal([]byte(*c.HostInfoJSON), &hostInfo); err == nil { resp.HostInfo = hostInfo } } return resp } // getSetupInstructions returns setup instructions for connecting a tunnel. func getSetupInstructions(token string) map[string]string { return map[string]string{ "connect": fmt.Sprintf("ch-ui connect --url /connect --key %s", token), "service": fmt.Sprintf("ch-ui service install --url /connect --key %s", token), } } // connJSON writes a JSON response. 
func connJSON(w http.ResponseWriter, status int, v interface{}) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(status) json.NewEncoder(w).Encode(v) } ================================================ FILE: internal/server/handlers/dashboards.go ================================================ package handlers import ( "encoding/json" "log/slog" "net/http" "strings" "time" "github.com/go-chi/chi/v5" "github.com/caioricciuti/ch-ui/internal/config" "github.com/caioricciuti/ch-ui/internal/crypto" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/queryproc" "github.com/caioricciuti/ch-ui/internal/server/middleware" "github.com/caioricciuti/ch-ui/internal/tunnel" ) // DashboardsHandler handles dashboard and panel CRUD operations. type DashboardsHandler struct { DB *database.DB Gateway *tunnel.Gateway Config *config.Config } // Routes returns a chi.Router with all dashboard and panel routes mounted. func (h *DashboardsHandler) Routes() chi.Router { r := chi.NewRouter() r.Get("/", h.ListDashboards) r.Post("/", h.CreateDashboard) r.Post("/query", h.ExecutePanelQuery) r.Route("/{id}", func(r chi.Router) { r.Get("/", h.GetDashboard) r.Put("/", h.UpdateDashboard) r.Delete("/", h.DeleteDashboard) // Panel CRUD r.Post("/panels", h.CreatePanel) r.Put("/panels/{panelId}", h.UpdatePanel) r.Delete("/panels/{panelId}", h.DeletePanel) }) return r } // ---------- Dashboard CRUD ---------- // ListDashboards returns all dashboards. 
func (h *DashboardsHandler) ListDashboards(w http.ResponseWriter, r *http.Request) { if err := h.DB.EnsureSystemOverviewDashboard(); err != nil { slog.Warn("Failed to ensure default system dashboard", "error", err) } dashboards, err := h.DB.GetDashboards() if err != nil { slog.Error("Failed to list dashboards", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list dashboards"}) return } if dashboards == nil { dashboards = []database.Dashboard{} } writeJSON(w, http.StatusOK, map[string]interface{}{"dashboards": dashboards}) } // GetDashboard returns a single dashboard with all its panels. func (h *DashboardsHandler) GetDashboard(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Dashboard ID is required"}) return } dashboard, err := h.DB.GetDashboardByID(id) if err != nil { slog.Error("Failed to get dashboard", "error", err, "id", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get dashboard"}) return } if dashboard == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Dashboard not found"}) return } panels, err := h.DB.GetPanelsByDashboard(id) if err != nil { slog.Error("Failed to get panels", "error", err, "dashboard", id) panels = []database.Panel{} } if panels == nil { panels = []database.Panel{} } writeJSON(w, http.StatusOK, map[string]interface{}{ "dashboard": dashboard, "panels": panels, }) } // CreateDashboard creates a new dashboard. 
func (h *DashboardsHandler) CreateDashboard(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } var body struct { Name string `json:"name"` Description string `json:"description"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid JSON body"}) return } name := strings.TrimSpace(body.Name) if name == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Name is required"}) return } id, err := h.DB.CreateDashboard(name, strings.TrimSpace(body.Description), session.ClickhouseUser) if err != nil { slog.Error("Failed to create dashboard", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create dashboard"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "dashboard.created", Username: strPtr(session.ClickhouseUser), Details: strPtr(name), }) dashboard, err := h.DB.GetDashboardByID(id) if err != nil || dashboard == nil { writeJSON(w, http.StatusCreated, map[string]string{"id": id}) return } writeJSON(w, http.StatusCreated, map[string]interface{}{"dashboard": dashboard}) } // UpdateDashboard partially updates a dashboard. 
func (h *DashboardsHandler) UpdateDashboard(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := chi.URLParam(r, "id") if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Dashboard ID is required"}) return } existing, err := h.DB.GetDashboardByID(id) if err != nil { slog.Error("Failed to get dashboard for update", "error", err, "id", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get dashboard"}) return } if existing == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Dashboard not found"}) return } var body struct { Name *string `json:"name"` Description *string `json:"description"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid JSON body"}) return } name := existing.Name description := "" if existing.Description != nil { description = *existing.Description } changed := false if body.Name != nil { n := strings.TrimSpace(*body.Name) if n == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Name cannot be empty"}) return } name = n changed = true } if body.Description != nil { description = strings.TrimSpace(*body.Description) changed = true } if !changed { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "No fields to update"}) return } if err := h.DB.UpdateDashboard(id, name, description); err != nil { slog.Error("Failed to update dashboard", "error", err, "id", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update dashboard"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "dashboard.updated", Username: strPtr(session.ClickhouseUser), Details: strPtr(name), }) dashboard, err := h.DB.GetDashboardByID(id) if err != nil || dashboard == nil { writeJSON(w, 
http.StatusOK, map[string]interface{}{"success": true}) return } writeJSON(w, http.StatusOK, map[string]interface{}{"dashboard": dashboard}) } // DeleteDashboard deletes a dashboard and all its panels. func (h *DashboardsHandler) DeleteDashboard(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := chi.URLParam(r, "id") if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Dashboard ID is required"}) return } existing, err := h.DB.GetDashboardByID(id) if err != nil { slog.Error("Failed to get dashboard for delete", "error", err, "id", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get dashboard"}) return } if existing == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Dashboard not found"}) return } if err := h.DB.DeleteDashboard(id); err != nil { slog.Error("Failed to delete dashboard", "error", err, "id", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete dashboard"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "dashboard.deleted", Username: strPtr(session.ClickhouseUser), Details: strPtr(existing.Name), }) writeJSON(w, http.StatusOK, map[string]interface{}{"success": true}) } // ---------- Panel CRUD ---------- // CreatePanel creates a new panel in a dashboard. 
func (h *DashboardsHandler) CreatePanel(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } dashboardID := chi.URLParam(r, "id") if dashboardID == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Dashboard ID is required"}) return } dashboard, err := h.DB.GetDashboardByID(dashboardID) if err != nil || dashboard == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Dashboard not found"}) return } var body struct { Name string `json:"name"` PanelType string `json:"panel_type"` Query string `json:"query"` ConnectionID string `json:"connection_id"` Config string `json:"config"` LayoutX *int `json:"layout_x"` LayoutY *int `json:"layout_y"` LayoutW *int `json:"layout_w"` LayoutH *int `json:"layout_h"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid JSON body"}) return } name := strings.TrimSpace(body.Name) if name == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Name is required"}) return } query := strings.TrimSpace(body.Query) if query == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Query is required"}) return } panelType := strings.TrimSpace(body.PanelType) if panelType == "" { panelType = "table" } connectionID := strings.TrimSpace(body.ConnectionID) panelConfig := strings.TrimSpace(body.Config) x, y, w2, h2 := 0, 0, 6, 4 if body.LayoutX != nil { x = *body.LayoutX } if body.LayoutY != nil { y = *body.LayoutY } if body.LayoutW != nil { w2 = *body.LayoutW } if body.LayoutH != nil { h2 = *body.LayoutH } id, err := h.DB.CreatePanel(dashboardID, name, panelType, query, connectionID, panelConfig, x, y, w2, h2) if err != nil { slog.Error("Failed to create panel", "error", err, "dashboard", dashboardID) writeJSON(w, http.StatusInternalServerError, 
map[string]string{"error": "Failed to create panel"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "panel.created", Username: strPtr(session.ClickhouseUser), Details: strPtr(name), }) panel, err := h.DB.GetPanelByID(id) if err != nil || panel == nil { writeJSON(w, http.StatusCreated, map[string]string{"id": id}) return } writeJSON(w, http.StatusCreated, map[string]interface{}{"panel": panel}) } // UpdatePanel partially updates a panel. func (h *DashboardsHandler) UpdatePanel(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } dashboardID := chi.URLParam(r, "id") panelID := chi.URLParam(r, "panelId") if dashboardID == "" || panelID == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Dashboard ID and panel ID are required"}) return } existing, err := h.DB.GetPanelByID(panelID) if err != nil { slog.Error("Failed to get panel for update", "error", err, "panel", panelID) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get panel"}) return } if existing == nil || existing.DashboardID != dashboardID { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Panel not found"}) return } var body struct { Name *string `json:"name"` PanelType *string `json:"panel_type"` Query *string `json:"query"` ConnectionID *string `json:"connection_id"` Config *string `json:"config"` LayoutX *int `json:"layout_x"` LayoutY *int `json:"layout_y"` LayoutW *int `json:"layout_w"` LayoutH *int `json:"layout_h"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid JSON body"}) return } name := existing.Name panelType := existing.PanelType query := existing.Query connectionID := "" if existing.ConnectionID != nil { connectionID = *existing.ConnectionID } panelConfig := existing.Config x, y, pw, ph := 
existing.LayoutX, existing.LayoutY, existing.LayoutW, existing.LayoutH changed := false if body.Name != nil { name = strings.TrimSpace(*body.Name) changed = true } if body.PanelType != nil { panelType = strings.TrimSpace(*body.PanelType) changed = true } if body.Query != nil { query = strings.TrimSpace(*body.Query) changed = true } if body.ConnectionID != nil { connectionID = strings.TrimSpace(*body.ConnectionID) changed = true } if body.Config != nil { panelConfig = *body.Config changed = true } if body.LayoutX != nil { x = *body.LayoutX changed = true } if body.LayoutY != nil { y = *body.LayoutY changed = true } if body.LayoutW != nil { pw = *body.LayoutW changed = true } if body.LayoutH != nil { ph = *body.LayoutH changed = true } if !changed { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "No fields to update"}) return } if err := h.DB.UpdatePanel(panelID, name, panelType, query, connectionID, panelConfig, x, y, pw, ph); err != nil { slog.Error("Failed to update panel", "error", err, "panel", panelID) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update panel"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "panel.updated", Username: strPtr(session.ClickhouseUser), Details: strPtr(name), }) panel, err := h.DB.GetPanelByID(panelID) if err != nil || panel == nil { writeJSON(w, http.StatusOK, map[string]interface{}{"success": true}) return } writeJSON(w, http.StatusOK, map[string]interface{}{"panel": panel}) } // DeletePanel deletes a panel from a dashboard. 
func (h *DashboardsHandler) DeletePanel(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } dashboardID := chi.URLParam(r, "id") panelID := chi.URLParam(r, "panelId") if dashboardID == "" || panelID == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Dashboard ID and panel ID are required"}) return } existing, err := h.DB.GetPanelByID(panelID) if err != nil { slog.Error("Failed to get panel for delete", "error", err, "panel", panelID) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get panel"}) return } if existing == nil || existing.DashboardID != dashboardID { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Panel not found"}) return } if err := h.DB.DeletePanel(panelID); err != nil { slog.Error("Failed to delete panel", "error", err, "panel", panelID) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete panel"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "panel.deleted", Username: strPtr(session.ClickhouseUser), Details: strPtr(existing.Name), }) writeJSON(w, http.StatusOK, map[string]interface{}{"success": true}) } // ---------- Panel Query Execution ---------- // ExecutePanelQuery executes a SQL query through the tunnel for a panel. 
func (h *DashboardsHandler) ExecutePanelQuery(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } var body struct { Query string `json:"query"` Timeout *int `json:"timeout"` TimeRange *queryproc.TimeRange `json:"time_range"` TimeField string `json:"time_field"` TimeFieldUnit string `json:"time_field_unit"` MaxDataPoints *int `json:"max_data_points"` Table string `json:"table"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid JSON body"}) return } query := strings.TrimSpace(body.Query) if query == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Query is required"}) return } maxDataPoints := 1000 if body.MaxDataPoints != nil && *body.MaxDataPoints > 0 { maxDataPoints = *body.MaxDataPoints } processed := queryproc.ProcessQueryVariables(queryproc.ProcessorOptions{ Query: query, TimeRange: body.TimeRange, TimeField: strings.TrimSpace(body.TimeField), TimeFieldUnit: strings.TrimSpace(body.TimeFieldUnit), MaxDataPoints: maxDataPoints, Table: strings.TrimSpace(body.Table), }) if len(processed.Errors) > 0 { writeJSON(w, http.StatusBadRequest, map[string]interface{}{ "success": false, "error": strings.Join(processed.Errors, "; "), }) return } query = strings.TrimSpace(processed.Query) if query == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Processed query is empty"}) return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to decrypt credentials"}) return } timeout := 30 * time.Second if body.Timeout != nil && *body.Timeout > 0 { timeout = time.Duration(*body.Timeout) * time.Second if timeout > 5*time.Minute { timeout = 5 * 
time.Minute } } start := time.Now() result, err := h.Gateway.ExecuteQuery(session.ConnectionID, query, session.ClickhouseUser, password, timeout) elapsed := time.Since(start) if err != nil { slog.Warn("Panel query failed", "error", err, "user", session.ClickhouseUser) writeJSON(w, http.StatusBadGateway, map[string]interface{}{ "success": false, "error": err.Error(), "elapsed_ms": elapsed.Milliseconds(), }) return } rows := countRows(result.Data) writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "data": result.Data, "meta": result.Meta, "statistics": result.Stats, "rows": rows, "elapsed_ms": elapsed.Milliseconds(), "query": query, "variables": processed.InterpolatedVars, }) } ================================================ FILE: internal/server/handlers/governance.go ================================================ package handlers import ( "context" "database/sql" "encoding/json" "fmt" "log/slog" "net/http" "strconv" "strings" "time" "github.com/caioricciuti/ch-ui/internal/config" "github.com/caioricciuti/ch-ui/internal/crypto" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/governance" "github.com/caioricciuti/ch-ui/internal/server/middleware" "github.com/caioricciuti/ch-ui/internal/tunnel" "github.com/go-chi/chi/v5" ) // GovernanceHandler handles all governance-related HTTP endpoints. type GovernanceHandler struct { DB *database.DB Gateway *tunnel.Gateway Config *config.Config Store *governance.Store Syncer *governance.Syncer } // Routes returns a chi.Router with all governance routes mounted. 
func (h *GovernanceHandler) Routes() chi.Router {
	r := chi.NewRouter()
	// Overview & Sync
	r.Get("/overview", h.GetOverview)
	r.Post("/sync", h.TriggerSync)
	r.Post("/sync/{type}", h.TriggerSingleSync)
	r.Get("/sync/status", h.GetSyncStatus)
	// Metadata — reads are open to any authenticated user; writes (notes,
	// comments) require the admin role via middleware.RequireAdmin.
	r.Get("/databases", h.ListDatabases)
	r.Get("/tables", h.ListTables)
	r.Get("/tables/{db}/{table}", h.GetTableDetail)
	r.Get("/tables/{db}/{table}/notes", h.ListTableNotes)
	r.Get("/tables/{db}/{table}/columns/{column}/notes", h.ListColumnNotes)
	r.With(middleware.RequireAdmin(h.DB)).Post("/tables/{db}/{table}/notes", h.CreateTableNote)
	r.With(middleware.RequireAdmin(h.DB)).Post("/tables/{db}/{table}/columns/{column}/notes", h.CreateColumnNote)
	r.With(middleware.RequireAdmin(h.DB)).Delete("/notes/{id}", h.DeleteObjectNote)
	r.With(middleware.RequireAdmin(h.DB)).Put("/tables/{db}/{table}/comment", h.UpdateTableComment)
	r.With(middleware.RequireAdmin(h.DB)).Put("/tables/{db}/{table}/columns/{column}/comment", h.UpdateColumnComment)
	r.Get("/schema-changes", h.ListSchemaChanges)
	// Query Log
	r.Get("/query-log", h.ListQueryLog)
	r.Get("/query-log/top", h.TopQueries)
	r.Get("/query-log/{query_id}", h.GetQueryByQueryID)
	// Lineage
	r.Get("/lineage", h.GetLineage)
	r.Get("/lineage/graph", h.GetLineageGraph)
	// View dependency graph (structural lineage from MV/View definitions)
	r.Get("/view-graph", h.GetViewGraph)
	// Tags
	r.Get("/tags", h.ListTags)
	r.Post("/tags", h.CreateTag)
	r.Delete("/tags/{id}", h.DeleteTag)
	// Access
	r.Route("/access", func(ar chi.Router) {
		ar.Get("/users", h.ListChUsers)
		ar.With(middleware.RequireAdmin(h.DB)).Post("/users", h.CreateChUser)
		ar.With(middleware.RequireAdmin(h.DB)).Delete("/users/{name}", h.DeleteChUser)
		ar.Get("/roles", h.ListChRoles)
		ar.Get("/matrix", h.GetAccessMatrix)
		ar.Get("/over-permissions", h.GetOverPermissions)
	})
	// Policies — admin-only in their entirety, including reads.
	r.Route("/policies", func(pr chi.Router) {
		pr.With(middleware.RequireAdmin(h.DB)).Get("/", h.ListPolicies)
		pr.With(middleware.RequireAdmin(h.DB)).Post("/", h.CreatePolicy)
		pr.With(middleware.RequireAdmin(h.DB)).Get("/{id}", h.GetPolicy)
		pr.With(middleware.RequireAdmin(h.DB)).Put("/{id}", h.UpdatePolicy)
		pr.With(middleware.RequireAdmin(h.DB)).Delete("/{id}", h.DeletePolicy)
	})
	// Violations
	r.Get("/violations", h.ListViolations)
	r.With(middleware.RequireAdmin(h.DB)).Post("/violations/{id}/incident", h.CreateIncidentFromViolation)
	// Incidents
	r.Get("/incidents", h.ListIncidents)
	r.Get("/incidents/{id}", h.GetIncident)
	r.With(middleware.RequireAdmin(h.DB)).Post("/incidents", h.CreateIncident)
	r.With(middleware.RequireAdmin(h.DB)).Put("/incidents/{id}", h.UpdateIncident)
	r.Get("/incidents/{id}/comments", h.ListIncidentComments)
	r.With(middleware.RequireAdmin(h.DB)).Post("/incidents/{id}/comments", h.CreateIncidentComment)
	// Audit logs
	r.Get("/audit-logs", h.GetAuditLogs)
	// ClickHouse query log
	r.Get("/clickhouse-query-log", h.GetClickHouseQueryLog)
	// Alerts management
	r.Route("/alerts", func(ar chi.Router) {
		ar.Get("/channels", h.ListAlertChannels)
		ar.Post("/channels", h.CreateAlertChannel)
		ar.Put("/channels/{id}", h.UpdateAlertChannel)
		ar.Delete("/channels/{id}", h.DeleteAlertChannel)
		ar.Post("/channels/{id}/test", h.TestAlertChannel)
		ar.Get("/rules", h.ListAlertRules)
		ar.Post("/rules", h.CreateAlertRule)
		ar.Put("/rules/{id}", h.UpdateAlertRule)
		ar.Delete("/rules/{id}", h.DeleteAlertRule)
		ar.Get("/events", h.ListAlertEvents)
	})
	return r
}

// ── Helpers ──────────────────────────────────────────────────────────────────

// getCredentials extracts the ClickHouse credentials for the current session,
// decrypting the stored password with the app secret key.
func (h *GovernanceHandler) getCredentials(r *http.Request) (*governance.CHCredentials, error) {
	session := middleware.GetSession(r)
	if session == nil {
		return nil, fmt.Errorf("not authenticated")
	}
	password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt credentials: %w", err)
	}
	return &governance.CHCredentials{
		ConnectionID: session.ConnectionID,
		User:         session.ClickhouseUser,
		Password:     password,
	}, nil
}

func (h
*GovernanceHandler) executeClickHouseSQL(creds *governance.CHCredentials, sql string) error {
	// Fire-and-check helper: runs a DDL/utility statement with a fixed 30s timeout.
	_, err := h.Gateway.ExecuteQuery(creds.ConnectionID, sql, creds.User, creds.Password, 30*time.Second)
	if err != nil {
		return fmt.Errorf("execute clickhouse query: %w", err)
	}
	return nil
}

// triggerSyncAsync kicks off a background single-type sync for the connection.
// It is a no-op when governance sync is disabled in settings; failures are
// logged but never surfaced to the caller.
func (h *GovernanceHandler) triggerSyncAsync(creds governance.CHCredentials, syncType governance.SyncType) {
	if !h.DB.GovernanceSyncEnabled() {
		return
	}
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
		defer cancel()
		if err := h.Syncer.SyncSingle(ctx, creds, syncType); err != nil {
			slog.Warn("Governance async sync failed", "connection", creds.ConnectionID, "type", syncType, "error", err)
		}
	}()
}

// connectionID returns the session's connection ID, or "" when unauthenticated.
func (h *GovernanceHandler) connectionID(r *http.Request) string {
	session := middleware.GetSession(r)
	if session == nil {
		return ""
	}
	return session.ConnectionID
}

// queryInt parses an integer query parameter, falling back to defaultVal
// when the parameter is absent or malformed.
func queryInt(r *http.Request, key string, defaultVal int) int {
	v := r.URL.Query().Get(key)
	if v == "" {
		return defaultVal
	}
	n, err := strconv.Atoi(v)
	if err != nil {
		return defaultVal
	}
	return n
}

// queryIntBounded is queryInt clamped to [minVal, maxVal].
func queryIntBounded(r *http.Request, key string, defaultVal, minVal, maxVal int) int {
	n := queryInt(r, key, defaultVal)
	if n < minVal {
		return minVal
	}
	if n > maxVal {
		return maxVal
	}
	return n
}

// ── Overview & Sync ──────────────────────────────────────────────────────────

// GetOverview returns the aggregated governance overview for the connection.
func (h *GovernanceHandler) GetOverview(w http.ResponseWriter, r *http.Request) {
	connID := h.connectionID(r)
	if connID == "" {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	overview, err := h.Store.GetOverview(connID)
	if err != nil {
		slog.Error("Failed to get governance overview", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get overview"})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"overview": overview})
}

// TriggerSync runs a full, synchronous governance sync for the connection.
// Returns 409 when sync is disabled or the syncer reports a conflict.
func (h *GovernanceHandler) TriggerSync(w http.ResponseWriter, r *http.Request) {
	if !h.DB.GovernanceSyncEnabled() {
		writeJSON(w, http.StatusConflict, map[string]string{
			"error": "governance_sync_disabled",
			"hint":  "Enable governance sync in Governance → Settings before triggering a sync.",
		})
		return
	}
	creds, err := h.getCredentials(r)
	if err != nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": err.Error()})
		return
	}
	result, err := h.Syncer.SyncConnection(r.Context(), *creds)
	if err != nil {
		writeJSON(w, http.StatusConflict, map[string]string{"error": err.Error()})
		return
	}
	// Session is non-nil here: getCredentials already succeeded above.
	session := middleware.GetSession(r)
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "governance.sync",
		Username:     strPtr(session.ClickhouseUser),
		ConnectionID: strPtr(session.ConnectionID),
		Details:      strPtr("full sync triggered"),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "result": result})
}

// TriggerSingleSync runs one sync type (metadata, query_log, or access)
// synchronously for the connection.
func (h *GovernanceHandler) TriggerSingleSync(w http.ResponseWriter, r *http.Request) {
	if !h.DB.GovernanceSyncEnabled() {
		writeJSON(w, http.StatusConflict, map[string]string{
			"error": "governance_sync_disabled",
			"hint":  "Enable governance sync in Governance → Settings before triggering a sync.",
		})
		return
	}
	syncType := governance.SyncType(chi.URLParam(r, "type"))
	if syncType != governance.SyncMetadata && syncType != governance.SyncQueryLog && syncType != governance.SyncAccess {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid sync type. Use: metadata, query_log, access"})
		return
	}
	creds, err := h.getCredentials(r)
	if err != nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": err.Error()})
		return
	}
	if err := h.Syncer.SyncSingle(r.Context(), *creds, syncType); err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
}

// GetSyncStatus reports the per-type sync state for the connection.
func (h *GovernanceHandler) GetSyncStatus(w http.ResponseWriter, r *http.Request) {
	connID := h.connectionID(r)
	if connID == "" {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	states, err := h.Store.GetSyncStates(connID)
	if err != nil {
		slog.Error("Failed to get sync status", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get sync status"})
		return
	}
	// Normalize nil → empty slice so JSON clients always see an array.
	if states == nil {
		states = []governance.SyncState{}
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"sync_states": states})
}

// ── Metadata ─────────────────────────────────────────────────────────────────

// ListDatabases returns the synced ClickHouse databases for the connection.
func (h *GovernanceHandler) ListDatabases(w http.ResponseWriter, r *http.Request) {
	connID := h.connectionID(r)
	if connID == "" {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	databases, err := h.Store.GetDatabases(connID)
	if err != nil {
		slog.Error("Failed to list governance databases", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list databases"})
		return
	}
	if databases == nil {
		databases = []governance.GovDatabase{}
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"databases": databases})
}

// ListTables returns synced tables, optionally filtered by the ?database=
// query parameter, each enriched with its table-level tags.
func (h *GovernanceHandler) ListTables(w http.ResponseWriter, r *http.Request) {
	connID := h.connectionID(r)
	if connID == "" {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	dbFilter := r.URL.Query().Get("database")
	var tables []governance.GovTable
	var err
error
	if dbFilter != "" {
		tables, err = h.Store.GetTablesByDatabase(connID, dbFilter)
	} else {
		tables, err = h.Store.GetTables(connID)
	}
	if err != nil {
		slog.Error("Failed to list governance tables", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list tables"})
		return
	}
	if tables == nil {
		tables = []governance.GovTable{}
	}
	// Enrich with tags (tag lookup failures are deliberately ignored —
	// the table list is still useful without them).
	for i := range tables {
		tags, _ := h.Store.GetTagsForTable(connID, tables[i].DatabaseName, tables[i].TableName)
		tagNames := make([]string, 0)
		for _, t := range tags {
			if t.ObjectType == "table" {
				tagNames = append(tagNames, t.Tag)
			}
		}
		tables[i].Tags = tagNames
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"tables": tables, "total": len(tables)})
}

// GetTableDetail returns a single table with its columns, tags, recent
// queries and immediate lineage neighbors.
func (h *GovernanceHandler) GetTableDetail(w http.ResponseWriter, r *http.Request) {
	connID := h.connectionID(r)
	if connID == "" {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	dbName := chi.URLParam(r, "db")
	tableName := chi.URLParam(r, "table")
	table, err := h.Store.GetTableByName(connID, dbName, tableName)
	if err != nil {
		slog.Error("Failed to get table detail", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get table"})
		return
	}
	if table == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Table not found"})
		return
	}
	// Get columns (best-effort; nil normalized to empty slice)
	columns, _ := h.Store.GetColumns(connID, dbName, tableName)
	if columns == nil {
		columns = []governance.GovColumn{}
	}
	// Enrich columns with tags
	for i := range columns {
		colTags, _ := h.Store.GetTagsForColumn(connID, dbName, tableName, columns[i].ColumnName)
		tagNames := make([]string, 0)
		for _, t := range colTags {
			tagNames = append(tagNames, t.Tag)
		}
		columns[i].Tags = tagNames
	}
	// Get table tags
	tableTags, _ := h.Store.GetTagsForTable(connID, dbName, tableName)
	tagNames := make([]string, 0)
	for _, t := range tableTags {
		if t.ObjectType == "table" {
			tagNames = append(tagNames, t.Tag)
		}
	}
	table.Tags = tagNames
	// Get recent queries (most recent 20 touching this table)
	queries, _, _ := h.Store.GetQueryLog(connID, 20, 0, "", dbName+"."+tableName)
	// Get lineage
	upstream, downstream, _ := h.Store.GetLineageForTable(connID, dbName, tableName)
	// "queries" and "recent_queries" carry the same payload — kept for
	// backwards compatibility with older frontend clients.
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"table":          table,
		"columns":        columns,
		"tags":           tableTags,
		"queries":        queries,
		"recent_queries": queries,
		"upstream":       upstream,
		"downstream":     downstream,
	})
}

// UpdateTableComment writes a table comment back to ClickHouse via
// ALTER TABLE ... MODIFY COMMENT, audits the change, and schedules an
// async metadata re-sync.
func (h *GovernanceHandler) UpdateTableComment(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	creds, err := h.getCredentials(r)
	if err != nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": err.Error()})
		return
	}
	dbName := strings.TrimSpace(chi.URLParam(r, "db"))
	tableName := strings.TrimSpace(chi.URLParam(r, "table"))
	if dbName == "" || tableName == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Database and table are required"})
		return
	}
	var body struct {
		Comment string `json:"comment"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	// Identifiers and the comment literal are escaped before being
	// spliced into the DDL statement.
	sql := fmt.Sprintf(
		"ALTER TABLE %s.%s MODIFY COMMENT '%s'",
		escapeIdentifier(dbName), escapeIdentifier(tableName), escapeLiteral(body.Comment),
	)
	if err := h.executeClickHouseSQL(creds, sql); err != nil {
		slog.Error("Failed to update table comment", "connection", session.ConnectionID, "db", dbName, "table", tableName, "error", err)
		writeJSON(w, http.StatusBadGateway, map[string]string{"error": err.Error()})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "governance.table.comment.updated",
		Username:     strPtr(session.ClickhouseUser),
		ConnectionID: strPtr(session.ConnectionID),
		Details:      strPtr(fmt.Sprintf("%s.%s", dbName, tableName)),
	})
	h.triggerSyncAsync(*creds, governance.SyncMetadata)
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
}

// UpdateColumnComment writes a column comment back to ClickHouse via
// ALTER TABLE ... COMMENT COLUMN, audits the change, and schedules an
// async metadata re-sync.
func (h *GovernanceHandler) UpdateColumnComment(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	creds, err := h.getCredentials(r)
	if err != nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": err.Error()})
		return
	}
	dbName := strings.TrimSpace(chi.URLParam(r, "db"))
	tableName := strings.TrimSpace(chi.URLParam(r, "table"))
	columnName := strings.TrimSpace(chi.URLParam(r, "column"))
	if dbName == "" || tableName == "" || columnName == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Database, table, and column are required"})
		return
	}
	var body struct {
		Comment string `json:"comment"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	sql := fmt.Sprintf(
		"ALTER TABLE %s.%s COMMENT COLUMN %s '%s'",
		escapeIdentifier(dbName), escapeIdentifier(tableName), escapeIdentifier(columnName), escapeLiteral(body.Comment),
	)
	if err := h.executeClickHouseSQL(creds, sql); err != nil {
		slog.Error("Failed to update column comment", "connection", session.ConnectionID, "db", dbName, "table", tableName, "column", columnName, "error", err)
		writeJSON(w, http.StatusBadGateway, map[string]string{"error": err.Error()})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "governance.column.comment.updated",
		Username:     strPtr(session.ClickhouseUser),
		ConnectionID: strPtr(session.ConnectionID),
		Details:      strPtr(fmt.Sprintf("%s.%s.%s", dbName, tableName, columnName)),
	})
	h.triggerSyncAsync(*creds, governance.SyncMetadata)
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
}

// ListTableNotes returns up to 200 internal notes attached to a table.
func (h *GovernanceHandler) ListTableNotes(w http.ResponseWriter, r *http.Request) {
	connID := h.connectionID(r)
	if connID == "" {
		writeJSON(w,
http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	dbName := strings.TrimSpace(chi.URLParam(r, "db"))
	tableName := strings.TrimSpace(chi.URLParam(r, "table"))
	if dbName == "" || tableName == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Database and table are required"})
		return
	}
	notes, err := h.Store.ListObjectComments(connID, "table", dbName, tableName, "", 200)
	if err != nil {
		slog.Error("Failed to list table notes", "connection", connID, "db", dbName, "table", tableName, "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list table notes"})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"notes": notes})
}

// ListColumnNotes returns up to 200 internal notes attached to a column.
func (h *GovernanceHandler) ListColumnNotes(w http.ResponseWriter, r *http.Request) {
	connID := h.connectionID(r)
	if connID == "" {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	dbName := strings.TrimSpace(chi.URLParam(r, "db"))
	tableName := strings.TrimSpace(chi.URLParam(r, "table"))
	columnName := strings.TrimSpace(chi.URLParam(r, "column"))
	if dbName == "" || tableName == "" || columnName == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Database, table and column are required"})
		return
	}
	notes, err := h.Store.ListObjectComments(connID, "column", dbName, tableName, columnName, 200)
	if err != nil {
		slog.Error("Failed to list column notes", "connection", connID, "db", dbName, "table", tableName, "column", columnName, "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list column notes"})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"notes": notes})
}

// CreateTableNote attaches an internal note (max 4000 chars) to a table
// and records the action in the audit log.
func (h *GovernanceHandler) CreateTableNote(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	dbName := strings.TrimSpace(chi.URLParam(r, "db"))
	tableName := strings.TrimSpace(chi.URLParam(r, "table"))
	if dbName == "" || tableName == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Database and table are required"})
		return
	}
	var body struct {
		CommentText string `json:"comment_text"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	commentText := strings.TrimSpace(body.CommentText)
	if commentText == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "comment_text is required"})
		return
	}
	if len(commentText) > 4000 {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "comment_text must be <= 4000 characters"})
		return
	}
	id, err := h.Store.CreateObjectComment(session.ConnectionID, "table", dbName, tableName, "", commentText, session.ClickhouseUser)
	if err != nil {
		slog.Error("Failed to create table note", "connection", session.ConnectionID, "db", dbName, "table", tableName, "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create table note"})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "governance.table.note.created",
		Username:     strPtr(session.ClickhouseUser),
		ConnectionID: strPtr(session.ConnectionID),
		Details:      strPtr(fmt.Sprintf("%s.%s", dbName, tableName)),
	})
	writeJSON(w, http.StatusCreated, map[string]interface{}{"id": id, "success": true})
}

// CreateColumnNote attaches an internal note (max 4000 chars) to a column
// and records the action in the audit log.
func (h *GovernanceHandler) CreateColumnNote(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	dbName := strings.TrimSpace(chi.URLParam(r, "db"))
	tableName := strings.TrimSpace(chi.URLParam(r, "table"))
	columnName := strings.TrimSpace(chi.URLParam(r, "column"))
	if dbName == "" || tableName == "" || columnName == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Database, table and column are required"})
		return
	}
	var body struct {
		CommentText string `json:"comment_text"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	commentText := strings.TrimSpace(body.CommentText)
	if commentText == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "comment_text is required"})
		return
	}
	if len(commentText) > 4000 {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "comment_text must be <= 4000 characters"})
		return
	}
	id, err := h.Store.CreateObjectComment(session.ConnectionID, "column", dbName, tableName, columnName, commentText, session.ClickhouseUser)
	if err != nil {
		slog.Error("Failed to create column note", "connection", session.ConnectionID, "db", dbName, "table", tableName, "column", columnName, "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create column note"})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "governance.column.note.created",
		Username:     strPtr(session.ClickhouseUser),
		ConnectionID: strPtr(session.ConnectionID),
		Details:      strPtr(fmt.Sprintf("%s.%s.%s", dbName, tableName, columnName)),
	})
	writeJSON(w, http.StatusCreated, map[string]interface{}{"id": id, "success": true})
}

// DeleteObjectNote deletes a table/column note by ID, scoped to the
// caller's connection; sql.ErrNoRows maps to a 404.
func (h *GovernanceHandler) DeleteObjectNote(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := strings.TrimSpace(chi.URLParam(r, "id"))
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "id is required"})
		return
	}
	if err := h.Store.DeleteObjectComment(session.ConnectionID, id); err != nil {
		if err == sql.ErrNoRows {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": "Note not found"})
			return
		}
		slog.Error("Failed to delete object note",
"connection", session.ConnectionID, "id", id, "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete note"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.object.note.deleted", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(id), }) writeJSON(w, http.StatusOK, map[string]interface{}{"success": true}) } func (h *GovernanceHandler) ListSchemaChanges(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } limit := queryIntBounded(r, "limit", 50, 1, 500) changes, err := h.Store.GetSchemaChanges(connID, limit) if err != nil { slog.Error("Failed to list schema changes", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list schema changes"}) return } if changes == nil { changes = []governance.SchemaChange{} } writeJSON(w, http.StatusOK, map[string]interface{}{"changes": changes}) } // ── Query Log ──────────────────────────────────────────────────────────────── func (h *GovernanceHandler) ListQueryLog(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } limit := queryIntBounded(r, "limit", 100, 1, 5000) offset := queryIntBounded(r, "offset", 0, 0, 1000000) user := r.URL.Query().Get("user") table := r.URL.Query().Get("table") entries, total, err := h.Store.GetQueryLog(connID, limit, offset, user, table) if err != nil { slog.Error("Failed to list query log", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list query log"}) return } if entries == nil { entries = []governance.QueryLogEntry{} } writeJSON(w, http.StatusOK, map[string]interface{}{"entries": entries, "total": total}) } func (h 
*GovernanceHandler) TopQueries(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } limit := queryIntBounded(r, "limit", 20, 1, 200) top, err := h.Store.GetTopQueries(connID, limit) if err != nil { slog.Error("Failed to get top queries", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get top queries"}) return } if top == nil { top = []map[string]interface{}{} } writeJSON(w, http.StatusOK, map[string]interface{}{"queries": top, "top_queries": top}) } // ── Lineage ────────────────────────────────────────────────────────────────── func (h *GovernanceHandler) GetLineage(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } dbName := r.URL.Query().Get("database") tableName := r.URL.Query().Get("table") if dbName == "" || tableName == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "database and table query params required"}) return } upstream, downstream, err := h.Store.GetLineageForTable(connID, dbName, tableName) if err != nil { slog.Error("Failed to get lineage", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get lineage"}) return } if upstream == nil { upstream = []governance.LineageEdge{} } if downstream == nil { downstream = []governance.LineageEdge{} } // Build graph representation nodeMap := make(map[string]governance.LineageNode) currentKey := dbName + "." + tableName nodeMap[currentKey] = governance.LineageNode{ ID: currentKey, Database: dbName, Table: tableName, Type: "current", } for _, e := range upstream { key := e.SourceDatabase + "." 
+ e.SourceTable if _, ok := nodeMap[key]; !ok { nodeMap[key] = governance.LineageNode{ ID: key, Database: e.SourceDatabase, Table: e.SourceTable, Type: "source", } } } for _, e := range downstream { key := e.TargetDatabase + "." + e.TargetTable if _, ok := nodeMap[key]; !ok { nodeMap[key] = governance.LineageNode{ ID: key, Database: e.TargetDatabase, Table: e.TargetTable, Type: "target", } } } nodes := make([]governance.LineageNode, 0, len(nodeMap)) for _, n := range nodeMap { nodes = append(nodes, n) } allEdges := append(upstream, downstream...) // Enrich: include_columns=true attaches column metadata to nodes and column edges to edges if r.URL.Query().Get("include_columns") == "true" { enrichLineageNodes(h.Store, connID, nodes) enrichLineageEdges(h.Store, allEdges) } writeJSON(w, http.StatusOK, map[string]interface{}{ "graph": governance.LineageGraph{Nodes: nodes, Edges: allEdges}, }) } func (h *GovernanceHandler) GetLineageGraph(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } edges, err := h.Store.GetFullLineageGraph(connID) if err != nil { slog.Error("Failed to get lineage graph", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get lineage graph"}) return } if edges == nil { edges = []governance.LineageEdge{} } // Build nodes from edges nodeMap := make(map[string]governance.LineageNode) for _, e := range edges { srcKey := e.SourceDatabase + "." + e.SourceTable if _, ok := nodeMap[srcKey]; !ok { nodeMap[srcKey] = governance.LineageNode{ ID: srcKey, Database: e.SourceDatabase, Table: e.SourceTable, Type: "source", } } tgtKey := e.TargetDatabase + "." 
+ e.TargetTable if _, ok := nodeMap[tgtKey]; !ok { nodeMap[tgtKey] = governance.LineageNode{ ID: tgtKey, Database: e.TargetDatabase, Table: e.TargetTable, Type: "target", } } } nodes := make([]governance.LineageNode, 0, len(nodeMap)) for _, n := range nodeMap { nodes = append(nodes, n) } // Enrich: include_columns=true attaches column metadata to nodes and column edges to edges if r.URL.Query().Get("include_columns") == "true" { enrichLineageNodes(h.Store, connID, nodes) enrichLineageEdges(h.Store, edges) } writeJSON(w, http.StatusOK, map[string]interface{}{ "graph": governance.LineageGraph{Nodes: nodes, Edges: edges}, }) } // enrichLineageNodes attaches column metadata from gov_columns to each node. func enrichLineageNodes(store *governance.Store, connID string, nodes []governance.LineageNode) { for i := range nodes { cols, err := store.GetColumns(connID, nodes[i].Database, nodes[i].Table) if err != nil { slog.Warn("Failed to get columns for lineage node", "node", nodes[i].ID, "error", err) continue } nodes[i].Columns = cols } } // enrichLineageEdges attaches column-level lineage edges to each table-level edge. func enrichLineageEdges(store *governance.Store, edges []governance.LineageEdge) { edgeIDs := make([]string, 0, len(edges)) for _, e := range edges { edgeIDs = append(edgeIDs, e.ID) } colEdgeMap, err := store.GetColumnEdgesForEdgeIDs(edgeIDs) if err != nil { slog.Warn("Failed to get column lineage edges", "error", err) return } for i := range edges { if colEdges, ok := colEdgeMap[edges[i].ID]; ok { edges[i].ColumnEdges = colEdges } } } // GetQueryByQueryID returns a single query log entry by ClickHouse query_id. 
func (h *GovernanceHandler) GetQueryByQueryID(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } queryID := chi.URLParam(r, "query_id") if queryID == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "query_id is required"}) return } entry, err := h.Store.GetQueryByQueryID(connID, queryID) if err != nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Query not found"}) return } writeJSON(w, http.StatusOK, map[string]interface{}{"entry": entry}) } // ── Tags ───────────────────────────────────────────────────────────────────── func (h *GovernanceHandler) ListTags(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } dbName := r.URL.Query().Get("database") tableName := r.URL.Query().Get("table") var tags []governance.TagEntry var err error if dbName != "" && tableName != "" { tags, err = h.Store.GetTagsForTable(connID, dbName, tableName) } else { tags, err = h.Store.GetTags(connID) } if err != nil { slog.Error("Failed to list tags", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list tags"}) return } if tags == nil { tags = []governance.TagEntry{} } writeJSON(w, http.StatusOK, map[string]interface{}{"tags": tags}) } func (h *GovernanceHandler) CreateTag(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } var body struct { ObjectType string `json:"object_type"` DatabaseName string `json:"database_name"` TableName string `json:"table_name"` ColumnName string `json:"column_name"` Tag string `json:"tag"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, 
http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } tag := governance.SensitivityTag(strings.ToUpper(body.Tag)) if !governance.ValidTags[tag] { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid tag. Valid: PII, FINANCIAL, INTERNAL, PUBLIC, CRITICAL"}) return } if body.ObjectType != "table" && body.ObjectType != "column" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "object_type must be 'table' or 'column'"}) return } if body.DatabaseName == "" || body.TableName == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "database_name and table_name are required"}) return } id, err := h.Store.CreateTag( session.ConnectionID, body.ObjectType, body.DatabaseName, body.TableName, body.ColumnName, tag, session.ClickhouseUser, ) if err != nil { slog.Error("Failed to create tag", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create tag"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.tag.created", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(fmt.Sprintf("%s on %s.%s", tag, body.DatabaseName, body.TableName)), }) writeJSON(w, http.StatusCreated, map[string]interface{}{"id": id}) } func (h *GovernanceHandler) DeleteTag(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := chi.URLParam(r, "id") if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Tag ID required"}) return } if err := h.Store.DeleteTag(id); err != nil { slog.Error("Failed to delete tag", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete tag"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.tag.deleted", Username: strPtr(session.ClickhouseUser), 
ConnectionID: strPtr(session.ConnectionID), Details: strPtr(fmt.Sprintf("tag %s deleted", id)), }) writeJSON(w, http.StatusOK, map[string]interface{}{"success": true}) } // ── Access ─────────────────────────────────────────────────────────────────── func (h *GovernanceHandler) ListChUsers(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } users, err := h.Store.GetChUsers(connID) if err != nil { slog.Error("Failed to list CH users", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list users"}) return } if users == nil { users = []governance.ChUser{} } writeJSON(w, http.StatusOK, map[string]interface{}{"users": users}) } func (h *GovernanceHandler) CreateChUser(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } creds, err := h.getCredentials(r) if err != nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": err.Error()}) return } var body struct { Name string `json:"name"` Password string `json:"password"` AuthType string `json:"auth_type"` DefaultRoles []string `json:"default_roles"` IfNotExists *bool `json:"if_not_exists"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } name := strings.TrimSpace(body.Name) if name == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name is required"}) return } authType := strings.TrimSpace(strings.ToLower(body.AuthType)) if authType == "" { if strings.TrimSpace(body.Password) == "" { authType = "no_password" } else { authType = "plaintext_password" } } switch authType { case "no_password", "plaintext_password", "sha256_password", "double_sha1_password": default: 
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "auth_type must be one of: no_password, plaintext_password, sha256_password, double_sha1_password"}) return } if authType != "no_password" && strings.TrimSpace(body.Password) == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "password is required for selected auth_type"}) return } allRoles, roleNames, parseErr := parseDefaultRolesInput(body.DefaultRoles) if parseErr != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": parseErr.Error()}) return } var createSQL strings.Builder createSQL.WriteString("CREATE USER ") if body.IfNotExists == nil || *body.IfNotExists { createSQL.WriteString("IF NOT EXISTS ") } createSQL.WriteString(escapeIdentifier(name)) createSQL.WriteString(buildClickHouseCreateAuthClause(authType, body.Password)) if err := h.executeClickHouseSQL(creds, createSQL.String()); err != nil { slog.Error("Failed to create ClickHouse user", "connection", session.ConnectionID, "name", name, "error", err) writeJSON(w, http.StatusBadGateway, map[string]string{"error": err.Error()}) return } escapedRoles := make([]string, 0, len(roleNames)) for _, role := range roleNames { escapedRoles = append(escapedRoles, escapeIdentifier(role)) } if len(escapedRoles) > 0 { grantSQL := "GRANT " + strings.Join(escapedRoles, ", ") + " TO " + escapeIdentifier(name) if err := h.executeClickHouseSQL(creds, grantSQL); err != nil { slog.Error("ClickHouse user created but role grant failed", "connection", session.ConnectionID, "name", name, "error", err) writeJSON(w, http.StatusBadGateway, map[string]string{"error": fmt.Sprintf("user created but failed to grant roles: %v", err)}) return } } if allRoles || len(escapedRoles) > 0 { defaultRoleClause := "ALL" if !allRoles { defaultRoleClause = strings.Join(escapedRoles, ", ") } alterSQL := "ALTER USER " + escapeIdentifier(name) + " DEFAULT ROLE " + defaultRoleClause if err := h.executeClickHouseSQL(creds, alterSQL); err != nil { 
slog.Error("ClickHouse user created but default role assignment failed", "connection", session.ConnectionID, "name", name, "error", err) writeJSON(w, http.StatusBadGateway, map[string]string{"error": fmt.Sprintf("user created but failed to set default role: %v", err)}) return } } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.access.user.created", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(name), }) h.triggerSyncAsync(*creds, governance.SyncAccess) writeJSON(w, http.StatusCreated, map[string]interface{}{"success": true, "name": name}) } func (h *GovernanceHandler) DeleteChUser(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } creds, err := h.getCredentials(r) if err != nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": err.Error()}) return } name := strings.TrimSpace(chi.URLParam(r, "name")) if name == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name is required"}) return } ifExists := true if raw := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("if_exists"))); raw == "false" || raw == "0" { ifExists = false } sql := "DROP USER " if ifExists { sql += "IF EXISTS " } sql += escapeIdentifier(name) if err := h.executeClickHouseSQL(creds, sql); err != nil { slog.Error("Failed to delete ClickHouse user", "connection", session.ConnectionID, "name", name, "error", err) writeJSON(w, http.StatusBadGateway, map[string]string{"error": err.Error()}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.access.user.deleted", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(name), }) h.triggerSyncAsync(*creds, governance.SyncAccess) writeJSON(w, http.StatusOK, map[string]interface{}{"success": true}) } func (h *GovernanceHandler) 
ListChRoles(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } roles, err := h.Store.GetChRoles(connID) if err != nil { slog.Error("Failed to list CH roles", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list roles"}) return } if roles == nil { roles = []governance.ChRole{} } writeJSON(w, http.StatusOK, map[string]interface{}{"roles": roles}) } func (h *GovernanceHandler) GetAccessMatrix(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } user := r.URL.Query().Get("user") var matrix []governance.AccessMatrixEntry var err error if user != "" { matrix, err = h.Store.GetAccessMatrixForUser(connID, user) } else { matrix, err = h.Store.GetAccessMatrix(connID) } if err != nil { slog.Error("Failed to get access matrix", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get access matrix"}) return } if matrix == nil { matrix = []governance.AccessMatrixEntry{} } writeJSON(w, http.StatusOK, map[string]interface{}{"matrix": matrix}) } func (h *GovernanceHandler) GetOverPermissions(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } days := queryIntBounded(r, "days", 30, 1, 3650) perms, err := h.Store.GetOverPermissionsWithDays(connID, days) if err != nil { slog.Error("Failed to get over-permissions", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get over-permissions"}) return } if perms == nil { perms = []governance.OverPermission{} } writeJSON(w, http.StatusOK, map[string]interface{}{"over_permissions": perms}) } // ── Policies 
───────────────────────────────────────────────────────────────── func (h *GovernanceHandler) ListPolicies(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } policies, err := h.Store.GetPolicies(connID) if err != nil { slog.Error("Failed to list policies", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list policies"}) return } if policies == nil { policies = []governance.Policy{} } writeJSON(w, http.StatusOK, map[string]interface{}{"policies": policies}) } func (h *GovernanceHandler) CreatePolicy(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } var body struct { Name string `json:"name"` Description string `json:"description"` ObjectType string `json:"object_type"` ObjectDatabase string `json:"object_database"` ObjectTable string `json:"object_table"` ObjectColumn string `json:"object_column"` RequiredRole string `json:"required_role"` Severity string `json:"severity"` EnforcementMode string `json:"enforcement_mode"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } if strings.TrimSpace(body.Name) == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Policy name is required"}) return } if body.ObjectType != "database" && body.ObjectType != "table" && body.ObjectType != "column" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "object_type must be database, table, or column"}) return } if body.Severity == "" { body.Severity = "warn" } enforcementMode, err := normalizePolicyEnforcementMode(body.EnforcementMode) if err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()}) 
return } id, err := h.Store.CreatePolicy( session.ConnectionID, body.Name, body.Description, body.ObjectType, body.ObjectDatabase, body.ObjectTable, body.ObjectColumn, body.RequiredRole, body.Severity, enforcementMode, session.ClickhouseUser, ) if err != nil { slog.Error("Failed to create policy", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create policy"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.policy.created", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(body.Name), }) policy, _ := h.Store.GetPolicyByID(id) writeJSON(w, http.StatusCreated, map[string]interface{}{"policy": policy}) } func (h *GovernanceHandler) GetPolicy(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") policy, err := h.Store.GetPolicyByID(id) if err != nil { slog.Error("Failed to get policy", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get policy"}) return } if policy == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Policy not found"}) return } writeJSON(w, http.StatusOK, map[string]interface{}{"policy": policy}) } func (h *GovernanceHandler) UpdatePolicy(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := chi.URLParam(r, "id") var body struct { Name string `json:"name"` Description string `json:"description"` RequiredRole string `json:"required_role"` Severity string `json:"severity"` EnforcementMode string `json:"enforcement_mode"` Enabled *bool `json:"enabled"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } enforcementMode, err := normalizePolicyEnforcementMode(body.EnforcementMode) if err != nil { 
writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()}) return } enabled := true if body.Enabled != nil { enabled = *body.Enabled } if err := h.Store.UpdatePolicy(id, body.Name, body.Description, body.RequiredRole, body.Severity, enforcementMode, enabled); err != nil { slog.Error("Failed to update policy", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update policy"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.policy.updated", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(id), }) policy, _ := h.Store.GetPolicyByID(id) writeJSON(w, http.StatusOK, map[string]interface{}{"policy": policy}) } func (h *GovernanceHandler) DeletePolicy(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := chi.URLParam(r, "id") if err := h.Store.DeletePolicy(id); err != nil { slog.Error("Failed to delete policy", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete policy"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.policy.deleted", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(id), }) writeJSON(w, http.StatusOK, map[string]interface{}{"success": true}) } // ── Violations ─────────────────────────────────────────────────────────────── func (h *GovernanceHandler) ListViolations(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } limit := queryIntBounded(r, "limit", 50, 1, 500) policyID := r.URL.Query().Get("policy_id") violations, err := h.Store.GetViolations(connID, limit, policyID) if err != nil { slog.Error("Failed to list 
violations", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list violations"}) return } if violations == nil { violations = []governance.PolicyViolation{} } writeJSON(w, http.StatusOK, map[string]interface{}{"violations": violations}) } func (h *GovernanceHandler) CreateIncidentFromViolation(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } violationID := strings.TrimSpace(chi.URLParam(r, "id")) if violationID == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "violation id is required"}) return } violation, err := h.Store.GetViolationByID(violationID) if err != nil { slog.Error("Failed to load violation", "id", violationID, "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load violation"}) return } if violation == nil || violation.ConnectionID != session.ConnectionID { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Violation not found"}) return } incidentID, created, err := h.Store.UpsertIncidentFromViolation( session.ConnectionID, violation.ID, violation.PolicyName, violation.User, normalizeIncidentSeverity(violation.Severity), violation.ViolationDetail, ) if err != nil { slog.Error("Failed to upsert incident from violation", "violation", violation.ID, "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create incident"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.incident.from_violation", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(fmt.Sprintf("violation=%s incident=%s created=%t", violation.ID, incidentID, created)), }) writeJSON(w, http.StatusCreated, map[string]interface{}{"incident_id": incidentID, "created": created, "success": true}) } func (h 
*GovernanceHandler) ListIncidents(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } status := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("status"))) severity := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("severity"))) limit := queryIntBounded(r, "limit", 100, 1, 1000) incidents, err := h.Store.ListIncidents(connID, status, severity, limit) if err != nil { slog.Error("Failed to list incidents", "connection", connID, "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list incidents"}) return } if incidents == nil { incidents = []governance.Incident{} } writeJSON(w, http.StatusOK, map[string]interface{}{"incidents": incidents}) } func (h *GovernanceHandler) GetIncident(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := strings.TrimSpace(chi.URLParam(r, "id")) if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "incident id is required"}) return } incident, err := h.Store.GetIncidentByID(id) if err != nil { slog.Error("Failed to load incident", "id", id, "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load incident"}) return } if incident == nil || incident.ConnectionID != connID { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Incident not found"}) return } writeJSON(w, http.StatusOK, map[string]interface{}{"incident": incident}) } func (h *GovernanceHandler) CreateIncident(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } var body struct { SourceType string `json:"source_type"` SourceRef string 
`json:"source_ref"` Title string `json:"title"` Severity string `json:"severity"` Status string `json:"status"` Assignee string `json:"assignee"` Details string `json:"details"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } title := strings.TrimSpace(body.Title) if title == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "title is required"}) return } severity := normalizeIncidentSeverity(body.Severity) status := normalizeIncidentStatus(body.Status) sourceType := strings.TrimSpace(strings.ToLower(body.SourceType)) if sourceType == "" { sourceType = "manual" } if sourceType != "manual" && sourceType != "violation" && sourceType != "over_permission" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "source_type must be manual, violation, or over_permission"}) return } id, err := h.Store.CreateIncident( session.ConnectionID, sourceType, body.SourceRef, "", title, severity, status, body.Assignee, body.Details, session.ClickhouseUser, ) if err != nil { slog.Error("Failed to create incident", "connection", session.ConnectionID, "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create incident"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.incident.created", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(id), }) writeJSON(w, http.StatusCreated, map[string]interface{}{"id": id, "success": true}) } func (h *GovernanceHandler) UpdateIncident(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := strings.TrimSpace(chi.URLParam(r, "id")) if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "incident id is required"}) return } 
existing, err := h.Store.GetIncidentByID(id) if err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load incident"}) return } if existing == nil || existing.ConnectionID != session.ConnectionID { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Incident not found"}) return } var body struct { Title *string `json:"title"` Severity *string `json:"severity"` Status *string `json:"status"` Assignee *string `json:"assignee"` Details *string `json:"details"` ResolutionNote *string `json:"resolution_note"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } title := existing.Title if body.Title != nil { title = strings.TrimSpace(*body.Title) } if title == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "title is required"}) return } severity := existing.Severity if body.Severity != nil { severity = normalizeIncidentSeverity(*body.Severity) } status := existing.Status if body.Status != nil { status = normalizeIncidentStatus(*body.Status) } assignee := derefString(existing.Assignee) if body.Assignee != nil { assignee = strings.TrimSpace(*body.Assignee) } details := derefString(existing.Details) if body.Details != nil { details = strings.TrimSpace(*body.Details) } resolution := derefString(existing.ResolutionNote) if body.ResolutionNote != nil { resolution = strings.TrimSpace(*body.ResolutionNote) } if err := h.Store.UpdateIncident(id, title, severity, status, assignee, details, resolution); err != nil { slog.Error("Failed to update incident", "id", id, "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update incident"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.incident.updated", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(id), }) writeJSON(w, 
http.StatusOK, map[string]interface{}{"success": true}) } func (h *GovernanceHandler) ListIncidentComments(w http.ResponseWriter, r *http.Request) { connID := h.connectionID(r) if connID == "" { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := strings.TrimSpace(chi.URLParam(r, "id")) if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "incident id is required"}) return } incident, err := h.Store.GetIncidentByID(id) if err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load incident"}) return } if incident == nil || incident.ConnectionID != connID { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Incident not found"}) return } comments, err := h.Store.ListIncidentComments(id, 500) if err != nil { slog.Error("Failed to list incident comments", "id", id, "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list incident comments"}) return } if comments == nil { comments = []governance.IncidentComment{} } writeJSON(w, http.StatusOK, map[string]interface{}{"comments": comments}) } func (h *GovernanceHandler) CreateIncidentComment(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := strings.TrimSpace(chi.URLParam(r, "id")) if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "incident id is required"}) return } incident, err := h.Store.GetIncidentByID(id) if err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load incident"}) return } if incident == nil || incident.ConnectionID != session.ConnectionID { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Incident not found"}) return } var body struct { CommentText string `json:"comment_text"` } if err := 
json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } comment := strings.TrimSpace(body.CommentText) if comment == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "comment_text is required"}) return } if len(comment) > 4000 { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "comment_text must be <= 4000 characters"}) return } commentID, err := h.Store.CreateIncidentComment(id, comment, session.ClickhouseUser) if err != nil { slog.Error("Failed to create incident comment", "id", id, "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create incident comment"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "governance.incident.comment.created", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(id), }) writeJSON(w, http.StatusCreated, map[string]interface{}{"id": commentID, "success": true}) } func normalizeIncidentSeverity(v string) string { switch strings.ToLower(strings.TrimSpace(v)) { case "info", "warn", "error", "critical": return strings.ToLower(strings.TrimSpace(v)) default: return "warn" } } func normalizePolicyEnforcementMode(v string) (string, error) { mode := strings.ToLower(strings.TrimSpace(v)) switch mode { case "", "warn": return "warn", nil case "block": return "block", nil default: return "", fmt.Errorf("enforcement_mode must be warn or block") } } func normalizeIncidentStatus(v string) string { switch strings.ToLower(strings.TrimSpace(v)) { case "open", "triaged", "in_progress", "resolved", "dismissed": return strings.ToLower(strings.TrimSpace(v)) default: return "open" } } func derefString(v *string) string { if v == nil { return "" } return *v } ================================================ FILE: internal/server/handlers/governance_alerts.go ================================================ package handlers 
import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"net/http"
	"regexp"
	"strconv"
	"strings"

	"github.com/caioricciuti/ch-ui/internal/alerts"
	"github.com/caioricciuti/ch-ui/internal/crypto"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
	"github.com/go-chi/chi/v5"
)

// emailRegex is a loose sanity check: no whitespace, one "@", a dot in the domain.
var emailRegex = regexp.MustCompile(`^[^@\s]+@[^@\s]+\.[^@\s]+$`)

// alertRuleRoutePayload is the request shape for a single route attached to an
// alert rule (target channel, recipients, delivery and escalation options).
type alertRuleRoutePayload struct {
	ChannelID               string   `json:"channel_id"`
	Recipients              []string `json:"recipients"`
	IsActive                *bool    `json:"is_active"`
	DeliveryMode            string   `json:"delivery_mode"`
	DigestWindowMinutes     *int     `json:"digest_window_minutes"`
	EscalationChannelID     *string  `json:"escalation_channel_id"`
	EscalationRecipients    []string `json:"escalation_recipients"`
	EscalationAfterFailures *int     `json:"escalation_after_failures"`
}

// alertRuleResponse is an alert rule together with its resolved routes.
type alertRuleResponse struct {
	database.AlertRule
	Routes []database.AlertRuleRouteView `json:"routes"`
}

// ListAlertChannels returns all alert channels with their configs decrypted and
// secret fields blanked out (see sanitizeChannelConfig).
func (h *GovernanceHandler) ListAlertChannels(w http.ResponseWriter, r *http.Request) {
	channels, err := h.DB.ListAlertChannels()
	if err != nil {
		slog.Error("Failed to list alert channels", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list alert channels"})
		return
	}
	type responseItem struct {
		database.AlertChannel
		Config    map[string]interface{} `json:"config"`
		HasSecret bool                   `json:"has_secret"`
	}
	out := make([]responseItem, 0, len(channels))
	for _, channel := range channels {
		decrypted, err := crypto.Decrypt(channel.ConfigEncrypted, h.Config.AppSecretKey)
		if err != nil {
			// Channels whose config cannot be decrypted or parsed are skipped, not fatal.
			slog.Warn("Failed to decrypt alert channel config", "channel", channel.ID, "error", err)
			continue
		}
		cfg := map[string]interface{}{}
		if err := json.Unmarshal([]byte(decrypted), &cfg); err != nil {
			slog.Warn("Failed to parse alert channel config", "channel", channel.ID, "error", err)
			continue
		}
		sanitized, hasSecret := sanitizeChannelConfig(channel.ChannelType, cfg)
		out = append(out, responseItem{
			AlertChannel: channel,
			Config:       sanitized,
HasSecret:    hasSecret,
		})
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"channels": out})
}

// CreateAlertChannel creates an alert channel; the config is validated per
// channel type and stored encrypted with the app secret key.
func (h *GovernanceHandler) CreateAlertChannel(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	var body struct {
		Name        string                 `json:"name"`
		ChannelType string                 `json:"channel_type"`
		Config      map[string]interface{} `json:"config"`
		IsActive    *bool                  `json:"is_active"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	name := strings.TrimSpace(body.Name)
	channelType := strings.ToLower(strings.TrimSpace(body.ChannelType))
	if name == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name is required"})
		return
	}
	if !isSupportedChannelType(channelType) {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "channel_type must be smtp, resend, or brevo"})
		return
	}
	// allowEmptySecret=false: creation must include the channel secret.
	if err := validateChannelConfig(channelType, body.Config, false); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
		return
	}
	rawConfig, _ := json.Marshal(body.Config)
	encrypted, err := crypto.Encrypt(string(rawConfig), h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to encrypt alert channel config", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to store alert channel config"})
		return
	}
	// Channels are active by default unless the caller says otherwise.
	isActive := true
	if body.IsActive != nil {
		isActive = *body.IsActive
	}
	id, err := h.DB.CreateAlertChannel(name, channelType, encrypted, isActive, session.ClickhouseUser)
	if err != nil {
		slog.Error("Failed to create alert channel", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create alert channel"})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action: "alerts.channel.created",
		Username:
strPtr(session.ClickhouseUser),
		Details:   strPtr(fmt.Sprintf("%s (%s)", name, channelType)),
		IPAddress: strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusCreated, map[string]interface{}{"id": id, "success": true})
}

// UpdateAlertChannel partially updates an alert channel; omitted fields keep
// their stored values, and the encrypted config is replaced only when a new
// config object is supplied.
func (h *GovernanceHandler) UpdateAlertChannel(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	channel, err := h.DB.GetAlertChannelByID(id)
	if err != nil {
		slog.Error("Failed to load alert channel", "id", id, "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load alert channel"})
		return
	}
	if channel == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Alert channel not found"})
		return
	}
	var body struct {
		Name        *string                `json:"name"`
		ChannelType *string                `json:"channel_type"`
		Config      map[string]interface{} `json:"config"`
		IsActive    *bool                  `json:"is_active"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	name := channel.Name
	if body.Name != nil {
		name = strings.TrimSpace(*body.Name)
	}
	channelType := channel.ChannelType
	if body.ChannelType != nil {
		channelType = strings.ToLower(strings.TrimSpace(*body.ChannelType))
	}
	if name == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name is required"})
		return
	}
	if !isSupportedChannelType(channelType) {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "channel_type must be smtp, resend, or brevo"})
		return
	}
	isActive := channel.IsActive
	if body.IsActive != nil {
		isActive = *body.IsActive
	}
	var encryptedConfig *string
	if body.Config != nil {
		// allowEmptySecret=true: an update may omit the secret to keep the stored one.
		if err := validateChannelConfig(channelType, body.Config, true); err != nil {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
			return
		}
		rawConfig, _ := json.Marshal(body.Config)
enc, err := crypto.Encrypt(string(rawConfig), h.Config.AppSecretKey)
		if err != nil {
			slog.Error("Failed to encrypt alert channel config", "id", id, "error", err)
			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to store alert channel config"})
			return
		}
		encryptedConfig = &enc
	}
	// A nil encryptedConfig leaves the stored config untouched.
	if err := h.DB.UpdateAlertChannel(id, name, channelType, encryptedConfig, isActive); err != nil {
		slog.Error("Failed to update alert channel", "id", id, "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update alert channel"})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:    "alerts.channel.updated",
		Username:  strPtr(session.ClickhouseUser),
		Details:   strPtr(fmt.Sprintf("%s (%s)", name, channelType)),
		IPAddress: strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
}

// DeleteAlertChannel deletes a channel after confirming it exists, then writes
// an audit log entry carrying the channel's name and type.
func (h *GovernanceHandler) DeleteAlertChannel(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	channel, err := h.DB.GetAlertChannelByID(id)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load alert channel"})
		return
	}
	if channel == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Alert channel not found"})
		return
	}
	if err := h.DB.DeleteAlertChannel(id); err != nil {
		slog.Error("Failed to delete alert channel", "id", id, "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete alert channel"})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:    "alerts.channel.deleted",
		Username:  strPtr(session.ClickhouseUser),
		Details:   strPtr(fmt.Sprintf("%s (%s)", channel.Name, channel.ChannelType)),
		IPAddress: strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
}

// TestAlertChannel decrypts a channel's stored config and sends a test
// notification to caller-supplied recipients (body continues on the next line
// of this extract).
func (h
*GovernanceHandler) TestAlertChannel(w http.ResponseWriter, r *http.Request) {
	id := chi.URLParam(r, "id")
	channel, err := h.DB.GetAlertChannelByID(id)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load alert channel"})
		return
	}
	if channel == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Alert channel not found"})
		return
	}
	var body struct {
		Recipients []string `json:"recipients"`
		Subject    string   `json:"subject"`
		Message    string   `json:"message"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	recipients, err := validateRecipients(body.Recipients)
	if err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
		return
	}
	decrypted, err := crypto.Decrypt(channel.ConfigEncrypted, h.Config.AppSecretKey)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to decrypt alert channel config"})
		return
	}
	cfg := map[string]interface{}{}
	if err := json.Unmarshal([]byte(decrypted), &cfg); err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to parse alert channel config"})
		return
	}
	// Default subject/message when the caller provides none.
	subject := strings.TrimSpace(body.Subject)
	if subject == "" {
		subject = "CH-UI Alert Channel Test"
	}
	message := strings.TrimSpace(body.Message)
	if message == "" {
		message = "This is a test notification from CH-UI."
}
	msgID, err := alerts.SendDirect(context.Background(), channel.ChannelType, cfg, recipients, subject, message)
	if err != nil {
		// Provider/SMTP failure surfaces as 502 so the UI can show the cause.
		writeJSON(w, http.StatusBadGateway, map[string]string{"error": err.Error()})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true, "provider_message_id": msgID})
}

// ListAlertRules returns all alert rules with their routes; a route-load
// failure degrades to an empty route list rather than failing the request.
func (h *GovernanceHandler) ListAlertRules(w http.ResponseWriter, r *http.Request) {
	rules, err := h.DB.ListAlertRules()
	if err != nil {
		slog.Error("Failed to list alert rules", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list alert rules"})
		return
	}
	out := make([]alertRuleResponse, 0, len(rules))
	for _, rule := range rules {
		routes, err := h.DB.ListAlertRuleRoutes(rule.ID)
		if err != nil {
			slog.Warn("Failed to load alert rule routes", "rule", rule.ID, "error", err)
			routes = []database.AlertRuleRouteView{}
		}
		out = append(out, alertRuleResponse{
			AlertRule: rule,
			Routes:    routes,
		})
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"rules": out})
}

// CreateAlertRule creates an alert rule plus its routes. Defaults: enabled,
// 300s cooldown, 5 max delivery attempts.
func (h *GovernanceHandler) CreateAlertRule(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	var body struct {
		Name            string                  `json:"name"`
		EventType       string                  `json:"event_type"`
		SeverityMin     string                  `json:"severity_min"`
		Enabled         *bool                   `json:"enabled"`
		CooldownSeconds *int                    `json:"cooldown_seconds"`
		MaxAttempts     *int                    `json:"max_attempts"`
		SubjectTemplate string                  `json:"subject_template"`
		BodyTemplate    string                  `json:"body_template"`
		Routes          []alertRuleRoutePayload `json:"routes"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	name := strings.TrimSpace(body.Name)
	eventType := strings.ToLower(strings.TrimSpace(body.EventType))
	severityMin := strings.ToLower(strings.TrimSpace(body.SeverityMin))
	if name == "" {
		writeJSON(w,
http.StatusBadRequest, map[string]string{"error": "name is required"})
		return
	}
	if !isSupportedEventType(eventType) {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "event_type must be policy.violation, schedule.failed, schedule.slow, or *"})
		return
	}
	if !isSupportedSeverity(severityMin) {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "severity_min must be info, warn, error, or critical"})
		return
	}
	enabled := true
	if body.Enabled != nil {
		enabled = *body.Enabled
	}
	cooldownSeconds := 300
	if body.CooldownSeconds != nil {
		cooldownSeconds = *body.CooldownSeconds
	}
	maxAttempts := 5
	if body.MaxAttempts != nil {
		maxAttempts = *body.MaxAttempts
	}
	// Validate routes before creating the rule so bad payloads create nothing.
	routes, err := h.validateRuleRoutes(body.Routes)
	if err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
		return
	}
	id, err := h.DB.CreateAlertRule(name, eventType, severityMin, enabled, cooldownSeconds, maxAttempts, body.SubjectTemplate, body.BodyTemplate, session.ClickhouseUser)
	if err != nil {
		slog.Error("Failed to create alert rule", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create alert rule"})
		return
	}
	if err := h.DB.ReplaceAlertRuleRoutes(id, routes); err != nil {
		// Best-effort rollback of the half-created rule.
		_ = h.DB.DeleteAlertRule(id)
		slog.Error("Failed to create alert rule routes", "rule", id, "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create alert rule routes"})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:    "alerts.rule.created",
		Username:  strPtr(session.ClickhouseUser),
		Details:   strPtr(name),
		IPAddress: strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusCreated, map[string]interface{}{"id": id, "success": true})
}

// UpdateAlertRule partially updates a rule; omitted fields keep stored values,
// and routes are replaced only when a routes array is supplied.
func (h *GovernanceHandler) UpdateAlertRule(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
existing, err := h.DB.GetAlertRuleByID(id)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load alert rule"})
		return
	}
	if existing == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Alert rule not found"})
		return
	}
	// All fields are pointers so "absent" is distinguishable from zero values.
	var body struct {
		Name            *string                  `json:"name"`
		EventType       *string                  `json:"event_type"`
		SeverityMin     *string                  `json:"severity_min"`
		Enabled         *bool                    `json:"enabled"`
		CooldownSeconds *int                     `json:"cooldown_seconds"`
		MaxAttempts     *int                     `json:"max_attempts"`
		SubjectTemplate *string                  `json:"subject_template"`
		BodyTemplate    *string                  `json:"body_template"`
		Routes          *[]alertRuleRoutePayload `json:"routes"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	name := existing.Name
	if body.Name != nil {
		name = strings.TrimSpace(*body.Name)
	}
	eventType := existing.EventType
	if body.EventType != nil {
		eventType = strings.ToLower(strings.TrimSpace(*body.EventType))
	}
	severityMin := existing.SeverityMin
	if body.SeverityMin != nil {
		severityMin = strings.ToLower(strings.TrimSpace(*body.SeverityMin))
	}
	enabled := existing.Enabled
	if body.Enabled != nil {
		enabled = *body.Enabled
	}
	cooldownSeconds := existing.CooldownSeconds
	if body.CooldownSeconds != nil {
		cooldownSeconds = *body.CooldownSeconds
	}
	maxAttempts := existing.MaxAttempts
	if body.MaxAttempts != nil {
		maxAttempts = *body.MaxAttempts
	}
	subjectTemplate := coalesceStringPtr(body.SubjectTemplate, existing.SubjectTemplate)
	bodyTemplate := coalesceStringPtr(body.BodyTemplate, existing.BodyTemplate)
	if name == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name is required"})
		return
	}
	if !isSupportedEventType(eventType) {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "event_type must be policy.violation, schedule.failed, schedule.slow, or *"})
		return
	}
	if !isSupportedSeverity(severityMin) {
		writeJSON(w,
http.StatusBadRequest, map[string]string{"error": "severity_min must be info, warn, error, or critical"})
		return
	}
	if err := h.DB.UpdateAlertRule(id, name, eventType, severityMin, enabled, cooldownSeconds, maxAttempts, subjectTemplate, bodyTemplate); err != nil {
		slog.Error("Failed to update alert rule", "id", id, "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update alert rule"})
		return
	}
	// Routes are replaced wholesale, and only when the caller sent a routes array.
	if body.Routes != nil {
		routes, err := h.validateRuleRoutes(*body.Routes)
		if err != nil {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
			return
		}
		if err := h.DB.ReplaceAlertRuleRoutes(id, routes); err != nil {
			slog.Error("Failed to replace alert rule routes", "rule", id, "error", err)
			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update alert rule routes"})
			return
		}
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:    "alerts.rule.updated",
		Username:  strPtr(session.ClickhouseUser),
		Details:   strPtr(name),
		IPAddress: strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
}

// DeleteAlertRule deletes a rule after confirming it exists, then audits the
// deletion with the rule's name.
func (h *GovernanceHandler) DeleteAlertRule(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	existing, err := h.DB.GetAlertRuleByID(id)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load alert rule"})
		return
	}
	if existing == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Alert rule not found"})
		return
	}
	if err := h.DB.DeleteAlertRule(id); err != nil {
		slog.Error("Failed to delete alert rule", "id", id, "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete alert rule"})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action: "alerts.rule.deleted",
		Username:
strPtr(session.ClickhouseUser),
		Details:   strPtr(existing.Name),
		IPAddress: strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
}

// ListAlertEvents returns recent alert events, optionally filtered by the
// event_type and status query parameters. The limit parameter is clamped to
// 1..1000 (matching GetAuditLogs) so a negative or oversized value is no
// longer passed straight through to the database layer.
func (h *GovernanceHandler) ListAlertEvents(w http.ResponseWriter, r *http.Request) {
	limit := 100
	if raw := strings.TrimSpace(r.URL.Query().Get("limit")); raw != "" {
		// Ignore non-numeric and non-positive values, keeping the default.
		if n, err := strconv.Atoi(raw); err == nil && n > 0 {
			limit = n
		}
	}
	if limit > 1000 {
		limit = 1000
	}
	eventType := strings.TrimSpace(r.URL.Query().Get("event_type"))
	status := strings.TrimSpace(r.URL.Query().Get("status"))
	events, err := h.DB.ListAlertEvents(limit, eventType, status)
	if err != nil {
		slog.Error("Failed to list alert events", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list alert events"})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"events": events})
}

// validateRuleRoutes resolves and validates route payloads for an alert rule:
// each referenced channel must exist, recipients must be valid emails, and
// delivery/digest/escalation settings must be within range.
func (h *GovernanceHandler) validateRuleRoutes(payload []alertRuleRoutePayload) ([]database.AlertRuleRoute, error) {
	routes := make([]database.AlertRuleRoute, 0, len(payload))
	for _, item := range payload {
		channelID := strings.TrimSpace(item.ChannelID)
		if channelID == "" {
			return nil, fmt.Errorf("route channel_id is required")
		}
		channel, err := h.DB.GetAlertChannelByID(channelID)
		if err != nil {
			return nil, fmt.Errorf("failed to load channel %s", channelID)
		}
		if channel == nil {
			return nil, fmt.Errorf("channel %s not found", channelID)
		}
		recipients, err := validateRecipients(item.Recipients)
		if err != nil {
			return nil, fmt.Errorf("route channel %s: %w", channelID, err)
		}
		active := true
		if item.IsActive != nil {
			active = *item.IsActive
		}
		deliveryMode := strings.ToLower(strings.TrimSpace(item.DeliveryMode))
		if deliveryMode == "" {
			deliveryMode = "immediate"
		}
		if deliveryMode != "immediate" && deliveryMode != "digest" {
			return nil, fmt.Errorf("route channel %s: delivery_mode must be immediate or digest", channelID)
		}
		digestWindow := 0
		if item.DigestWindowMinutes != nil {
			digestWindow = *item.DigestWindowMinutes
		}
		if digestWindow < 0 ||
digestWindow > 1440 {
			return nil, fmt.Errorf("route channel %s: digest_window_minutes must be between 0 and 1440", channelID)
		}
		// Digest delivery needs a window; default to 15 minutes when unset.
		if deliveryMode == "digest" && digestWindow == 0 {
			digestWindow = 15
		}
		var escalationChannelID *string
		if item.EscalationChannelID != nil && strings.TrimSpace(*item.EscalationChannelID) != "" {
			escID := strings.TrimSpace(*item.EscalationChannelID)
			escalationChannel, err := h.DB.GetAlertChannelByID(escID)
			if err != nil {
				return nil, fmt.Errorf("route channel %s: failed to load escalation channel %s", channelID, escID)
			}
			if escalationChannel == nil {
				return nil, fmt.Errorf("route channel %s: escalation channel %s not found", channelID, escID)
			}
			escalationChannelID = &escID
		}
		escalationRecipients := []string{}
		if len(item.EscalationRecipients) > 0 {
			escalationRecipients, err = validateRecipients(item.EscalationRecipients)
			if err != nil {
				return nil, fmt.Errorf("route channel %s escalation_recipients: %w", channelID, err)
			}
		}
		escalationAfterFailures := 0
		if item.EscalationAfterFailures != nil {
			escalationAfterFailures = *item.EscalationAfterFailures
		}
		if escalationAfterFailures < 0 || escalationAfterFailures > 10 {
			return nil, fmt.Errorf("route channel %s: escalation_after_failures must be between 0 and 10", channelID)
		}
		routes = append(routes, database.AlertRuleRoute{
			ChannelID:               channelID,
			Recipients:              recipients,
			IsActive:                active,
			DeliveryMode:            deliveryMode,
			DigestWindowMinutes:     digestWindow,
			EscalationChannelID:     escalationChannelID,
			EscalationRecipients:    escalationRecipients,
			EscalationAfterFailures: escalationAfterFailures,
		})
	}
	return routes, nil
}

// sanitizeChannelConfig returns a copy of cfg with secret fields blanked out
// and a flag reporting whether a non-empty secret was present.
func sanitizeChannelConfig(channelType string, cfg map[string]interface{}) (map[string]interface{}, bool) {
	out := make(map[string]interface{}, len(cfg))
	for k, v := range cfg {
		out[k] = v
	}
	hasSecret := false
	switch strings.ToLower(strings.TrimSpace(channelType)) {
	case alerts.ChannelTypeSMTP:
		if _, ok := out["password"]; ok {
			hasSecret = strings.TrimSpace(fmt.Sprintf("%v", out["password"])) !=
"" out["password"] = "" } case alerts.ChannelTypeResend, alerts.ChannelTypeBrevo: if _, ok := out["api_key"]; ok { hasSecret = strings.TrimSpace(fmt.Sprintf("%v", out["api_key"])) != "" out["api_key"] = "" } } return out, hasSecret } func validateChannelConfig(channelType string, cfg map[string]interface{}, allowEmptySecret bool) error { if cfg == nil { return fmt.Errorf("config is required") } get := func(key string) string { raw := strings.TrimSpace(fmt.Sprintf("%v", cfg[key])) if raw == "" { return "" } return raw } switch strings.ToLower(strings.TrimSpace(channelType)) { case alerts.ChannelTypeSMTP: if get("host") == "" { return fmt.Errorf("smtp config requires host") } if get("from_email") == "" { return fmt.Errorf("smtp config requires from_email") } if !allowEmptySecret && get("username") != "" && get("password") == "" { return fmt.Errorf("smtp config requires password when username is set") } case alerts.ChannelTypeResend, alerts.ChannelTypeBrevo: if get("from_email") == "" { return fmt.Errorf("%s config requires from_email", channelType) } if !allowEmptySecret && get("api_key") == "" { return fmt.Errorf("%s config requires api_key", channelType) } default: return fmt.Errorf("unsupported channel type: %s", channelType) } return nil } func validateRecipients(values []string) ([]string, error) { if len(values) == 0 { return nil, fmt.Errorf("at least one recipient is required") } out := make([]string, 0, len(values)) for _, raw := range values { email := strings.TrimSpace(strings.ToLower(raw)) if email == "" { continue } if !emailRegex.MatchString(email) { return nil, fmt.Errorf("invalid recipient email: %s", raw) } out = append(out, email) } if len(out) == 0 { return nil, fmt.Errorf("at least one valid recipient is required") } return out, nil } func isSupportedChannelType(v string) bool { switch strings.ToLower(strings.TrimSpace(v)) { case alerts.ChannelTypeSMTP, alerts.ChannelTypeResend, alerts.ChannelTypeBrevo: return true default: return false } } func 
isSupportedEventType(v string) bool {
	// "*" and "any" act as wildcards matching every event type.
	switch strings.ToLower(strings.TrimSpace(v)) {
	case "*", "any", alerts.EventTypePolicyViolation, alerts.EventTypeScheduleFailed, alerts.EventTypeScheduleSlow:
		return true
	default:
		return false
	}
}

// isSupportedSeverity reports whether v names a known severity level.
func isSupportedSeverity(v string) bool {
	switch strings.ToLower(strings.TrimSpace(v)) {
	case alerts.SeverityInfo, alerts.SeverityWarn, alerts.SeverityError, alerts.SeverityCritical:
		return true
	default:
		return false
	}
}

// coalesceStringPtr returns the trimmed value of v when set, otherwise the
// trimmed fallback ("" when both are nil).
func coalesceStringPtr(v *string, fallback *string) string {
	if v != nil {
		return strings.TrimSpace(*v)
	}
	if fallback == nil {
		return ""
	}
	return strings.TrimSpace(*fallback)
}


================================================
FILE: internal/server/handlers/governance_auditlog.go
================================================
package handlers

import (
	"encoding/json"
	"log/slog"
	"net/http"
	"strconv"
	"strings"

	"github.com/caioricciuti/ch-ui/internal/database"
)

// ---------- GET /audit-logs ----------

// GetAuditLogs returns filtered audit logs (limit clamped to 1..1000) and
// attaches parsed_details when the stored details field is valid JSON.
func (h *GovernanceHandler) GetAuditLogs(w http.ResponseWriter, r *http.Request) {
	limit := 100
	if l := r.URL.Query().Get("limit"); l != "" {
		if parsed, err := strconv.Atoi(l); err == nil && parsed > 0 {
			limit = parsed
		}
	}
	if limit > 1000 {
		limit = 1000
	}
	timeRange := strings.TrimSpace(r.URL.Query().Get("timeRange"))
	action := strings.TrimSpace(r.URL.Query().Get("action"))
	username := strings.TrimSpace(r.URL.Query().Get("username"))
	search := strings.TrimSpace(r.URL.Query().Get("search"))
	logs, err := h.DB.GetAuditLogsFiltered(limit, timeRange, action, username, search)
	if err != nil {
		slog.Error("Failed to get audit logs", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to retrieve audit logs"})
		return
	}
	// Normalize a nil slice so the response is [] rather than null.
	if logs == nil {
		logs = []database.AuditLog{}
	}
	type auditLogResponse struct {
		database.AuditLog
		ParsedDetails interface{} `json:"parsed_details,omitempty"`
	}
	results := make([]auditLogResponse, 0, len(logs))
	for _, log := range logs {
		entry := auditLogResponse{AuditLog: log}
if log.Details != nil && *log.Details != "" {
			var parsed interface{}
			// Details may be plain text or JSON; only valid JSON yields parsed_details.
			if err := json.Unmarshal([]byte(*log.Details), &parsed); err == nil {
				entry.ParsedDetails = parsed
			}
		}
		results = append(results, entry)
	}
	writeJSON(w, http.StatusOK, results)
}


================================================
FILE: internal/server/handlers/governance_querylog.go
================================================
package handlers

import (
	"fmt"
	"log/slog"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/caioricciuti/ch-ui/internal/crypto"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
)

// ---------- GET /clickhouse-query-log ----------

// timeRangeDurations maps API time-range tokens to ClickHouse INTERVAL units.
var timeRangeDurations = map[string]string{
	"5m":  "5 MINUTE",
	"15m": "15 MINUTE",
	"30m": "30 MINUTE",
	"1h":  "1 HOUR",
	"6h":  "6 HOUR",
	"12h": "12 HOUR",
	"24h": "24 HOUR",
	"3d":  "3 DAY",
	"7d":  "7 DAY",
}

// GetClickHouseQueryLog proxies a filtered SELECT over system.query_log
// through the tunnel gateway using the session's decrypted ClickHouse
// credentials.
func (h *GovernanceHandler) GetClickHouseQueryLog(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	if !h.Gateway.IsTunnelOnline(session.ConnectionID) {
		writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "Tunnel is offline"})
		return
	}
	password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to decrypt password", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to decrypt credentials"})
		return
	}
	timeRange := r.URL.Query().Get("timeRange")
	limitStr := r.URL.Query().Get("limit")
	offsetStr := r.URL.Query().Get("offset")
	search := r.URL.Query().Get("search")
	queryKind := r.URL.Query().Get("queryKind")
	status := r.URL.Query().Get("status")
	// Clamp limit to 1..1000; offset must be non-negative.
	limit := 100
	if limitStr != "" {
		if parsed, err := strconv.Atoi(limitStr); err == nil && parsed > 0 {
			limit = parsed
		}
	}
	if limit > 1000 {
		limit = 1000
	}
	offset := 0
	if offsetStr != "" {
		if parsed, err := strconv.Atoi(offsetStr); err == nil &&
parsed >= 0 {
			offset = parsed
		}
	}
	var prewhereConditions []string
	var whereConditions []string
	if timeRange != "" {
		if duration, ok := timeRangeDurations[timeRange]; ok {
			prewhereConditions = append(prewhereConditions, fmt.Sprintf("event_time >= now() - INTERVAL %s", duration))
		}
	}
	if search != "" {
		// escapeString guards the single-quoted ILIKE literals against injection.
		escaped := escapeString(search)
		whereConditions = append(whereConditions, fmt.Sprintf("(query ILIKE '%%%s%%' OR user ILIKE '%%%s%%')", escaped, escaped))
	}
	if queryKind != "" {
		normalized := strings.ToLower(strings.TrimSpace(queryKind))
		if normalized != "" && normalized != "all" {
			escaped := escapeString(normalized)
			whereConditions = append(whereConditions, fmt.Sprintf("lowerUTF8(query_kind) = '%s'", escaped))
		}
	}
	if status != "" {
		switch status {
		case "success":
			whereConditions = append(whereConditions, "exception_code = 0")
		case "error":
			whereConditions = append(whereConditions, "exception_code != 0")
		}
	}
	sql := `SELECT type, event_time, query_start_time, query_duration_ms, read_rows, read_bytes, written_rows, written_bytes, result_rows, result_bytes, memory_usage, query, query_kind, user, exception_code, exception, is_initial_query, databases, tables FROM system.query_log`
	if len(prewhereConditions) > 0 {
		sql += "\nPREWHERE " + strings.Join(prewhereConditions, " AND ")
	}
	if len(whereConditions) > 0 {
		sql += "\nWHERE " + strings.Join(whereConditions, " AND ")
	}
	sql += "\nORDER BY event_time DESC"
	sql += fmt.Sprintf("\nLIMIT %d OFFSET %d", limit, offset)
	sql += "\nFORMAT JSON"
	result, err := h.Gateway.ExecuteQuery(
		session.ConnectionID,
		sql,
		session.ClickhouseUser,
		password,
		60*time.Second,
	)
	if err != nil {
		slog.Warn("Failed to query system.query_log", "error", err, "connection", session.ConnectionID)
		if shouldFallbackToQueryThreadLog(err) {
			// Retry against query_thread_log when query_log does not exist.
			fallbackSQL := strings.Replace(sql, "system.query_log", "system.query_thread_log", 1)
			result, err = h.Gateway.ExecuteQuery(
				session.ConnectionID,
				fallbackSQL,
				session.ClickhouseUser,
				password,
				60*time.Second,
			)
			if err != nil {
slog.Warn("Fallback to query_thread_log also failed", "error", err)
				writeJSON(w, http.StatusBadGateway, map[string]string{"error": err.Error()})
				return
			}
		} else {
			writeJSON(w, http.StatusBadGateway, map[string]string{"error": err.Error()})
			return
		}
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"data": result.Data,
		"meta": result.Meta,
	})
}

// shouldFallbackToQueryThreadLog reports whether err looks like ClickHouse
// saying system.query_log does not exist (both error wordings are matched).
func shouldFallbackToQueryThreadLog(err error) bool {
	if err == nil {
		return false
	}
	msg := strings.ToLower(err.Error())
	if strings.Contains(msg, "system.query_log") && strings.Contains(msg, "unknown_table") {
		return true
	}
	if strings.Contains(msg, "unknown table expression identifier") && strings.Contains(msg, "system.query_log") {
		return true
	}
	return false
}


================================================
FILE: internal/server/handlers/health.go
================================================
package handlers

import (
	"encoding/json"
	"net/http"
	"time"

	"github.com/caioricciuti/ch-ui/internal/version"
)

// HealthHandler serves the health-check endpoint.
type HealthHandler struct{}

// Health reports service status, version and a UTC RFC3339 timestamp.
func (h *HealthHandler) Health(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]string{
		"status":    "ok",
		"service":   "ch-ui",
		"version":   version.Version,
		"timestamp": time.Now().UTC().Format(time.RFC3339),
	})
}


================================================
FILE: internal/server/handlers/license.go
================================================
package handlers

import (
	"encoding/json"
	"net/http"
	"strings"

	"github.com/caioricciuti/ch-ui/internal/config"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/license"
)

// LicenseHandler handles license status and activation endpoints.
type LicenseHandler struct {
	DB     *database.DB
	Config *config.Config
}

// GetLicense returns the current license status.
// GET /api/license
func (h *LicenseHandler) GetLicense(w http.ResponseWriter, r *http.Request) {
	info := license.ValidateLicense(h.Config.LicenseJSON)
	writeJSON(w, http.StatusOK, info)
}

// ActivateLicense activates a new license by validating and storing the signed JSON.
// POST /api/license/activate
func (h *LicenseHandler) ActivateLicense(w http.ResponseWriter, r *http.Request) {
	var body struct {
		License string `json:"license"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	licenseJSON := strings.TrimSpace(body.License)
	if licenseJSON == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "License JSON is required"})
		return
	}
	// Validate before persisting anything.
	info := license.ValidateLicense(licenseJSON)
	if !info.Valid {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid or expired license"})
		return
	}
	// Store in settings
	if err := h.DB.SetSetting("license_json", licenseJSON); err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to save license"})
		return
	}
	// Update runtime config
	h.Config.LicenseJSON = licenseJSON
	writeJSON(w, http.StatusOK, info)
}

// DeactivateLicense removes the current license (downgrade to community).
// POST /api/license/deactivate
func (h *LicenseHandler) DeactivateLicense(w http.ResponseWriter, r *http.Request) {
	if err := h.DB.DeleteSetting("license_json"); err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to remove license"})
		return
	}
	// Clear the runtime copy so the downgrade applies immediately.
	h.Config.LicenseJSON = ""
	writeJSON(w, http.StatusOK, license.CommunityLicense())
}

================================================
FILE: internal/server/handlers/models.go
================================================

package handlers

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
	"time"

	"github.com/go-chi/chi/v5"

	"github.com/caioricciuti/ch-ui/internal/config"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/models"
	"github.com/caioricciuti/ch-ui/internal/scheduler"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
	"github.com/caioricciuti/ch-ui/internal/tunnel"
)

// ModelsHandler handles model CRUD and execution.
type ModelsHandler struct {
	DB      *database.DB   // stores models, runs, and schedules
	Gateway *tunnel.Gateway
	Config  *config.Config
	Runner  *models.Runner // validates and executes model runs (RunAll/RunSingle/RunPipeline)
}

// Routes returns a chi.Router with all model routes.
// Fixed-path routes (dag, validate, run, runs, pipelines, schedules) are
// registered before the catch-all /{id} subtree.
func (h *ModelsHandler) Routes() chi.Router {
	r := chi.NewRouter()
	r.Get("/", h.ListModels)
	r.Post("/", h.CreateModel)
	r.Get("/dag", h.GetDAG)
	r.Get("/validate", h.ValidateAll)
	r.Post("/run", h.RunAll)
	r.Get("/runs", h.ListRuns)
	r.Get("/runs/{runId}", h.GetRun)
	r.Get("/pipelines", h.ListPipelines)
	r.Post("/pipelines/{anchorId}/run", h.RunPipeline)
	r.Get("/schedules", h.ListSchedules)
	r.Get("/schedule/{anchorId}", h.GetSchedule)
	r.Put("/schedule/{anchorId}", h.UpsertSchedule)
	r.Delete("/schedule/{anchorId}", h.DeleteSchedule)
	r.Route("/{id}", func(r chi.Router) {
		r.Get("/", h.GetModel)
		r.Put("/", h.UpdateModel)
		r.Delete("/", h.DeleteModel)
		r.Post("/run", h.RunSingle)
	})
	return r
}

// ListModels returns all models for the current connection.
func (h *ModelsHandler) ListModels(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	modelList, err := h.DB.GetModelsByConnection(session.ConnectionID)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list models"})
		return
	}
	// Normalize nil to an empty slice so the JSON field is [] not null.
	if modelList == nil {
		modelList = []database.Model{}
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"models": modelList})
}

// CreateModel creates a new model.
// Defaults: target_database "default", materialization "view"; for "table"
// materialization, table_engine defaults to MergeTree and order_by to tuple().
func (h *ModelsHandler) CreateModel(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	var body struct {
		Name            string `json:"name"`
		Description     string `json:"description"`
		TargetDatabase  string `json:"target_database"`
		Materialization string `json:"materialization"`
		SQLBody         string `json:"sql_body"`
		TableEngine     string `json:"table_engine"`
		OrderBy         string `json:"order_by"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	if err := models.ValidateModelName(body.Name); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
		return
	}
	if body.TargetDatabase == "" {
		body.TargetDatabase = "default"
	}
	if body.Materialization == "" {
		body.Materialization = "view"
	}
	if body.Materialization != "view" && body.Materialization != "table" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "materialization must be 'view' or 'table'"})
		return
	}
	// Table materializations need an engine and sort key.
	if body.Materialization == "table" {
		if body.TableEngine == "" {
			body.TableEngine = "MergeTree"
		}
		if body.OrderBy == "" {
			body.OrderBy = "tuple()"
		}
	}
	id, err := h.DB.CreateModel(
		session.ConnectionID,
		body.Name,
		body.Description,
		body.TargetDatabase,
		body.Materialization,
		body.SQLBody,
		body.TableEngine,
		body.OrderBy,
		session.ClickhouseUser,
	)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": fmt.Sprintf("Failed to create model: %v", err)})
		return
	}
	// Re-read to return the stored record; lookup error intentionally ignored
	// (worst case the response carries a null model).
	model, _ := h.DB.GetModelByID(id)
	writeJSON(w, http.StatusCreated, map[string]interface{}{"model": model})
}

// GetModel returns a single model.
func (h *ModelsHandler) GetModel(w http.ResponseWriter, r *http.Request) {
	id := chi.URLParam(r, "id")
	model, err := h.DB.GetModelByID(id)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get model"})
		return
	}
	if model == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Model not found"})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"model": model})
}

// UpdateModel updates an existing model. Empty fields in the request fall
// back to the existing values — except Description, which is always written
// as-is (an empty description clears it).
func (h *ModelsHandler) UpdateModel(w http.ResponseWriter, r *http.Request) {
	id := chi.URLParam(r, "id")
	existing, err := h.DB.GetModelByID(id)
	if err != nil || existing == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Model not found"})
		return
	}
	var body struct {
		Name            string `json:"name"`
		Description     string `json:"description"`
		TargetDatabase  string `json:"target_database"`
		Materialization string `json:"materialization"`
		SQLBody         string `json:"sql_body"`
		TableEngine     string `json:"table_engine"`
		OrderBy         string `json:"order_by"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	if body.Name != "" {
		if err := models.ValidateModelName(body.Name); err != nil {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
			return
		}
	} else {
		body.Name = existing.Name
	}
	if body.TargetDatabase == "" {
		body.TargetDatabase = existing.TargetDatabase
	}
	if body.Materialization == "" {
		body.Materialization = existing.Materialization
	}
	if body.Materialization != "view" && body.Materialization != "table" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "materialization must be 'view' or 'table'"})
		return
	}
	if body.TableEngine == "" {
		body.TableEngine = existing.TableEngine
	}
	if body.OrderBy == "" {
		body.OrderBy = existing.OrderBy
	}
	if body.SQLBody == "" {
		body.SQLBody = existing.SQLBody
	}
	// NOTE(review): body.Description has no fallback to existing.Description,
	// unlike the other fields — confirm that clearing via empty string is intended.
	if err := h.DB.UpdateModel(id, body.Name, body.Description, body.TargetDatabase, body.Materialization, body.SQLBody, body.TableEngine, body.OrderBy); err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": fmt.Sprintf("Failed to update model: %v", err)})
		return
	}
	model, _ := h.DB.GetModelByID(id)
	writeJSON(w, http.StatusOK, map[string]interface{}{"model": model})
}

// DeleteModel removes a model.
func (h *ModelsHandler) DeleteModel(w http.ResponseWriter, r *http.Request) {
	id := chi.URLParam(r, "id")
	if err := h.DB.DeleteModel(id); err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete model"})
		return
	}
	writeJSON(w, http.StatusOK, map[string]string{"status": "deleted"})
}

// GetDAG returns the dependency graph for XyFlow visualization.
func (h *ModelsHandler) GetDAG(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } allModels, err := h.DB.GetModelsByConnection(session.ConnectionID) if err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load models"}) return } if len(allModels) == 0 { writeJSON(w, http.StatusOK, map[string]interface{}{ "nodes": []interface{}{}, "edges": []interface{}{}, }) return } // Build DAG for layout computation nameToID := make(map[string]string) var modelIDs []string refsByID := make(map[string][]string) idToModel := make(map[string]database.Model) for _, m := range allModels { nameToID[m.Name] = m.ID idToModel[m.ID] = m modelIDs = append(modelIDs, m.ID) refsByID[m.ID] = models.ExtractRefs(m.SQLBody) } dag, dagErr := models.BuildDAG(modelIDs, refsByID, nameToID) // Compute depth for layout depth := make(map[string]int) if dagErr == nil { for _, id := range dag.Order { d := 0 for _, depID := range dag.Deps[id] { if depth[depID] >= d { d = depth[depID] + 1 } } depth[id] = d } } // Group by depth for y positioning layers := make(map[int]int) // depth -> count at that depth type dagNode struct { ID string `json:"id"` Data interface{} `json:"data"` Position struct { X float64 `json:"x"` Y float64 `json:"y"` } `json:"position"` } type dagEdge struct { ID string `json:"id"` Source string `json:"source"` Target string `json:"target"` } var nodes []dagNode var edges []dagEdge for _, m := range allModels { d := depth[m.ID] idx := layers[d] layers[d]++ n := dagNode{ ID: m.ID, Data: map[string]interface{}{ "name": m.Name, "materialization": m.Materialization, "status": m.Status, "target_database": m.TargetDatabase, }, } n.Position.X = float64(d) * 300 n.Position.Y = float64(idx) * 120 nodes = append(nodes, n) } // Build edges from refs for _, m := range allModels { refs := models.ExtractRefs(m.SQLBody) 
for _, ref := range refs { if srcID, ok := nameToID[ref]; ok { edges = append(edges, dagEdge{ ID: fmt.Sprintf("e-%s-%s", srcID, m.ID), Source: srcID, Target: m.ID, }) } } } writeJSON(w, http.StatusOK, map[string]interface{}{ "nodes": nodes, "edges": edges, }) } // ValidateAll checks all models for reference errors and cycles. func (h *ModelsHandler) ValidateAll(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } errors, err := h.Runner.Validate(session.ConnectionID) if err != nil { writeJSON(w, http.StatusInternalServerError, map[string]string{"error": fmt.Sprintf("Validation failed: %v", err)}) return } writeJSON(w, http.StatusOK, map[string]interface{}{ "valid": len(errors) == 0, "errors": errors, }) } // RunAll triggers execution of all models. func (h *ModelsHandler) RunAll(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } runID, err := h.Runner.RunAll(session.ConnectionID, session.ClickhouseUser) if err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()}) return } writeJSON(w, http.StatusOK, map[string]interface{}{"run_id": runID}) } // RunSingle triggers execution of a single model and its deps. func (h *ModelsHandler) RunSingle(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := chi.URLParam(r, "id") runID, err := h.Runner.RunSingle(session.ConnectionID, id, session.ClickhouseUser) if err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()}) return } writeJSON(w, http.StatusOK, map[string]interface{}{"run_id": runID}) } // ListRuns returns recent model runs. 
func (h *ModelsHandler) ListRuns(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	// Pagination: limit defaults to 20 on missing/invalid values; offset
	// defaults to 0 (Atoi error ignored on purpose).
	limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
	if limit <= 0 {
		limit = 20
	}
	offset, _ := strconv.Atoi(r.URL.Query().Get("offset"))
	runs, err := h.DB.GetModelRuns(session.ConnectionID, limit, offset)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list runs"})
		return
	}
	if runs == nil {
		runs = []database.ModelRun{}
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"runs": runs})
}

// GetRun returns a single run with per-model results.
func (h *ModelsHandler) GetRun(w http.ResponseWriter, r *http.Request) {
	runID := chi.URLParam(r, "runId")
	run, err := h.DB.GetModelRunByID(runID)
	// Lookup errors and missing runs are both reported as 404.
	if err != nil || run == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Run not found"})
		return
	}
	results, err := h.DB.GetModelRunResults(runID)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load run results"})
		return
	}
	if results == nil {
		results = []database.ModelRunResult{}
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"run":     run,
		"results": results,
	})
}

// ── Pipeline endpoints ──────────────────────────────────────────────

// ListPipelines returns connected components with their schedules.
func (h *ModelsHandler) ListPipelines(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	allModels, err := h.DB.GetModelsByConnection(session.ConnectionID)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load models"})
		return
	}
	if len(allModels) == 0 {
		writeJSON(w, http.StatusOK, map[string]interface{}{"pipelines": []interface{}{}})
		return
	}
	// Rebuild the DAG from model refs; each connected component is one pipeline.
	nameToID := make(map[string]string)
	var modelIDs []string
	refsByID := make(map[string][]string)
	for _, m := range allModels {
		nameToID[m.Name] = m.ID
		modelIDs = append(modelIDs, m.ID)
		refsByID[m.ID] = models.ExtractRefs(m.SQLBody)
	}
	dag, dagErr := models.BuildDAG(modelIDs, refsByID, nameToID)
	if dagErr != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": fmt.Sprintf("DAG error: %v", dagErr)})
		return
	}
	components := dag.ConnectedComponents()
	// Load all schedules for this connection and index them by anchor model.
	schedules, err := h.DB.GetModelSchedulesByConnection(session.ConnectionID)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to load schedules"})
		return
	}
	schedByAnchor := make(map[string]database.ModelSchedule)
	for _, s := range schedules {
		if s.AnchorModelID != nil {
			schedByAnchor[*s.AnchorModelID] = s
		}
	}
	type pipelineResp struct {
		AnchorModelID string                  `json:"anchor_model_id"`
		ModelIDs      []string                `json:"model_ids"`
		Schedule      *database.ModelSchedule `json:"schedule"`
	}
	var pipelines []pipelineResp
	for _, comp := range components {
		if len(comp) == 0 {
			continue
		}
		anchor := comp[0] // first in topo order
		p := pipelineResp{
			AnchorModelID: anchor,
			ModelIDs:      comp,
		}
		if s, ok := schedByAnchor[anchor]; ok {
			p.Schedule = &s
		} else {
			// Anchor has no schedule; fall back to any model in the component
			// that does (covers schedules created before the DAG changed shape).
			for _, id := range comp {
				if s, ok := schedByAnchor[id]; ok {
					p.Schedule = &s
					break
				}
			}
		}
		pipelines = append(pipelines, p)
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"pipelines": pipelines})
}

// RunPipeline triggers execution of a single pipeline (connected component).
func (h *ModelsHandler) RunPipeline(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	anchorID := chi.URLParam(r, "anchorId")
	runID, err := h.Runner.RunPipeline(session.ConnectionID, anchorID, session.ClickhouseUser)
	if err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"run_id": runID})
}

// ── Schedule endpoints ──────────────────────────────────────────────

// ListSchedules returns all schedules for the current connection.
func (h *ModelsHandler) ListSchedules(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	schedules, err := h.DB.GetModelSchedulesByConnection(session.ConnectionID)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list schedules"})
		return
	}
	if schedules == nil {
		schedules = []database.ModelSchedule{}
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"schedules": schedules})
}

// GetSchedule returns the schedule for a specific pipeline anchor.
func (h *ModelsHandler) GetSchedule(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	anchorID := chi.URLParam(r, "anchorId")
	sched, err := h.DB.GetModelScheduleByAnchor(session.ConnectionID, anchorID)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get schedule"})
		return
	}
	// sched may be nil (no schedule); the client receives {"schedule": null}.
	writeJSON(w, http.StatusOK, map[string]interface{}{"schedule": sched})
}

// UpsertSchedule creates or updates the schedule for a specific pipeline anchor.
func (h *ModelsHandler) UpsertSchedule(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	anchorID := chi.URLParam(r, "anchorId")
	var body struct {
		Cron    string `json:"cron"`
		Enabled bool   `json:"enabled"` // NOTE(review): decoded but never used below — confirm whether UpsertModelSchedule should receive it
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	if body.Cron == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "cron expression is required"})
		return
	}
	if !scheduler.ValidateCron(body.Cron) {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid cron expression"})
		return
	}
	// Pre-compute the next fire time (empty string when none).
	var nextRunAt string
	if next := scheduler.ComputeNextRun(body.Cron, time.Now().UTC()); next != nil {
		nextRunAt = next.Format(time.RFC3339)
	}
	_, err := h.DB.UpsertModelSchedule(session.ConnectionID, anchorID, body.Cron, nextRunAt, session.ClickhouseUser)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": fmt.Sprintf("Failed to save schedule: %v", err)})
		return
	}
	// Return the stored record; lookup error intentionally ignored.
	sched, _ := h.DB.GetModelScheduleByAnchor(session.ConnectionID, anchorID)
	writeJSON(w, http.StatusOK, map[string]interface{}{"schedule": sched})
}

// DeleteSchedule removes the schedule for a specific pipeline anchor.
func (h *ModelsHandler) DeleteSchedule(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	anchorID := chi.URLParam(r, "anchorId")
	if err := h.DB.DeleteModelScheduleByAnchor(session.ConnectionID, anchorID); err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete schedule"})
		return
	}
	writeJSON(w, http.StatusOK, map[string]string{"status": "deleted"})
}

================================================
FILE: internal/server/handlers/pipelines.go
================================================

package handlers

import (
	"encoding/json"
	"log/slog"
	"net/http"
	"strconv"
	"strings"

	"github.com/go-chi/chi/v5"

	"github.com/caioricciuti/ch-ui/internal/config"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/pipelines"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
	"github.com/caioricciuti/ch-ui/internal/tunnel"
)

// PipelinesHandler handles pipeline CRUD and lifecycle operations.
type PipelinesHandler struct {
	DB      *database.DB      // stores pipelines, graphs, runs, logs
	Gateway *tunnel.Gateway
	Config  *config.Config
	Runner  *pipelines.Runner // starts/stops pipelines and exposes live metrics
}

// Routes returns a chi.Router with all pipeline routes mounted.
func (h *PipelinesHandler) Routes() chi.Router {
	r := chi.NewRouter()
	r.Get("/", h.ListPipelines)
	r.Post("/", h.CreatePipeline)
	r.Route("/{id}", func(r chi.Router) {
		r.Get("/", h.GetPipeline)
		r.Put("/", h.UpdatePipeline)
		r.Delete("/", h.DeletePipeline)
		// Graph operations
		r.Put("/graph", h.SaveGraph)
		// Lifecycle
		r.Post("/start", h.StartPipeline)
		r.Post("/stop", h.StopPipeline)
		// Status & monitoring
		r.Get("/status", h.GetStatus)
		r.Get("/runs", h.ListRuns)
		r.Get("/runs/{runId}/logs", h.GetRunLogs)
	})
	return r
}

// ListPipelines returns all pipelines.
func (h *PipelinesHandler) ListPipelines(w http.ResponseWriter, r *http.Request) { pipelines, err := h.DB.GetPipelines() if err != nil { slog.Error("Failed to list pipelines", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list pipelines"}) return } if pipelines == nil { pipelines = []database.Pipeline{} } writeJSON(w, http.StatusOK, map[string]interface{}{"pipelines": pipelines}) } // GetPipeline returns a single pipeline with its graph. func (h *PipelinesHandler) GetPipeline(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Pipeline ID is required"}) return } pipeline, err := h.DB.GetPipelineByID(id) if err != nil { slog.Error("Failed to get pipeline", "error", err, "id", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get pipeline"}) return } if pipeline == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Pipeline not found"}) return } nodes, edges, err := h.DB.GetPipelineGraph(id) if err != nil { slog.Error("Failed to get pipeline graph", "error", err, "pipeline", id) nodes = []database.PipelineNode{} edges = []database.PipelineEdge{} } if nodes == nil { nodes = []database.PipelineNode{} } if edges == nil { edges = []database.PipelineEdge{} } writeJSON(w, http.StatusOK, map[string]interface{}{ "pipeline": pipeline, "graph": map[string]interface{}{ "nodes": nodes, "edges": edges, }, }) } // CreatePipeline creates a new pipeline. 
func (h *PipelinesHandler) CreatePipeline(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } var body struct { Name string `json:"name"` Description string `json:"description"` ConnectionID string `json:"connection_id"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid JSON body"}) return } name := strings.TrimSpace(body.Name) if name == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Name is required"}) return } connectionID := strings.TrimSpace(body.ConnectionID) if connectionID == "" { // Use the session's connection ID as default connectionID = session.ConnectionID } id, err := h.DB.CreatePipeline(name, strings.TrimSpace(body.Description), connectionID, session.ClickhouseUser) if err != nil { slog.Error("Failed to create pipeline", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create pipeline"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "pipeline.created", Username: &session.ClickhouseUser, Details: &name, }) pipeline, _ := h.DB.GetPipelineByID(id) writeJSON(w, http.StatusCreated, map[string]interface{}{"pipeline": pipeline}) } // UpdatePipeline updates a pipeline's name and description. 
func (h *PipelinesHandler) UpdatePipeline(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	var body struct {
		Name        string `json:"name"`
		Description string `json:"description"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid JSON body"})
		return
	}
	name := strings.TrimSpace(body.Name)
	if name == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Name is required"})
		return
	}
	if err := h.DB.UpdatePipeline(id, name, strings.TrimSpace(body.Description)); err != nil {
		slog.Error("Failed to update pipeline", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update pipeline"})
		return
	}
	pipeline, _ := h.DB.GetPipelineByID(id)
	writeJSON(w, http.StatusOK, map[string]interface{}{"pipeline": pipeline})
}

// DeletePipeline deletes a pipeline.
// Refuses (409) while the pipeline is running or starting.
func (h *PipelinesHandler) DeletePipeline(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	// Check if pipeline exists and is not running
	pipeline, err := h.DB.GetPipelineByID(id)
	if err != nil {
		slog.Error("Failed to get pipeline", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get pipeline"})
		return
	}
	if pipeline == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Pipeline not found"})
		return
	}
	if pipeline.Status == "running" || pipeline.Status == "starting" {
		writeJSON(w, http.StatusConflict, map[string]string{"error": "Cannot delete a running pipeline. Stop it first."})
		return
	}
	if err := h.DB.DeletePipeline(id); err != nil {
		slog.Error("Failed to delete pipeline", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete pipeline"})
		return
	}
	// Best-effort audit entry.
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:   "pipeline.deleted",
		Username: &session.ClickhouseUser,
		Details:  &pipeline.Name,
	})
	writeJSON(w, http.StatusOK, map[string]string{"success": "true"})
}

// SaveGraph saves the entire pipeline graph (nodes + edges).
func (h *PipelinesHandler) SaveGraph(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	var body struct {
		Nodes    []graphNode    `json:"nodes"`
		Edges    []graphEdge    `json:"edges"`
		Viewport *graphViewport `json:"viewport"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid JSON body"})
		return
	}
	// Convert to database types
	var nodes []database.PipelineNode
	for _, n := range body.Nodes {
		// Marshal error ignored: Config came straight from json.Decode.
		configJSON, _ := json.Marshal(n.Config)
		nodes = append(nodes, database.PipelineNode{
			ID:         n.ID,
			PipelineID: id,
			NodeType:   n.NodeType,
			Label:      n.Label,
			PositionX:  n.PositionX,
			PositionY:  n.PositionY,
			// NOTE(review): plain JSON is assigned to a field named
			// ConfigEncrypted — confirm SavePipelineGraph encrypts it.
			ConfigEncrypted: string(configJSON),
		})
	}
	var edges []database.PipelineEdge
	for _, e := range body.Edges {
		edges = append(edges, database.PipelineEdge{
			ID:           e.ID,
			PipelineID:   id,
			SourceNodeID: e.SourceNodeID,
			TargetNodeID: e.TargetNodeID,
			SourceHandle: e.SourceHandle,
			TargetHandle: e.TargetHandle,
		})
	}
	viewportJSON := ""
	if body.Viewport != nil {
		vp, _ := json.Marshal(body.Viewport)
		viewportJSON = string(vp)
	}
	if err := h.DB.SavePipelineGraph(id, nodes, edges, viewportJSON); err != nil {
		slog.Error("Failed to save pipeline graph", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to save pipeline graph"})
		return
	}
	writeJSON(w, http.StatusOK, map[string]string{"success": "true"})
}

// StartPipeline starts a pipeline (placeholder for Phase 3).
// Rejects (409) if the pipeline is already running or starting.
func (h *PipelinesHandler) StartPipeline(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	pipeline, err := h.DB.GetPipelineByID(id)
	if err != nil || pipeline == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Pipeline not found"})
		return
	}
	if pipeline.Status == "running" || pipeline.Status == "starting" {
		writeJSON(w, http.StatusConflict, map[string]string{"error": "Pipeline is already running"})
		return
	}
	if err := h.Runner.StartPipeline(id); err != nil {
		slog.Error("Failed to start pipeline", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}
	// Best-effort audit entry.
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:   "pipeline.started",
		Username: &session.ClickhouseUser,
		Details:  &pipeline.Name,
	})
	writeJSON(w, http.StatusOK, map[string]string{"success": "true"})
}

// StopPipeline stops a running pipeline (placeholder for Phase 3).
func (h *PipelinesHandler) StopPipeline(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := chi.URLParam(r, "id") pipeline, err := h.DB.GetPipelineByID(id) if err != nil || pipeline == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Pipeline not found"}) return } if pipeline.Status != "running" && pipeline.Status != "starting" { writeJSON(w, http.StatusConflict, map[string]string{"error": "Pipeline is not running"}) return } if err := h.Runner.StopPipeline(id); err != nil { slog.Error("Failed to stop pipeline", "error", err, "id", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "pipeline.stopped", Username: &session.ClickhouseUser, Details: &pipeline.Name, }) writeJSON(w, http.StatusOK, map[string]string{"success": "true"}) } // GetStatus returns the current status of a pipeline. func (h *PipelinesHandler) GetStatus(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") pipeline, err := h.DB.GetPipelineByID(id) if err != nil || pipeline == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Pipeline not found"}) return } resp := map[string]interface{}{ "pipeline_id": pipeline.ID, "status": pipeline.Status, "last_error": pipeline.LastError, } // Add live metrics if pipeline is running if metrics := h.Runner.GetRunningMetrics(id); metrics != nil { resp["rows_ingested"] = metrics.RowsIngested.Load() resp["bytes_ingested"] = metrics.BytesIngested.Load() resp["batches_sent"] = metrics.BatchesSent.Load() resp["errors_count"] = metrics.ErrorsCount.Load() } writeJSON(w, http.StatusOK, resp) } // ListRuns returns execution runs for a pipeline. 
func (h *PipelinesHandler) ListRuns(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") limit := 20 offset := 0 if v := r.URL.Query().Get("limit"); v != "" { if parsed, err := strconv.Atoi(v); err == nil && parsed > 0 { limit = parsed } } if v := r.URL.Query().Get("offset"); v != "" { if parsed, err := strconv.Atoi(v); err == nil && parsed >= 0 { offset = parsed } } runs, err := h.DB.GetPipelineRuns(id, limit, offset) if err != nil { slog.Error("Failed to list pipeline runs", "error", err, "pipeline", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to list runs"}) return } if runs == nil { runs = []database.PipelineRun{} } writeJSON(w, http.StatusOK, map[string]interface{}{"runs": runs}) } // GetRunLogs returns logs for a specific pipeline run. func (h *PipelinesHandler) GetRunLogs(w http.ResponseWriter, r *http.Request) { runID := chi.URLParam(r, "runId") limit := 200 if v := r.URL.Query().Get("limit"); v != "" { if parsed, err := strconv.Atoi(v); err == nil && parsed > 0 { limit = parsed } } logs, err := h.DB.GetPipelineRunLogs(runID, limit) if err != nil { slog.Error("Failed to get run logs", "error", err, "run", runID) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to get run logs"}) return } if logs == nil { logs = []database.PipelineRunLog{} } writeJSON(w, http.StatusOK, map[string]interface{}{"logs": logs}) } // ── Graph request types ──────────────────────────────────────────── type graphNode struct { ID string `json:"id"` NodeType string `json:"node_type"` Label string `json:"label"` PositionX float64 `json:"position_x"` PositionY float64 `json:"position_y"` Config map[string]interface{} `json:"config"` } type graphEdge struct { ID string `json:"id"` SourceNodeID string `json:"source_node_id"` TargetNodeID string `json:"target_node_id"` SourceHandle *string `json:"source_handle"` TargetHandle *string `json:"target_handle"` } type graphViewport struct { X float64 
`json:"x"`
	Y    float64 `json:"y"`
	Zoom float64 `json:"zoom"`
}

================================================
FILE: internal/server/handlers/query.go
================================================
package handlers

import (
	"encoding/json"
	"fmt"
	"log/slog"
	"net/http"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/caioricciuti/ch-ui/internal/config"
	"github.com/caioricciuti/ch-ui/internal/crypto"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/governance"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
	"github.com/caioricciuti/ch-ui/internal/tunnel"
	"github.com/go-chi/chi/v5"
)

// maxQueryTimeout caps any client-supplied query timeout.
const maxQueryTimeout = 5 * time.Minute

// QueryHandler handles SQL query execution and schema exploration endpoints.
type QueryHandler struct {
	DB         *database.DB                 // app metadata store (audit logs, saved state)
	Gateway    *tunnel.Gateway              // executes SQL against ClickHouse through the tunnel
	Config     *config.Config               // provides AppSecretKey used to decrypt session credentials
	Guardrails *governance.GuardrailService // query/table guardrail enforcement
}

// Routes registers all query-related routes on the given chi.Router.
func (h *QueryHandler) Routes(r chi.Router) {
	// Query execution.
	r.Post("/", h.ExecuteQuery)
	r.Post("/run", h.ExecuteQuery)
	r.Post("/stream", h.StreamQuery)
	r.Post("/sample", h.SampleQuery)
	r.Post("/explorer-data", h.ExplorerData)
	// Query tooling.
	r.Post("/format", h.FormatSQL)
	r.Post("/explain", h.ExplainQuery)
	r.Post("/plan", h.QueryPlan)
	r.Post("/profile", h.QueryProfile)
	r.Post("/estimate", h.EstimateQuery)
	// Schema browsing.
	r.Get("/databases", h.ListDatabases)
	r.Get("/tables", h.ListTables)
	r.Get("/columns", h.ListColumns)
	r.Get("/data-types", h.ListDataTypes)
	r.Get("/clusters", h.ListClusters)
	// Schema mutation (admin-gated in the handlers).
	r.Post("/schema/database", h.CreateDatabase)
	r.Post("/schema/database/drop", h.DropDatabase)
	r.Post("/schema/table", h.CreateTable)
	r.Post("/schema/table/drop", h.DropTable)
	// File upload / ingestion.
	r.Post("/upload/discover", h.DiscoverUploadSchema)
	r.Post("/upload/ingest", h.IngestUpload)
	// Misc.
	r.Get("/host-info", h.GetHostInfo)
	r.Get("/completions", h.ListCompletions)
}

// --- Request / Response types ---

// executeQueryRequest is the body for POST / and POST /run.
type executeQueryRequest struct {
	Query   string `json:"query"`
	Timeout int    `json:"timeout"` // seconds
	MaxResultRows int `json:"maxResultRows"` // server-side row cap via ClickHouse max_result_rows
}

// executeQueryResponse is the success envelope returned by ExecuteQuery.
type executeQueryResponse struct {
	Success    bool            `json:"success"`
	Data       json.RawMessage `json:"data,omitempty"`
	Meta       json.RawMessage `json:"meta,omitempty"`
	Statistics json.RawMessage `json:"statistics,omitempty"`
	Rows       int             `json:"rows"`
	ElapsedMS  int64           `json:"elapsed_ms"`
}

// formatRequest is the body for POST /format.
type formatRequest struct {
	Query string `json:"query"`
}

// formatResponse carries the locally formatted SQL.
type formatResponse struct {
	Formatted string `json:"formatted"`
}

// explainRequest is shared by /explain, /plan, /profile and /estimate.
type explainRequest struct {
	Query string `json:"query"`
}

// sampleRequest is the body for POST /sample.
type sampleRequest struct {
	Query    string `json:"query"`
	PerShard int    `json:"per_shard"` // rows to return per shard
	ShardBy  string `json:"shard_by"`  // virtual column used for LIMIT ... BY
	Timeout  int    `json:"timeout"`   // seconds
}

// planNode is one node of the parsed EXPLAIN tree returned by /plan.
type planNode struct {
	ID       string  `json:"id"`
	ParentID *string `json:"parent_id,omitempty"`
	Level    int     `json:"level"`
	Label    string  `json:"label"`
}

// createDatabaseRequest is the body for POST /schema/database.
type createDatabaseRequest struct {
	Name        string `json:"name"`
	Engine      string `json:"engine"`
	OnCluster   string `json:"on_cluster"`
	IfNotExists *bool  `json:"if_not_exists"` // nil means true (default)
}

// dropDatabaseRequest is the body for POST /schema/database/drop.
type dropDatabaseRequest struct {
	Name      string `json:"name"`
	OnCluster string `json:"on_cluster"`
	IfExists  *bool  `json:"if_exists"` // nil means true (default)
	Sync      bool   `json:"sync"`
}

// createTableColumn describes one column in a CREATE TABLE request.
type createTableColumn struct {
	Name              string `json:"name"`
	Type              string `json:"type"`
	DefaultExpression string `json:"default_expression"`
	Comment           string `json:"comment"`
}

// createTableRequest is the body for POST /schema/table.
type createTableRequest struct {
	Database    string              `json:"database"`
	Name        string              `json:"name"`
	Engine      string              `json:"engine"`
	OnCluster   string              `json:"on_cluster"`
	IfNotExists *bool               `json:"if_not_exists"` // nil means true (default)
	Columns     []createTableColumn `json:"columns"`
	OrderBy     string              `json:"order_by"`
	PartitionBy string              `json:"partition_by"`
	PrimaryKey  string              `json:"primary_key"`
	SampleBy    string              `json:"sample_by"`
	TTL         string              `json:"ttl"`
	Settings    string              `json:"settings"`
	Comment     string              `json:"comment"`
}

// dropTableRequest is the body for POST /schema/table/drop.
type dropTableRequest struct {
	Database  string `json:"database"`
	Name      string `json:"name"`
	OnCluster string `json:"on_cluster"`
	IfExists  *bool  `json:"if_exists"`
Sync bool `json:"sync"` } // --- Handlers --- // ExecuteQuery handles POST / and POST /run. func (h *QueryHandler) ExecuteQuery(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } var req executeQueryRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { writeError(w, http.StatusBadRequest, "Invalid request body") return } query := strings.TrimSpace(req.Query) if query == "" { writeError(w, http.StatusBadRequest, "Query is required") return } if !h.enforceGuardrailsForQuery(w, r, query, r.URL.Path) { return } // Determine timeout timeout := 30 * time.Second if req.Timeout > 0 { timeout = time.Duration(req.Timeout) * time.Second } if timeout > maxQueryTimeout { timeout = maxQueryTimeout } // Decrypt password password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } // Execute query via tunnel start := time.Now() result, err := h.Gateway.ExecuteQuery( session.ConnectionID, query, session.ClickhouseUser, password, timeout, ) elapsed := time.Since(start).Milliseconds() if err != nil { slog.Warn("Query execution failed", "error", err, "connection", session.ConnectionID) writeError(w, http.StatusBadGateway, err.Error()) return } // Count rows from data rows := countRows(result.Data) // Audit log (truncate query preview to 100 chars) preview := query if len(preview) > 100 { preview = preview[:100] + "..." 
} go func() { ip := r.RemoteAddr h.DB.CreateAuditLog(database.AuditLogParams{ Action: "query.execute", Username: strPtr(session.ClickhouseUser), ConnectionID: strPtr(session.ConnectionID), Details: strPtr(preview), IPAddress: strPtr(ip), }) }() resp := executeQueryResponse{ Success: true, Data: result.Data, Meta: result.Meta, Statistics: result.Stats, Rows: rows, ElapsedMS: elapsed, } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) json.NewEncoder(w).Encode(resp) } // FormatSQL handles POST /format. func (h *QueryHandler) FormatSQL(w http.ResponseWriter, r *http.Request) { var req formatRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { writeError(w, http.StatusBadRequest, "Invalid request body") return } query := strings.TrimSpace(req.Query) if query == "" { writeError(w, http.StatusBadRequest, "Query is required") return } formatted := formatSQL(query) w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(formatResponse{Formatted: formatted}) } // ExplainQuery handles POST /explain. 
func (h *QueryHandler) ExplainQuery(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } var req explainRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { writeError(w, http.StatusBadRequest, "Invalid request body") return } query := strings.TrimSpace(req.Query) if query == "" { writeError(w, http.StatusBadRequest, "Query is required") return } if !h.enforceGuardrailsForQuery(w, r, query, r.URL.Path) { return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } explainSQL := "EXPLAIN " + query result, err := h.Gateway.ExecuteQuery( session.ConnectionID, explainSQL, session.ClickhouseUser, password, 30*time.Second, ) if err != nil { writeError(w, http.StatusBadGateway, err.Error()) return } w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(map[string]interface{}{ "success": true, "data": result.Data, "meta": result.Meta, }) } // QueryPlan handles POST /plan and returns a parsed plan tree for visualization. 
func (h *QueryHandler) QueryPlan(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } var req explainRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { writeError(w, http.StatusBadRequest, "Invalid request body") return } query := strings.TrimSpace(req.Query) if query == "" { writeError(w, http.StatusBadRequest, "Query is required") return } if !h.enforceGuardrailsForQuery(w, r, query, r.URL.Path) { return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } candidates := []struct { source string sql string }{ {source: "plan", sql: "EXPLAIN PLAN " + query}, {source: "ast", sql: "EXPLAIN AST " + query}, {source: "generic", sql: "EXPLAIN " + query}, } var lastErr error for _, candidate := range candidates { result, execErr := h.Gateway.ExecuteQuery( session.ConnectionID, candidate.sql, session.ClickhouseUser, password, 45*time.Second, ) if execErr != nil { lastErr = execErr continue } lines := extractExplainLines(result.Data) if len(lines) == 0 { continue } nodes := buildPlanTree(lines) writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "source": candidate.source, "lines": lines, "nodes": nodes, }) return } if lastErr != nil { writeError(w, http.StatusBadGateway, lastErr.Error()) return } writeError(w, http.StatusBadGateway, "No plan information returned by ClickHouse") } // EstimateQuery handles POST /estimate and returns cost estimation via EXPLAIN ESTIMATE. 
func (h *QueryHandler) EstimateQuery(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } var req explainRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { writeError(w, http.StatusBadRequest, "Invalid request body") return } query := strings.TrimSpace(req.Query) if query == "" { writeError(w, http.StatusBadRequest, "Query is required") return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } result, err := h.Gateway.ExecuteQuery( session.ConnectionID, "EXPLAIN ESTIMATE "+query, session.ClickhouseUser, password, 15*time.Second, ) if err != nil { writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "tables": []interface{}{}, "total_rows": 0, "total_parts": 0, "total_marks": 0, "error": err.Error(), }) return } rows := decodeRows(result.Data) type tableEstimate struct { Database string `json:"database"` Table string `json:"table"` Parts int64 `json:"parts"` Rows int64 `json:"rows"` Marks int64 `json:"marks"` } var tables []tableEstimate var totalRows, totalParts, totalMarks int64 for _, row := range rows { te := tableEstimate{ Database: fmt.Sprint(row["database"]), Table: fmt.Sprint(row["table"]), } if v, ok := row["parts"]; ok { te.Parts = toInt64(v) } if v, ok := row["rows"]; ok { te.Rows = toInt64(v) } if v, ok := row["marks"]; ok { te.Marks = toInt64(v) } tables = append(tables, te) totalRows += te.Rows totalParts += te.Parts totalMarks += te.Marks } if tables == nil { tables = []tableEstimate{} } writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "tables": tables, "total_rows": totalRows, "total_parts": totalParts, "total_marks": totalMarks, }) } // toInt64 converts an interface value to int64. 
func toInt64(v interface{}) int64 { switch val := v.(type) { case float64: return int64(val) case int64: return val case int: return int64(val) case string: n, _ := strconv.ParseInt(val, 10, 64) return n case json.Number: n, _ := val.Int64() return n default: s := fmt.Sprint(v) n, _ := strconv.ParseInt(s, 10, 64) return n } } // SampleQuery handles POST /sample and returns first N rows per shard when available. func (h *QueryHandler) SampleQuery(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } var req sampleRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { writeError(w, http.StatusBadRequest, "Invalid request body") return } query := strings.TrimSpace(req.Query) if query == "" { writeError(w, http.StatusBadRequest, "Query is required") return } if !isReadOnlyQuery(query) { writeError(w, http.StatusBadRequest, "Sampling only supports read-only SELECT/WITH queries") return } if !h.enforceGuardrailsForQuery(w, r, query, r.URL.Path) { return } perShard := req.PerShard if perShard <= 0 { perShard = 25 } if perShard > 500 { perShard = 500 } shardBy := strings.TrimSpace(req.ShardBy) if shardBy == "" { shardBy = "_shard_num" } timeout := 30 * time.Second if req.Timeout > 0 { timeout = time.Duration(req.Timeout) * time.Second } if timeout > maxQueryTimeout { timeout = maxQueryTimeout } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } base := stripFormatClause(stripTrailingSemicolon(query)) perShardSQL := fmt.Sprintf( "SELECT * FROM (%s) AS __ch_ui_sample LIMIT %d BY %s", base, perShard, escapeIdentifier(shardBy), ) start := time.Now() result, runErr := h.Gateway.ExecuteQuery( session.ConnectionID, perShardSQL, session.ClickhouseUser, password, 
timeout, ) elapsed := time.Since(start).Milliseconds() samplingMode := "per_shard" warning := "" if runErr != nil && shouldFallbackToGlobalSample(runErr.Error()) { // Fallback for local/non-distributed queries where _shard_num is unavailable. fallbackSQL := fmt.Sprintf( "SELECT * FROM (%s) AS __ch_ui_sample LIMIT %d", base, perShard, ) start = time.Now() result, runErr = h.Gateway.ExecuteQuery( session.ConnectionID, fallbackSQL, session.ClickhouseUser, password, timeout, ) elapsed = time.Since(start).Milliseconds() samplingMode = "global" warning = "Shard virtual column not available for this query; returned global sample instead." } if runErr != nil { writeError(w, http.StatusBadGateway, runErr.Error()) return } writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "data": result.Data, "meta": result.Meta, "statistics": result.Stats, "rows": countRows(result.Data), "elapsed_ms": elapsed, "sampling_mode": samplingMode, "warning": warning, }) } // QueryProfile handles POST /profile and returns latest query_log metrics for the exact SQL. 
func (h *QueryHandler) QueryProfile(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeError(w, http.StatusUnauthorized, "Not authenticated")
		return
	}
	var req explainRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body")
		return
	}
	query := strings.TrimSpace(req.Query)
	if query == "" {
		writeError(w, http.StatusBadRequest, "Query is required")
		return
	}
	if !h.enforceGuardrailsForQuery(w, r, query, r.URL.Path) {
		return
	}
	password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to decrypt password", "error", err)
		writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials")
		return
	}
	// Look up the most recent finished run of this exact SQL text by this user
	// in system.query_log. Both interpolated values are escaped first.
	escapedQuery := escapeLiteral(stripTrailingSemicolon(query))
	escapedUser := escapeLiteral(session.ClickhouseUser)
	profileSQL := fmt.Sprintf(`SELECT
	query_duration_ms,
	read_rows,
	read_bytes,
	result_rows,
	result_bytes,
	memory_usage,
	ProfileEvents['SelectedRows'] AS selected_rows,
	ProfileEvents['SelectedBytes'] AS selected_bytes,
	ProfileEvents['SelectedMarks'] AS selected_marks
FROM system.query_log
WHERE type = 'QueryFinish' AND query = '%s' AND user = '%s'
ORDER BY event_time DESC
LIMIT 1`, escapedQuery, escapedUser)
	result, execErr := h.Gateway.ExecuteQuery(
		session.ConnectionID,
		profileSQL,
		session.ClickhouseUser,
		password,
		10*time.Second,
	)
	if execErr != nil {
		// query_log may be unavailable depending on ClickHouse config/version.
		writeJSON(w, http.StatusOK, map[string]interface{}{
			"success":   true,
			"available": false,
			"reason":    "system.query_log is unavailable for this connection",
		})
		return
	}
	rows := decodeRows(result.Data)
	if len(rows) == 0 {
		// query_log is flushed asynchronously, so a just-finished query may not
		// be visible yet; the caller can retry.
		writeJSON(w, http.StatusOK, map[string]interface{}{
			"success":   true,
			"available": false,
			"reason":    "No query profile row found yet (query_log flush can be delayed)",
		})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success":   true,
		"available": true,
		"profile":   rows[0],
	})
}

// StreamQuery handles POST /stream — streaming query execution via NDJSON chunked response.
// Protocol: one JSON object per line, in order: a "meta" record, zero or more
// "chunk" records (with a monotonically increasing "seq"), then a final "done"
// or "error" record. The handler returns early if the client disconnects.
func (h *QueryHandler) StreamQuery(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeError(w, http.StatusUnauthorized, "Not authenticated")
		return
	}
	var req executeQueryRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body")
		return
	}
	query := strings.TrimSpace(req.Query)
	if query == "" {
		writeError(w, http.StatusBadRequest, "Query is required")
		return
	}
	if !h.enforceGuardrailsForQuery(w, r, query, r.URL.Path) {
		return
	}
	password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to decrypt password", "error", err)
		writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials")
		return
	}
	// NDJSON streaming requires incremental writes, so the ResponseWriter must
	// support flushing.
	flusher, ok := w.(http.Flusher)
	if !ok {
		writeError(w, http.StatusInternalServerError, "Streaming not supported")
		return
	}
	// Clamp the server-side row cap (ClickHouse max_result_rows): default 1000,
	// hard ceiling 1,000,000.
	maxRows := req.MaxResultRows
	if maxRows <= 0 {
		maxRows = 1000
	}
	if maxRows > 1_000_000 {
		maxRows = 1_000_000
	}
	requestID, stream, err := h.Gateway.ExecuteStreamQuery(
		session.ConnectionID,
		query,
		session.ClickhouseUser,
		password,
		map[string]string{
			"max_result_rows":      strconv.Itoa(maxRows),
			"result_overflow_mode": "break",
		},
	)
	if err != nil {
		writeError(w, http.StatusBadGateway, err.Error())
		return
	}
	// Release gateway-side stream state on every exit path.
	defer h.Gateway.CleanupStream(session.ConnectionID, requestID)
	w.Header().Set("Content-Type", "application/x-ndjson")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("X-Content-Type-Options", "nosniff")
	w.WriteHeader(http.StatusOK)
	enc := json.NewEncoder(w)
	ctx := r.Context()
	// Wait for meta or error
	select {
	case meta := <-stream.MetaCh:
		enc.Encode(map[string]interface{}{"type": "meta", "meta": meta})
		flusher.Flush()
	case err := <-stream.ErrorCh:
		enc.Encode(map[string]interface{}{"type": "error", "error": err.Error()})
		flusher.Flush()
		return
	case <-ctx.Done():
		// Client disconnected before the stream produced anything.
		return
	}
	// Read chunks until channel is closed or client disconnects
	seq := 0
	for {
		select {
		case chunk, ok := <-stream.ChunkCh:
			if !ok {
				goto streamDone
			}
			enc.Encode(map[string]interface{}{"type": "chunk", "data": chunk, "seq": seq})
			flusher.Flush()
			seq++
		case <-ctx.Done():
			return
		}
	}
streamDone:
	// ChunkCh closed — read final done or error
	select {
	case donePayload := <-stream.DoneCh:
		var done tunnel.StreamDone
		// NOTE(review): the Unmarshal error is deliberately ignored — a
		// malformed done payload yields zero-valued statistics. Confirm this is
		// intended rather than emitting an "error" record.
		json.Unmarshal(donePayload, &done)
		enc.Encode(map[string]interface{}{
			"type":       "done",
			"statistics": done.Statistics,
			"total_rows": done.TotalRows,
		})
		flusher.Flush()
	case err := <-stream.ErrorCh:
		enc.Encode(map[string]interface{}{"type": "error", "error": err.Error()})
		flusher.Flush()
	case <-ctx.Done():
		return
	}
	// Audit log
	// NOTE(review): this slices at a raw byte offset and can split a multi-byte
	// UTF-8 character — confirm whether the audit trail requires valid UTF-8.
	preview := query
	if len(preview) > 100 {
		preview = preview[:100] + "..."
	}
	go func() {
		h.DB.CreateAuditLog(database.AuditLogParams{
			Action:       "query.stream",
			Username:     strPtr(session.ClickhouseUser),
			ConnectionID: strPtr(session.ConnectionID),
			Details:      strPtr(preview),
			IPAddress:    strPtr(r.RemoteAddr),
		})
	}()
}

// ExplorerData handles POST /explorer-data — server-side paginated data browsing.
func (h *QueryHandler) ExplorerData(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } var req struct { Database string `json:"database"` Table string `json:"table"` Page int `json:"page"` PageSize int `json:"page_size"` SortColumn string `json:"sort_column"` SortDir string `json:"sort_dir"` } if err := json.NewDecoder(r.Body).Decode(&req); err != nil { writeError(w, http.StatusBadRequest, "Invalid request body") return } if req.Database == "" || req.Table == "" { writeError(w, http.StatusBadRequest, "database and table are required") return } if !h.enforceGuardrailsForTable(w, r, req.Database, req.Table, r.URL.Path) { return } if req.PageSize <= 0 || req.PageSize > 1000 { req.PageSize = 100 } if req.Page < 0 { req.Page = 0 } sortDir := "ASC" if strings.EqualFold(req.SortDir, "desc") { sortDir = "DESC" } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } // Build data query with LIMIT/OFFSET offset := req.Page * req.PageSize dataSQL := fmt.Sprintf("SELECT * FROM %s.%s", escapeIdentifier(req.Database), escapeIdentifier(req.Table)) if req.SortColumn != "" { dataSQL += fmt.Sprintf(" ORDER BY %s %s", escapeIdentifier(req.SortColumn), sortDir) } dataSQL += fmt.Sprintf(" LIMIT %d OFFSET %d", req.PageSize, offset) // Build count query countSQL := fmt.Sprintf("SELECT count() FROM %s.%s", escapeIdentifier(req.Database), escapeIdentifier(req.Table)) // Execute data query (JSONCompact — positional arrays, smaller payload) dataRaw, err := h.Gateway.ExecuteQueryWithFormat( session.ConnectionID, dataSQL, session.ClickhouseUser, password, "JSONCompact", 30*time.Second, ) if err != nil { writeError(w, http.StatusBadGateway, err.Error()) return } // Execute count query countRaw, 
err := h.Gateway.ExecuteQueryWithFormat( session.ConnectionID, countSQL, session.ClickhouseUser, password, "JSONCompact", 30*time.Second, ) if err != nil { writeError(w, http.StatusBadGateway, err.Error()) return } // Parse JSONCompact data result var dataCompact struct { Meta json.RawMessage `json:"meta"` Data json.RawMessage `json:"data"` Rows int `json:"rows"` } json.Unmarshal(dataRaw, &dataCompact) // Parse count result — JSONCompact: {"data":[[12345]]} var totalRows int64 var countCompact struct { Data [][]json.RawMessage `json:"data"` } if err := json.Unmarshal(countRaw, &countCompact); err == nil && len(countCompact.Data) > 0 && len(countCompact.Data[0]) > 0 { var v interface{} if json.Unmarshal(countCompact.Data[0][0], &v) == nil { switch n := v.(type) { case float64: totalRows = int64(n) case string: fmt.Sscanf(n, "%d", &totalRows) } } } w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(map[string]interface{}{ "success": true, "meta": dataCompact.Meta, "data": dataCompact.Data, "rows": dataCompact.Rows, "total_rows": totalRows, "page": req.Page, "page_size": req.PageSize, }) } // ListDatabases handles GET /databases. 
func (h *QueryHandler) ListDatabases(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } result, err := h.Gateway.ExecuteQuery( session.ConnectionID, "SHOW DATABASES", session.ClickhouseUser, password, 30*time.Second, ) if err != nil { writeError(w, http.StatusBadGateway, err.Error()) return } names := extractNames(result.Data) w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(map[string]interface{}{ "success": true, "databases": names, }) } // ListTables handles GET /tables?database=X. func (h *QueryHandler) ListTables(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } db := r.URL.Query().Get("database") if db == "" { writeError(w, http.StatusBadRequest, "database query parameter is required") return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } query := fmt.Sprintf("SELECT name, engine FROM system.tables WHERE database = '%s' ORDER BY name", escapeLiteral(db)) result, err := h.Gateway.ExecuteQuery( session.ConnectionID, query, session.ClickhouseUser, password, 30*time.Second, ) if err != nil { writeError(w, http.StatusBadGateway, err.Error()) return } type tableInfo struct { Name string `json:"name"` Engine string `json:"engine"` } var rows []map[string]interface{} tables := []tableInfo{} if len(result.Data) > 0 { if err := json.Unmarshal(result.Data, &rows); err == nil { for _, 
row := range rows { name, _ := row["name"].(string) engine, _ := row["engine"].(string) if name != "" { tables = append(tables, tableInfo{Name: name, Engine: engine}) } } } } w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(map[string]interface{}{ "success": true, "tables": tables, }) } // ListColumns handles GET /columns?database=X&table=Y. func (h *QueryHandler) ListColumns(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } dbName := r.URL.Query().Get("database") table := r.URL.Query().Get("table") if dbName == "" || table == "" { writeError(w, http.StatusBadRequest, "database and table query parameters are required") return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } query := fmt.Sprintf("DESCRIBE TABLE %s.%s", escapeIdentifier(dbName), escapeIdentifier(table)) result, err := h.Gateway.ExecuteQuery( session.ConnectionID, query, session.ClickhouseUser, password, 30*time.Second, ) if err != nil { writeError(w, http.StatusBadGateway, err.Error()) return } w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(map[string]interface{}{ "success": true, "columns": result.Data, "meta": result.Meta, }) } // ListDataTypes handles GET /data-types. 
func (h *QueryHandler) ListDataTypes(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } result, err := h.Gateway.ExecuteQuery( session.ConnectionID, "SELECT name FROM system.data_type_families WHERE is_parametric = 0 ORDER BY name", session.ClickhouseUser, password, 15*time.Second, ) if err != nil { // Fallback for older ClickHouse versions where is_parametric might not exist. slog.Warn("Failed to list non-parametric data types; trying fallback query", "error", err, "connection", session.ConnectionID) result, err = h.Gateway.ExecuteQuery( session.ConnectionID, "SELECT name FROM system.data_type_families ORDER BY name", session.ClickhouseUser, password, 15*time.Second, ) if err != nil { slog.Warn("Failed to list data types; returning empty list", "error", err, "connection", session.ConnectionID) writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "data_types": []string{}, }) return } } rawTypes := extractNames(result.Data) uniq := make(map[string]struct{}, len(rawTypes)) types := make([]string, 0, len(rawTypes)) for _, t := range rawTypes { t = strings.TrimSpace(t) if t == "" { continue } if _, exists := uniq[t]; exists { continue } uniq[t] = struct{}{} types = append(types, t) } sort.Strings(types) writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "data_types": types, }) } // ListClusters handles GET /clusters. 
func (h *QueryHandler) ListClusters(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } result, err := h.Gateway.ExecuteQuery( session.ConnectionID, "SELECT DISTINCT cluster FROM system.clusters WHERE cluster != '' ORDER BY cluster", session.ClickhouseUser, password, 15*time.Second, ) if err != nil { // Some deployments/users cannot read system.clusters; return an empty list instead of hard failing UI. slog.Warn("Failed to list clusters; returning empty list", "error", err, "connection", session.ConnectionID) writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "clusters": []string{}, }) return } writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "clusters": extractNames(result.Data), }) } // CreateDatabase handles POST /schema/database. 
// CreateDatabase handles POST /schema/database. Admin-only: builds and runs a
// CREATE DATABASE statement from validated request fields, then records an
// audit log entry and returns 201 with the database name.
func (h *QueryHandler) CreateDatabase(w http.ResponseWriter, r *http.Request) {
	session := h.requireSchemaAdmin(w, r)
	if session == nil {
		return // requireSchemaAdmin already wrote the error response
	}
	var req createDatabaseRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body")
		return
	}
	// Identifier validation: the name is later backtick-escaped, but reject
	// dots/control characters and reserved system databases up front.
	name := strings.TrimSpace(req.Name)
	if err := validateSimpleObjectName(name, "database"); err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	if isSystemDatabaseName(name) {
		writeError(w, http.StatusBadRequest, "Cannot create reserved system database")
		return
	}
	engine := strings.TrimSpace(req.Engine)
	if engine == "" {
		engine = "Atomic"
	}
	// Engine is interpolated verbatim into the SQL, so screen it for
	// statement terminators and comment markers.
	if isUnsafeSQLFragment(engine) {
		writeError(w, http.StatusBadRequest, "Invalid engine expression")
		return
	}
	cluster := strings.TrimSpace(req.OnCluster)
	if cluster != "" && isUnsafeSQLFragment(cluster) {
		writeError(w, http.StatusBadRequest, "Invalid cluster name")
		return
	}
	password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to decrypt password", "error", err)
		writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials")
		return
	}
	// IF NOT EXISTS defaults to true when the client omits the field.
	ifNotExists := req.IfNotExists == nil || *req.IfNotExists
	var sqlBuilder strings.Builder
	sqlBuilder.WriteString("CREATE DATABASE ")
	if ifNotExists {
		sqlBuilder.WriteString("IF NOT EXISTS ")
	}
	sqlBuilder.WriteString(escapeIdentifier(name))
	if cluster != "" {
		sqlBuilder.WriteString(" ON CLUSTER ")
		sqlBuilder.WriteString(escapeIdentifier(cluster))
	}
	sqlBuilder.WriteString(" ENGINE = ")
	sqlBuilder.WriteString(engine)
	sql := sqlBuilder.String()
	if _, err := h.Gateway.ExecuteQuery(
		session.ConnectionID,
		sql,
		session.ClickhouseUser,
		password,
		45*time.Second,
	); err != nil {
		// Include the generated command so the user can see exactly what failed.
		writeError(w, http.StatusBadGateway, fmt.Sprintf("%s\n\nCommand:\n%s", err.Error(), sql))
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "schema.database.create",
		Username:     strPtr(session.ClickhouseUser),
		ConnectionID: strPtr(session.ConnectionID),
		Details:      strPtr(fmt.Sprintf("database=%s engine=%s cluster=%s", name, engine, cluster)),
		IPAddress:    strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusCreated, map[string]interface{}{
		"success":  true,
		"database": name,
	})
}

// DropDatabase handles POST /schema/database/drop. Admin-only: builds and runs
// a DROP DATABASE statement (optionally IF EXISTS / ON CLUSTER / SYNC), audits
// the action, and returns 200 with the database name.
func (h *QueryHandler) DropDatabase(w http.ResponseWriter, r *http.Request) {
	session := h.requireSchemaAdmin(w, r)
	if session == nil {
		return
	}
	var req dropDatabaseRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body")
		return
	}
	name := strings.TrimSpace(req.Name)
	if err := validateSimpleObjectName(name, "database"); err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	if isSystemDatabaseName(name) {
		writeError(w, http.StatusBadRequest, "Cannot drop system database")
		return
	}
	cluster := strings.TrimSpace(req.OnCluster)
	if cluster != "" && isUnsafeSQLFragment(cluster) {
		writeError(w, http.StatusBadRequest, "Invalid cluster name")
		return
	}
	password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to decrypt password", "error", err)
		writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials")
		return
	}
	// IF EXISTS defaults to true when the client omits the field.
	ifExists := req.IfExists == nil || *req.IfExists
	var sqlBuilder strings.Builder
	sqlBuilder.WriteString("DROP DATABASE ")
	if ifExists {
		sqlBuilder.WriteString("IF EXISTS ")
	}
	sqlBuilder.WriteString(escapeIdentifier(name))
	if cluster != "" {
		sqlBuilder.WriteString(" ON CLUSTER ")
		sqlBuilder.WriteString(escapeIdentifier(cluster))
	}
	if req.Sync {
		// SYNC makes the drop wait until data is actually removed.
		sqlBuilder.WriteString(" SYNC")
	}
	sql := sqlBuilder.String()
	if _, err := h.Gateway.ExecuteQuery(
		session.ConnectionID,
		sql,
		session.ClickhouseUser,
		password,
		45*time.Second,
	); err != nil {
		writeError(w, http.StatusBadGateway, err.Error())
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "schema.database.drop",
		Username:     strPtr(session.ClickhouseUser),
		ConnectionID: strPtr(session.ConnectionID),
		Details:      strPtr(fmt.Sprintf("database=%s cluster=%s sync=%t", name, cluster, req.Sync)),
		IPAddress:    strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success":  true,
		"database": name,
	})
}

// CreateTable handles POST /schema/table. Admin-only: validates database,
// table, and per-column fields, assembles a CREATE TABLE statement with the
// optional MergeTree-style clauses, executes it, audits the action, and
// returns 201 including the generated command text.
func (h *QueryHandler) CreateTable(w http.ResponseWriter, r *http.Request) {
	session := h.requireSchemaAdmin(w, r)
	if session == nil {
		return
	}
	var req createTableRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "Invalid request body")
		return
	}
	dbName := strings.TrimSpace(req.Database)
	tableName := strings.TrimSpace(req.Name)
	if err := validateSimpleObjectName(dbName, "database"); err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	if err := validateSimpleObjectName(tableName, "table"); err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	if isSystemDatabaseName(dbName) {
		writeError(w, http.StatusBadRequest, "Cannot create tables in system databases")
		return
	}
	if len(req.Columns) == 0 {
		writeError(w, http.StatusBadRequest, "At least one column is required")
		return
	}
	engine := strings.TrimSpace(req.Engine)
	if engine == "" {
		engine = "MergeTree"
	}
	if isUnsafeSQLFragment(engine) {
		writeError(w, http.StatusBadRequest, "Invalid engine expression")
		return
	}
	cluster := strings.TrimSpace(req.OnCluster)
	if cluster != "" && isUnsafeSQLFragment(cluster) {
		writeError(w, http.StatusBadRequest, "Invalid cluster name")
		return
	}
	// Build the column definition list; each entry is
	// `name` Type [DEFAULT expr] [COMMENT 'text'].
	columnsSQL := make([]string, 0, len(req.Columns))
	for i, col := range req.Columns {
		colName := strings.TrimSpace(col.Name)
		colType := strings.TrimSpace(col.Type)
		if err := validateSimpleObjectName(colName, fmt.Sprintf("column #%d", i+1)); err != nil {
			writeError(w, http.StatusBadRequest, err.Error())
			return
		}
		// Types and default expressions are interpolated verbatim, so screen
		// them for statement terminators and comment markers.
		if colType == "" || isUnsafeSQLFragment(colType) {
			writeError(w, http.StatusBadRequest, fmt.Sprintf("Invalid type for column %q", colName))
			return
		}
		part := escapeIdentifier(colName) + " " + colType
		if def := strings.TrimSpace(col.DefaultExpression); def != "" {
			if isUnsafeSQLFragment(def) {
				writeError(w, http.StatusBadRequest, fmt.Sprintf("Invalid default expression for column %q", colName))
				return
			}
			part += " DEFAULT " + def
		}
		if comment := strings.TrimSpace(col.Comment); comment != "" {
			part += " COMMENT '" + escapeLiteral(comment) + "'"
		}
		columnsSQL = append(columnsSQL, part)
	}
	orderBy := strings.TrimSpace(req.OrderBy)
	partitionBy := strings.TrimSpace(req.PartitionBy)
	primaryKey := strings.TrimSpace(req.PrimaryKey)
	sampleBy := strings.TrimSpace(req.SampleBy)
	ttl := strings.TrimSpace(req.TTL)
	settings := strings.TrimSpace(req.Settings)
	comment := strings.TrimSpace(req.Comment)
	// Screen every free-form clause expression with the same unsafe-fragment check.
	expressions := []struct {
		name  string
		value string
	}{
		{name: "order_by", value: orderBy},
		{name: "partition_by", value: partitionBy},
		{name: "primary_key", value: primaryKey},
		{name: "sample_by", value: sampleBy},
		{name: "ttl", value: ttl},
		{name: "settings", value: settings},
	}
	for _, expr := range expressions {
		if expr.value != "" && isUnsafeSQLFragment(expr.value) {
			writeError(w, http.StatusBadRequest, fmt.Sprintf("Invalid %s expression", expr.name))
			return
		}
	}
	// MergeTree-family engines require ORDER BY; default to the empty tuple.
	if strings.Contains(strings.ToLower(engine), "mergetree") && orderBy == "" {
		orderBy = "tuple()"
	}
	password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to decrypt password", "error", err)
		writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials")
		return
	}
	ifNotExists := req.IfNotExists == nil || *req.IfNotExists
	var sqlBuilder strings.Builder
	sqlBuilder.WriteString("CREATE TABLE ")
	if ifNotExists {
		sqlBuilder.WriteString("IF NOT EXISTS ")
	}
	sqlBuilder.WriteString(escapeIdentifier(dbName))
	sqlBuilder.WriteString(".")
	sqlBuilder.WriteString(escapeIdentifier(tableName))
	if cluster != "" {
		sqlBuilder.WriteString(" ON CLUSTER ")
		sqlBuilder.WriteString(escapeIdentifier(cluster))
	}
	sqlBuilder.WriteString(" (\n ")
	sqlBuilder.WriteString(strings.Join(columnsSQL, ",\n "))
	sqlBuilder.WriteString("\n)")
	sqlBuilder.WriteString("\nENGINE = ")
	sqlBuilder.WriteString(engine)
	if partitionBy != "" {
		sqlBuilder.WriteString("\nPARTITION BY ")
		sqlBuilder.WriteString(partitionBy)
	}
	if orderBy != "" {
		sqlBuilder.WriteString("\nORDER BY ")
		sqlBuilder.WriteString(orderBy)
	}
	if primaryKey != "" {
		sqlBuilder.WriteString("\nPRIMARY KEY ")
		sqlBuilder.WriteString(primaryKey)
	}
	if sampleBy != "" {
		sqlBuilder.WriteString("\nSAMPLE BY ")
		sqlBuilder.WriteString(sampleBy)
	}
	if ttl != "" {
		sqlBuilder.WriteString("\nTTL ")
		sqlBuilder.WriteString(ttl)
	}
	if settings != "" {
		sqlBuilder.WriteString("\nSETTINGS ")
		sqlBuilder.WriteString(settings)
	}
	if comment != "" {
		sqlBuilder.WriteString("\nCOMMENT '")
		sqlBuilder.WriteString(escapeLiteral(comment))
		sqlBuilder.WriteString("'")
	}
	sql := sqlBuilder.String()
	if _, err := h.Gateway.ExecuteQuery(
		session.ConnectionID,
		sql,
		session.ClickhouseUser,
		password,
		45*time.Second,
	); err != nil {
		writeError(w, http.StatusBadGateway, err.Error())
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "schema.table.create",
		Username:     strPtr(session.ClickhouseUser),
		ConnectionID: strPtr(session.ConnectionID),
		Details:      strPtr(fmt.Sprintf("table=%s.%s engine=%s cluster=%s", dbName, tableName, engine, cluster)),
		IPAddress:    strPtr(r.RemoteAddr),
	})
	// Echo the generated command so the client can show what was executed.
	writeJSON(w, http.StatusCreated, map[string]interface{}{
		"success":  true,
		"database": dbName,
		"table":    tableName,
		"command":  sql,
	})
}

// DropTable handles POST /schema/table/drop.
func (h *QueryHandler) DropTable(w http.ResponseWriter, r *http.Request) { session := h.requireSchemaAdmin(w, r) if session == nil { return } var req dropTableRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { writeError(w, http.StatusBadRequest, "Invalid request body") return } dbName := strings.TrimSpace(req.Database) tableName := strings.TrimSpace(req.Name) if err := validateSimpleObjectName(dbName, "database"); err != nil { writeError(w, http.StatusBadRequest, err.Error()) return } if err := validateSimpleObjectName(tableName, "table"); err != nil { writeError(w, http.StatusBadRequest, err.Error()) return } if isSystemDatabaseName(dbName) { writeError(w, http.StatusBadRequest, "Cannot drop tables from system databases") return } cluster := strings.TrimSpace(req.OnCluster) if cluster != "" && isUnsafeSQLFragment(cluster) { writeError(w, http.StatusBadRequest, "Invalid cluster name") return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } ifExists := req.IfExists == nil || *req.IfExists var sqlBuilder strings.Builder sqlBuilder.WriteString("DROP TABLE ") if ifExists { sqlBuilder.WriteString("IF EXISTS ") } sqlBuilder.WriteString(escapeIdentifier(dbName)) sqlBuilder.WriteString(".") sqlBuilder.WriteString(escapeIdentifier(tableName)) if cluster != "" { sqlBuilder.WriteString(" ON CLUSTER ") sqlBuilder.WriteString(escapeIdentifier(cluster)) } if req.Sync { sqlBuilder.WriteString(" SYNC") } sql := sqlBuilder.String() if _, err := h.Gateway.ExecuteQuery( session.ConnectionID, sql, session.ClickhouseUser, password, 45*time.Second, ); err != nil { writeError(w, http.StatusBadGateway, err.Error()) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "schema.table.drop", Username: strPtr(session.ClickhouseUser), ConnectionID: 
strPtr(session.ConnectionID), Details: strPtr(fmt.Sprintf("table=%s.%s cluster=%s sync=%t", dbName, tableName, cluster, req.Sync)), IPAddress: strPtr(r.RemoteAddr), }) writeJSON(w, http.StatusOK, map[string]interface{}{ "success": true, "database": dbName, "table": tableName, }) } // GetHostInfo handles GET /host-info. func (h *QueryHandler) GetHostInfo(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } info, err := h.DB.GetConnectionHostInfo(session.ConnectionID) if err != nil { slog.Error("Failed to get host info", "error", err) writeError(w, http.StatusInternalServerError, "Failed to retrieve host info") return } if info == nil { writeError(w, http.StatusNotFound, "Host info not available") return } w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(map[string]interface{}{ "success": true, "host_info": info, }) } // ListCompletions handles GET /completions — returns ClickHouse functions and keywords for autocomplete. 
func (h *QueryHandler) ListCompletions(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return } password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey) if err != nil { slog.Error("Failed to decrypt password", "error", err) writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials") return } // Fetch functions fnResult, err := h.Gateway.ExecuteQuery( session.ConnectionID, "SELECT name FROM system.functions", session.ClickhouseUser, password, 15*time.Second, ) functions := []string{} if err == nil { functions = extractNames(fnResult.Data) } // Fetch keywords kwResult, err := h.Gateway.ExecuteQuery( session.ConnectionID, "SELECT keyword FROM system.keywords", session.ClickhouseUser, password, 15*time.Second, ) keywords := []string{} if err == nil { keywords = extractNames(kwResult.Data) } w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(map[string]interface{}{ "success": true, "functions": functions, "keywords": keywords, }) } // --- Helpers --- // writeJSON writes a JSON response with the given status code. func writeJSON(w http.ResponseWriter, status int, v interface{}) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(status) json.NewEncoder(w).Encode(v) } // writeError writes a JSON error response. 
func writeError(w http.ResponseWriter, status int, message string) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(status) json.NewEncoder(w).Encode(map[string]interface{}{ "success": false, "error": message, }) } func (h *QueryHandler) guardrailsEnabled() bool { if h.Guardrails == nil { return false } if h.Config == nil { return true } return h.Config.IsPro() } func (h *QueryHandler) enforceGuardrailsForQuery(w http.ResponseWriter, r *http.Request, queryText, requestEndpoint string) bool { if !h.guardrailsEnabled() { return true } session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return false } decision, err := h.Guardrails.EvaluateQuery(session.ConnectionID, session.ClickhouseUser, queryText, requestEndpoint) if err != nil { slog.Error("Guardrail pre-exec evaluation failed", "connection", session.ConnectionID, "endpoint", requestEndpoint, "error", err) writeError(w, http.StatusInternalServerError, "Failed to evaluate governance guardrails") return false } if decision.Allowed { return true } h.writePolicyBlocked(w, decision.Block) return false } func (h *QueryHandler) enforceGuardrailsForTable(w http.ResponseWriter, r *http.Request, databaseName, tableName, requestEndpoint string) bool { if !h.guardrailsEnabled() { return true } session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return false } decision, err := h.Guardrails.EvaluateTable(session.ConnectionID, session.ClickhouseUser, databaseName, tableName, requestEndpoint) if err != nil { slog.Error("Guardrail table pre-exec evaluation failed", "connection", session.ConnectionID, "database", databaseName, "table", tableName, "endpoint", requestEndpoint, "error", err) writeError(w, http.StatusInternalServerError, "Failed to evaluate governance guardrails") return false } if decision.Allowed { return true } h.writePolicyBlocked(w, decision.Block) return false } func (h 
*QueryHandler) writePolicyBlocked(w http.ResponseWriter, block *governance.GuardrailBlock) { if block == nil { writeJSON(w, http.StatusForbidden, map[string]interface{}{ "success": false, "error": "Query blocked by governance policy", "code": "policy_blocked", }) return } writeJSON(w, http.StatusForbidden, map[string]interface{}{ "success": false, "error": block.Detail, "code": "policy_blocked", "policy_id": block.PolicyID, "policy_name": block.PolicyName, "severity": block.Severity, "enforcement_mode": block.EnforcementMode, "violation_id": block.ViolationID, }) } // escapeIdentifier wraps a SQL identifier in backticks and escapes any inner backticks. func escapeIdentifier(name string) string { escaped := strings.ReplaceAll(name, "`", "``") return "`" + escaped + "`" } // escapeLiteral escapes single quotes and backslashes for ClickHouse SQL string literals. // ClickHouse uses ” (doubled single-quote) to escape single quotes in string literals. func escapeLiteral(value string) string { return strings.ReplaceAll(strings.ReplaceAll(value, "\\", "\\\\"), "'", "''") } func stripTrailingSemicolon(query string) string { return strings.TrimRight(query, " \n\t;") } func stripFormatClause(query string) string { re := regexp.MustCompile(`(?is)\s+FORMAT\s+\w+\s*$`) return strings.TrimSpace(re.ReplaceAllString(query, "")) } func isReadOnlyQuery(query string) bool { re := regexp.MustCompile(`(?is)^\s*(SELECT|WITH|SHOW|DESC|DESCRIBE|EXPLAIN)\b`) return re.MatchString(query) } func (h *QueryHandler) requireSchemaAdmin(w http.ResponseWriter, r *http.Request) *middleware.SessionInfo { session := middleware.GetSession(r) if session == nil { writeError(w, http.StatusUnauthorized, "Not authenticated") return nil } isAdmin, err := h.DB.IsUserRole(session.ClickhouseUser, "admin") if err != nil { writeError(w, http.StatusInternalServerError, "Role check failed") return nil } if !isAdmin { writeError(w, http.StatusForbidden, "Admin role required for schema changes") return nil } return 
session } func validateSimpleObjectName(name string, label string) error { if strings.TrimSpace(name) == "" { return fmt.Errorf("%s name is required", label) } if strings.Contains(name, ".") { return fmt.Errorf("%s name cannot contain '.'", label) } if strings.ContainsAny(name, "\x00\r\n\t") { return fmt.Errorf("%s name contains invalid control characters", label) } return nil } func isUnsafeSQLFragment(value string) bool { v := strings.TrimSpace(strings.ToLower(value)) if v == "" { return false } // Check for SQL injection patterns: statement terminators, comments, and null bytes return strings.Contains(v, ";") || strings.Contains(v, "--") || strings.Contains(v, "/*") || strings.Contains(v, "*/") || strings.ContainsAny(v, "\x00\r\n") } func isSystemDatabaseName(name string) bool { switch strings.ToLower(strings.TrimSpace(name)) { case "system", "information_schema": return true default: return false } } func shouldFallbackToGlobalSample(message string) bool { msg := strings.ToLower(message) return strings.Contains(msg, "_shard_num") || strings.Contains(msg, "unknown identifier") || strings.Contains(msg, "unknown column") } // strPtr returns a pointer to the given string. 
func strPtr(s string) *string { return &s }

// decodeRows unmarshals a JSON payload into row maps. The primary shape is an
// array of objects; as a fallback, an array of positional arrays is wrapped
// into {"value": row} objects. Returns nil when neither shape decodes.
func decodeRows(data json.RawMessage) []map[string]interface{} {
	if len(data) == 0 {
		return nil
	}
	var rows []map[string]interface{}
	if err := json.Unmarshal(data, &rows); err == nil {
		return rows
	}
	// Fallback for positional arrays (rare on this path)
	var arrRows [][]interface{}
	if err := json.Unmarshal(data, &arrRows); err == nil {
		normalized := make([]map[string]interface{}, 0, len(arrRows))
		for _, row := range arrRows {
			obj := map[string]interface{}{"value": row}
			normalized = append(normalized, obj)
		}
		return normalized
	}
	return nil
}

// extractExplainLines flattens EXPLAIN result rows into one string per row.
// Well-known EXPLAIN column names are preferred; otherwise the value of the
// lexicographically first key is taken (deterministic despite Go's random map
// iteration order). Blank lines are dropped.
func extractExplainLines(data json.RawMessage) []string {
	rows := decodeRows(data)
	if len(rows) == 0 {
		return nil
	}
	lines := make([]string, 0, len(rows))
	for _, row := range rows {
		if len(row) == 0 {
			continue
		}
		// Common column names in EXPLAIN output first.
		priority := []string{"explain", "plan", "explain_plan", "explain_ast", "value"}
		picked := ""
		for _, key := range priority {
			if v, ok := row[key]; ok {
				picked = fmt.Sprint(v)
				break
			}
		}
		if picked == "" {
			// No known column: sort the keys so the fallback pick is stable.
			keys := make([]string, 0, len(row))
			for k := range row {
				keys = append(keys, k)
			}
			sort.Strings(keys)
			picked = fmt.Sprint(row[keys[0]])
		}
		picked = strings.TrimSpace(picked)
		if picked != "" {
			lines = append(lines, picked)
		}
	}
	return lines
}

// buildPlanTree converts indented EXPLAIN lines into a flat node list with
// parent links. A stack holds the most recent node ID at each depth; a node's
// parent is the stack entry one level up. Depth jumps beyond the current stack
// are clamped to the deepest known level.
func buildPlanTree(lines []string) []planNode {
	nodes := make([]planNode, 0, len(lines))
	stack := make([]string, 0, 16)
	for i, line := range lines {
		level := planLineLevel(line)
		label := cleanPlanLabel(line)
		if label == "" {
			continue
		}
		if level < 0 {
			level = 0
		}
		if level > len(stack) {
			level = len(stack)
		}
		// IDs are 1-based over the input lines, including skipped blanks.
		id := fmt.Sprintf("n%d", i+1)
		var parentID *string
		if level > 0 && level-1 < len(stack) {
			// Copy before taking the address so the pointer is not invalidated
			// by later stack mutation.
			parent := stack[level-1]
			parentID = &parent
		}
		if level == len(stack) {
			stack = append(stack, id)
		} else {
			// Replace the entry at this depth and truncate deeper levels.
			stack[level] = id
			stack = stack[:level+1]
		}
		nodes = append(nodes, planNode{
			ID:       id,
			ParentID: parentID,
			Level:    level,
			Label:    label,
		})
	}
	return nodes
}

// planLineLevel estimates the nesting depth of an EXPLAIN line from its
// leading indentation: each "  " (two spaces) or "│ " pair counts as one
// level; stray single spaces or '│' runes are skipped without counting.
func planLineLevel(line string) int {
	level := 0
	runes := []rune(line)
	for i := 0; i < len(runes); {
		if i+1 < len(runes) && runes[i] == ' ' && runes[i+1] == ' ' {
			level++
			i += 2
			continue
		}
		if i+1 < len(runes) && runes[i] == '│' && runes[i+1] == ' ' {
			level++
			i += 2
			continue
		}
		if runes[i] == ' ' || runes[i] == '│' {
			i++
			continue
		}
		break
	}
	return level
}

// cleanPlanLabel strips box-drawing prefixes (│, └─, ├─, ─) and surrounding
// whitespace from an EXPLAIN line, leaving just the operator label.
func cleanPlanLabel(line string) string {
	label := strings.TrimSpace(line)
	label = strings.TrimLeft(label, "│ ")
	label = strings.TrimPrefix(label, "└─")
	label = strings.TrimPrefix(label, "├─")
	label = strings.TrimPrefix(label, "─")
	return strings.TrimSpace(label)
}

// countRows attempts to determine the number of rows in a JSON data payload.
// The data is expected to be a JSON array; any other shape yields 0.
func countRows(data json.RawMessage) int {
	if len(data) == 0 {
		return 0
	}
	var arr []json.RawMessage
	if err := json.Unmarshal(data, &arr); err != nil {
		return 0
	}
	return len(arr)
}

// extractNames extracts the first string value from each object in a JSON array.
// Used for SHOW DATABASES and SHOW TABLES results where each row has a single column.
// NOTE(review): for multi-column rows the chosen value follows Go's random map
// iteration order, so which column wins is unspecified.
func extractNames(data json.RawMessage) []string {
	if len(data) == 0 {
		return []string{}
	}
	var rows []map[string]interface{}
	if err := json.Unmarshal(data, &rows); err != nil {
		return []string{}
	}
	names := make([]string, 0, len(rows))
	for _, row := range rows {
		// Take the first (and typically only) value from the row
		for _, v := range row {
			if s, ok := v.(string); ok {
				names = append(names, s)
				break
			}
		}
	}
	return names
}

// formatSQL performs basic SQL formatting: uppercases keywords and adds newlines
// before major clauses.
func formatSQL(sql string) string { // Uppercase SQL keywords keywords := []string{ "SELECT", "FROM", "WHERE", "AND", "OR", "ORDER BY", "GROUP BY", "HAVING", "LIMIT", "OFFSET", "JOIN", "LEFT JOIN", "RIGHT JOIN", "INNER JOIN", "OUTER JOIN", "FULL JOIN", "CROSS JOIN", "ON", "AS", "IN", "NOT", "NULL", "IS", "BETWEEN", "LIKE", "INSERT", "INTO", "VALUES", "UPDATE", "SET", "DELETE", "CREATE", "TABLE", "ALTER", "DROP", "INDEX", "DISTINCT", "UNION", "ALL", "EXISTS", "CASE", "WHEN", "THEN", "ELSE", "END", "ASC", "DESC", "WITH", "FORMAT", } result := sql // Replace keywords with uppercase versions (word-boundary aware) for _, kw := range keywords { pattern := `(?i)\b` + regexp.QuoteMeta(kw) + `\b` re := regexp.MustCompile(pattern) result = re.ReplaceAllString(result, kw) } // Add newlines before major clauses clauses := []string{ "SELECT", "FROM", "WHERE", "ORDER BY", "GROUP BY", "HAVING", "LIMIT", "LEFT JOIN", "RIGHT JOIN", "INNER JOIN", "OUTER JOIN", "FULL JOIN", "CROSS JOIN", "JOIN", "UNION", } for _, clause := range clauses { pattern := `(?m)\s+` + regexp.QuoteMeta(clause) + `\b` re := regexp.MustCompile(pattern) result = re.ReplaceAllString(result, "\n"+clause) } return strings.TrimSpace(result) } ================================================ FILE: internal/server/handlers/query_guardrails_test.go ================================================ package handlers import ( "bytes" "net/http" "net/http/httptest" "path/filepath" "testing" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/governance" "github.com/caioricciuti/ch-ui/internal/server/middleware" ) func TestQueryEndpointsBlockedByGuardrailPolicy(t *testing.T) { h, cleanup := newBlockedQueryHandler(t) defer cleanup() tests := []struct { name string path string body string invoke func(http.ResponseWriter, *http.Request) wantCTJSON bool }{ {name: "run", path: "/api/query/run", body: `{"query":"SELECT * FROM db.tbl"}`, invoke: h.ExecuteQuery, wantCTJSON: true}, {name: 
"stream", path: "/api/query/stream", body: `{"query":"SELECT * FROM db.tbl"}`, invoke: h.StreamQuery, wantCTJSON: true}, {name: "sample", path: "/api/query/sample", body: `{"query":"SELECT * FROM db.tbl"}`, invoke: h.SampleQuery, wantCTJSON: true}, {name: "explain", path: "/api/query/explain", body: `{"query":"SELECT * FROM db.tbl"}`, invoke: h.ExplainQuery, wantCTJSON: true}, {name: "plan", path: "/api/query/plan", body: `{"query":"SELECT * FROM db.tbl"}`, invoke: h.QueryPlan, wantCTJSON: true}, {name: "profile", path: "/api/query/profile", body: `{"query":"SELECT * FROM db.tbl"}`, invoke: h.QueryProfile, wantCTJSON: true}, {name: "explorer", path: "/api/query/explorer-data", body: `{"database":"db","table":"tbl"}`, invoke: h.ExplorerData, wantCTJSON: true}, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { req := httptest.NewRequest(http.MethodPost, tc.path, bytes.NewBufferString(tc.body)) req.Header.Set("Content-Type", "application/json") req = req.WithContext(middleware.SetSession(req.Context(), &middleware.SessionInfo{ ConnectionID: "conn-1", ClickhouseUser: "alice", EncryptedPassword: "unused", })) rr := httptest.NewRecorder() tc.invoke(rr, req) if rr.Code != http.StatusForbidden { t.Fatalf("expected status 403, got %d body=%s", rr.Code, rr.Body.String()) } if tc.wantCTJSON && rr.Header().Get("Content-Type") != "application/json" { t.Fatalf("expected application/json content type, got %q", rr.Header().Get("Content-Type")) } if !bytes.Contains(rr.Body.Bytes(), []byte(`"code":"policy_blocked"`)) { t.Fatalf("expected policy_blocked code in response, got %s", rr.Body.String()) } }) } } func newBlockedQueryHandler(t *testing.T) (*QueryHandler, func()) { t.Helper() dbPath := filepath.Join(t.TempDir(), "query_guardrails.db") db, err := database.Open(dbPath) if err != nil { t.Fatalf("open db: %v", err) } store := governance.NewStore(db) service := governance.NewGuardrailService(store, db) if _, err := db.Conn().Exec( `INSERT INTO connections (id, name, 
tunnel_token, status) VALUES (?, ?, ?, ?)`, "conn-1", "Local", "token-1", "connected", ); err != nil { t.Fatalf("insert connection: %v", err) } if err := store.UpsertSyncState("conn-1", string(governance.SyncAccess), "idle", nil, nil, 0); err != nil { t.Fatalf("upsert access sync state: %v", err) } if _, err := store.CreatePolicy( "conn-1", "Block table", "", "table", "db", "tbl", "", "analyst", "warn", "block", "admin", ); err != nil { t.Fatalf("create policy: %v", err) } h := &QueryHandler{ DB: db, Guardrails: service, Config: nil, } cleanup := func() { _ = db.Close() } return h, cleanup } ================================================ FILE: internal/server/handlers/query_upload.go ================================================ package handlers import ( "bufio" "bytes" "encoding/csv" "encoding/json" "errors" "fmt" "io" "log/slog" "math" "net/http" "os" "path/filepath" "sort" "strconv" "strings" "time" "github.com/caioricciuti/ch-ui/internal/crypto" "github.com/caioricciuti/ch-ui/internal/database" "github.com/caioricciuti/ch-ui/internal/server/middleware" "github.com/xitongsys/parquet-go-source/local" "github.com/xitongsys/parquet-go/reader" ) const ( maxUploadBytes = 25 * 1024 * 1024 maxUploadPreviewRows = 20 ) type uploadDiscoveredColumn struct { Name string `json:"name"` Type string `json:"type"` Nullable bool `json:"nullable"` Sample string `json:"sample,omitempty"` } type parsedUploadDataset struct { Rows []map[string]interface{} ColumnOrder []string } type uploadInsertColumn struct { Name string Type string } // DiscoverUploadSchema handles POST /upload/discover. 
func (h *QueryHandler) DiscoverUploadSchema(w http.ResponseWriter, r *http.Request) {
	// Schema uploads are an admin-only operation; requireSchemaAdmin writes
	// the error response itself when the session is missing or unauthorized.
	session := h.requireSchemaAdmin(w, r)
	if session == nil {
		return
	}
	filename, format, payload, err := readUploadFile(w, r)
	if err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	dataset, err := parseUploadDataset(format, payload)
	if err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	if len(dataset.Rows) == 0 {
		writeError(w, http.StatusBadRequest, "Uploaded file has no rows")
		return
	}
	columns := inferUploadColumns(dataset)
	// Only echo back a bounded preview of parsed rows.
	preview := dataset.Rows
	if len(preview) > maxUploadPreviewRows {
		preview = preview[:maxUploadPreviewRows]
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success":  true,
		"filename": filename,
		"format":   format,
		"rows":     len(dataset.Rows),
		"columns":  columns,
		"preview":  preview,
	})
}

// IngestUpload handles POST /upload/ingest.
func (h *QueryHandler) IngestUpload(w http.ResponseWriter, r *http.Request) {
	session := h.requireSchemaAdmin(w, r)
	if session == nil {
		return
	}
	filename, format, payload, err := readUploadFile(w, r)
	if err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	dataset, err := parseUploadDataset(format, payload)
	if err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	if len(dataset.Rows) == 0 {
		writeError(w, http.StatusBadRequest, "Uploaded file has no rows")
		return
	}
	// Validate the target identifiers before any SQL is assembled.
	dbName := strings.TrimSpace(r.FormValue("database"))
	tableName := strings.TrimSpace(r.FormValue("table"))
	if err := validateSimpleObjectName(dbName, "database"); err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	if err := validateSimpleObjectName(tableName, "table"); err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	if isSystemDatabaseName(dbName) {
		writeError(w, http.StatusBadRequest, "Cannot upload into system databases")
		return
	}
	createTable := parseMultipartBool(r.FormValue("create_table"), false)
	// Client may pin column names/types; otherwise fall back to inference.
	columns, err := parseUploadColumnsForm(r.FormValue("columns"))
	if err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	if len(columns) == 0 {
		inferred := inferUploadColumns(dataset)
		columns = make([]uploadDiscoveredColumn, 0, len(inferred))
		for _, col := range inferred {
			columns = append(columns, uploadDiscoveredColumn{
				Name: col.Name,
				Type: col.Type,
			})
		}
	}
	password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to decrypt password", "error", err)
		writeError(w, http.StatusInternalServerError, "Failed to decrypt credentials")
		return
	}
	createdTable := false
	createTableSQL := ""
	if createTable {
		// Optionally create the target table first from the form's DDL knobs.
		createReq := createTableRequest{
			Database:    dbName,
			Name:        tableName,
			Engine:      strings.TrimSpace(r.FormValue("engine")),
			OnCluster:   strings.TrimSpace(r.FormValue("on_cluster")),
			IfNotExists: boolPtr(parseMultipartBool(r.FormValue("if_not_exists"), true)),
			OrderBy:     strings.TrimSpace(r.FormValue("order_by")),
			PartitionBy: strings.TrimSpace(r.FormValue("partition_by")),
			PrimaryKey:  strings.TrimSpace(r.FormValue("primary_key")),
			SampleBy:    strings.TrimSpace(r.FormValue("sample_by")),
			TTL:         strings.TrimSpace(r.FormValue("ttl")),
			Settings:    strings.TrimSpace(r.FormValue("settings")),
			Comment:     strings.TrimSpace(r.FormValue("comment")),
			Columns:     make([]createTableColumn, 0, len(columns)),
		}
		for _, col := range columns {
			createReq.Columns = append(createReq.Columns, createTableColumn{
				Name: col.Name,
				Type: col.Type,
			})
		}
		sql, err := buildCreateTableSQL(createReq)
		if err != nil {
			writeError(w, http.StatusBadRequest, err.Error())
			return
		}
		createTableSQL = sql
		if _, err := h.Gateway.ExecuteQuery(
			session.ConnectionID,
			sql,
			session.ClickhouseUser,
			password,
			45*time.Second,
		); err != nil {
			// Surface the (truncated) DDL so the user can debug it.
			writeError(w, http.StatusBadGateway, fmt.Sprintf("%s\n\nCreate table command:\n%s", err.Error(), truncateUploadCommand(sql, 3000)))
			return
		}
		createdTable = true
	}
	insertColumns, err := h.resolveInsertColumns(session, password, dbName, tableName, columns, createTable)
	if err != nil {
		writeError(w, http.StatusBadRequest, err.Error())
		return
	}
	if len(insertColumns) == 0 {
		writeError(w, http.StatusBadRequest, "No columns available for insert")
		return
	}
	rowsInserted, insertPreviewSQL, err := h.insertJSONEachRowBatches(session, password, dbName, tableName, insertColumns, dataset.Rows)
	if err != nil {
		message := err.Error()
		if insertPreviewSQL != "" {
			message += "\n\nInsert command preview:\n" + truncateUploadCommand(insertPreviewSQL, 3000)
		}
		writeError(w, http.StatusBadGateway, message)
		return
	}
	// Audit write is best-effort (error intentionally ignored, matching the
	// rest of this package).
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:       "schema.upload.ingest",
		Username:     strPtr(session.ClickhouseUser),
		ConnectionID: strPtr(session.ConnectionID),
		Details:      strPtr(fmt.Sprintf("file=%s format=%s target=%s.%s rows=%d created_table=%t", filename, format, dbName, tableName, rowsInserted, createdTable)),
		IPAddress:    strPtr(r.RemoteAddr),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success":       true,
		"database":      dbName,
		"table":         tableName,
		"rows_inserted": rowsInserted,
		"created_table": createdTable,
		"commands": map[string]string{
			"create_table": createTableSQL,
			"insert":       insertPreviewSQL,
		},
	})
}

// readUploadFile extracts the "file" part from a multipart upload, enforcing
// the size cap and resolving the data format (from the optional "format"
// field or the filename extension). Returns the sanitized filename, the
// format string, and the raw payload bytes.
func readUploadFile(w http.ResponseWriter, r *http.Request) (filename, format string, payload []byte, err error) {
	// Hard cap the request body slightly above the payload limit so the
	// multipart framing itself fits.
	r.Body = http.MaxBytesReader(w, r.Body, maxUploadBytes+1024*64)
	if err := r.ParseMultipartForm(maxUploadBytes); err != nil {
		return "", "", nil, fmt.Errorf("invalid upload form: %w", err)
	}
	if r.MultipartForm != nil {
		defer r.MultipartForm.RemoveAll()
	}
	file, header, err := r.FormFile("file")
	if err != nil {
		return "", "", nil, errors.New("file is required")
	}
	defer file.Close()
	filename = strings.TrimSpace(header.Filename)
	if filename == "" {
		filename = "upload"
	}
	format, err = detectUploadFormat(filename, r.FormValue("format"))
	if err != nil {
		return "", "", nil, err
	}
	// Read one byte past the limit so an oversized file is detectable below.
	reader := io.LimitReader(file, maxUploadBytes+1)
	payload, err = io.ReadAll(reader)
	if err != nil {
		return "", "", nil, fmt.Errorf("failed to read uploaded file: %w", err)
	}
	if len(payload) == 0 {
		return "", "", nil, errors.New("uploaded file is empty")
	}
	if len(payload) > maxUploadBytes {
		return "", "", nil, fmt.Errorf("file exceeds %d MB limit", maxUploadBytes/(1024*1024))
	}
	return filename, format, payload, nil
}

// detectUploadFormat resolves the dataset format: an explicit form value wins,
// otherwise the filename extension decides. Only csv/parquet/json/jsonl.
func detectUploadFormat(filename string, explicit string) (string, error) {
	if explicit != "" {
		switch strings.ToLower(strings.TrimSpace(explicit)) {
		case "csv", "parquet", "json", "jsonl":
			return strings.ToLower(strings.TrimSpace(explicit)), nil
		default:
			return "", errors.New("unsupported format: use csv, parquet, json, or jsonl")
		}
	}
	switch strings.ToLower(filepath.Ext(filename)) {
	case ".csv":
		return "csv", nil
	case ".parquet":
		return "parquet", nil
	case ".json":
		return "json", nil
	case ".jsonl":
		return "jsonl", nil
	default:
		return "", errors.New("unsupported file type: only csv, parquet, json, and jsonl are allowed")
	}
}

// parseUploadDataset dispatches the raw payload to the format-specific parser.
func parseUploadDataset(format string, payload []byte) (parsedUploadDataset, error) {
	switch format {
	case "csv":
		return parseCSVDataset(payload)
	case "json":
		return parseJSONDataset(payload)
	case "jsonl":
		return parseJSONLinesDataset(payload)
	case "parquet":
		return parseParquetDataset(payload)
	default:
		return parsedUploadDataset{}, errors.New("unsupported file type: only csv, parquet, json, and jsonl are allowed")
	}
}

// parseCSVDataset reads a header row plus data rows. Ragged rows are allowed
// (FieldsPerRecord = -1); missing/blank cells become nil.
func parseCSVDataset(payload []byte) (parsedUploadDataset, error) {
	reader := csv.NewReader(bytes.NewReader(payload))
	reader.FieldsPerRecord = -1
	reader.ReuseRecord = false
	header, err := reader.Read()
	if err != nil {
		return parsedUploadDataset{}, errors.New("invalid csv file: missing header row")
	}
	headers := normalizeCSVHeaders(header)
	if len(headers) == 0 {
		return parsedUploadDataset{}, errors.New("invalid csv file: no columns found")
	}
	rows := make([]map[string]interface{}, 0, 512)
	for {
		record, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return parsedUploadDataset{}, fmt.Errorf("invalid csv row: %w", err)
		}
		row := make(map[string]interface{}, len(headers))
		for i, col := range headers {
			if i >= len(record) {
				row[col] = nil
				continue
			}
			value := strings.TrimSpace(record[i])
			if value == "" {
				row[col] = nil
			} else {
				row[col] = value
			}
		}
		rows = append(rows, row)
	}
	return parsedUploadDataset{
		Rows:        rows,
		ColumnOrder: headers,
	}, nil
}

// parseJSONDataset accepts a top-level array of objects, a single object, or
// an envelope object with a "data" array; anything else becomes one row.
func parseJSONDataset(payload []byte) (parsedUploadDataset, error) {
	var raw interface{}
	if err := json.Unmarshal(payload, &raw); err != nil {
		return parsedUploadDataset{}, fmt.Errorf("invalid json file: %w", err)
	}
	rows := make([]map[string]interface{}, 0, 512)
	switch value := raw.(type) {
	case []interface{}:
		for _, item := range value {
			rows = append(rows, normalizeRowFromAny(item))
		}
	case map[string]interface{}:
		if dataField, ok := value["data"]; ok {
			if arr, ok := dataField.([]interface{}); ok {
				for _, item := range arr {
					rows = append(rows, normalizeRowFromAny(item))
				}
			} else {
				// "data" exists but is not an array: treat the whole object
				// as a single row.
				rows = append(rows, normalizeRowFromAny(value))
			}
		} else {
			rows = append(rows, normalizeRowFromAny(value))
		}
	default:
		rows = append(rows, normalizeRowFromAny(value))
	}
	return parsedUploadDataset{
		Rows: rows,
	}, nil
}

// parseJSONLinesDataset parses one JSON value per non-blank line.
func parseJSONLinesDataset(payload []byte) (parsedUploadDataset, error) {
	scanner := bufio.NewScanner(bytes.NewReader(payload))
	// Allow individual lines up to 10 MB.
	scanner.Buffer(make([]byte, 0, 64*1024), 10*1024*1024)
	rows := make([]map[string]interface{}, 0, 512)
	lineNo := 0
	for scanner.Scan() {
		lineNo++
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		var raw interface{}
		if err := json.Unmarshal([]byte(line), &raw); err != nil {
			return parsedUploadDataset{}, fmt.Errorf("invalid jsonl at line %d: %w", lineNo, err)
		}
		rows = append(rows, normalizeRowFromAny(raw))
	}
	if err := scanner.Err(); err != nil {
		return parsedUploadDataset{}, fmt.Errorf("failed to read jsonl: %w", err)
	}
	return parsedUploadDataset{
		Rows: rows,
	}, nil
}

// parseParquetDataset spills the payload to a 0600 temp file (the parquet
// reader needs a seekable file) and reads all rows in batches of 512.
func parseParquetDataset(payload []byte) (parsedUploadDataset, error) {
	tmp, err := os.CreateTemp("", "ch-ui-upload-*.parquet")
	if err != nil {
		return parsedUploadDataset{}, fmt.Errorf("failed to create temp file for parquet: %w", err)
	}
	tmpPath := tmp.Name()
	defer os.Remove(tmpPath)
	// Ensure restrictive permissions regardless of umask
	if err := os.Chmod(tmpPath, 0600); err != nil {
		tmp.Close()
		return parsedUploadDataset{}, fmt.Errorf("failed to set temp file permissions: %w", err)
	}
	if _, err := tmp.Write(payload); err != nil {
		tmp.Close()
		return parsedUploadDataset{}, fmt.Errorf("failed to write parquet temp file: %w", err)
	}
	if err := tmp.Close(); err != nil {
		return parsedUploadDataset{}, fmt.Errorf("failed to finalize parquet temp file: %w", err)
	}
	fr, err := local.NewLocalFileReader(tmpPath)
	if err != nil {
		return parsedUploadDataset{}, fmt.Errorf("failed to open parquet file: %w", err)
	}
	defer fr.Close()
	pr, err := reader.NewParquetReader(fr, new(interface{}), 1)
	if err != nil {
		return parsedUploadDataset{}, fmt.Errorf("failed to read parquet schema: %w", err)
	}
	defer pr.ReadStop()
	totalRows := int(pr.GetNumRows())
	if totalRows == 0 {
		return parsedUploadDataset{Rows: []map[string]interface{}{}}, nil
	}
	rows := make([]map[string]interface{}, 0, totalRows)
	batchSize := 512
	for readCount := 0; readCount < totalRows; {
		toRead := batchSize
		if totalRows-readCount < toRead {
			toRead = totalRows - readCount
		}
		batch := make([]interface{}, toRead)
		if err := pr.Read(&batch); err != nil {
			return parsedUploadDataset{}, fmt.Errorf("failed to read parquet rows: %w", err)
		}
		for _, item := range batch {
			rows = append(rows, normalizeRowFromAny(item))
		}
		readCount += len(batch)
	}
	return parsedUploadDataset{
		Rows: rows,
	}, nil
}

// normalizeCSVHeaders trims headers, strips a leading UTF-8 BOM from the
// first column, names blank columns column_N, and suffixes duplicates _2, _3…
func normalizeCSVHeaders(header []string) []string {
	normalized := make([]string, 0, len(header))
	used := map[string]int{}
	for idx, raw := range header {
		name := strings.TrimSpace(raw)
		if idx == 0 {
			name = strings.TrimPrefix(name, "\uFEFF")
		}
		if name == "" {
			name = fmt.Sprintf("column_%d", idx+1)
		}
		base := name
		counter := used[base]
		if counter > 0 {
			name = fmt.Sprintf("%s_%d", base, counter+1)
		}
		used[base] = counter + 1
		normalized = append(normalized, name)
	}
	return normalized
}

// normalizeRowFromAny converts an arbitrary decoded value into a row map with
// trimmed keys and normalized values. Non-map values are round-tripped
// through JSON; if that still isn't an object, they land under key "value".
func normalizeRowFromAny(raw interface{}) map[string]interface{} {
	switch value := raw.(type) {
	case map[string]interface{}:
		row := make(map[string]interface{}, len(value))
		for k, v := range value {
			row[strings.TrimSpace(k)] = normalizeUploadValue(v)
		}
		return row
	case map[interface{}]interface{}:
		row := make(map[string]interface{}, len(value))
		for k, v := range value {
			row[fmt.Sprint(k)] = normalizeUploadValue(v)
		}
		return row
	default:
		// Fallback for struct or scalar payloads.
		asMap := map[string]interface{}{}
		rawJSON, err := json.Marshal(raw)
		if err == nil {
			if json.Unmarshal(rawJSON, &asMap) == nil && len(asMap) > 0 {
				row := make(map[string]interface{}, len(asMap))
				for k, v := range asMap {
					row[strings.TrimSpace(k)] = normalizeUploadValue(v)
				}
				return row
			}
		}
		return map[string]interface{}{"value": normalizeUploadValue(raw)}
	}
}

// normalizeUploadValue canonicalizes a cell value: trims strings (blank ->
// nil), widens ints to int64, drops NaN/Inf, formats time.Time as RFC3339Nano
// UTC, and JSON-encodes composite values to strings.
func normalizeUploadValue(value interface{}) interface{} {
	switch v := value.(type) {
	case nil:
		return nil
	case bool:
		return v
	case string:
		trimmed := strings.TrimSpace(v)
		if trimmed == "" {
			return nil
		}
		return trimmed
	case json.Number:
		if i, err := v.Int64(); err == nil {
			return i
		}
		if f, err := v.Float64(); err == nil {
			return f
		}
		return v.String()
	case float32:
		return float64(v)
	case float64:
		// NaN/Inf are not JSON-encodable; treat them as missing.
		if math.IsNaN(v) || math.IsInf(v, 0) {
			return nil
		}
		return v
	case int:
		return int64(v)
	case int8:
		return int64(v)
	case int16:
		return int64(v)
	case int32:
		return int64(v)
	case int64:
		return v
	case uint:
		return int64(v)
	case uint8:
		return int64(v)
	case uint16:
		return int64(v)
	case uint32:
		return int64(v)
	case uint64:
		// Values above int64 range fall back to their string form.
		if v > math.MaxInt64 {
			return fmt.Sprint(v)
		}
		return int64(v)
	case time.Time:
		return v.UTC().Format(time.RFC3339Nano)
	case []interface{}, map[string]interface{}:
		raw, err := json.Marshal(v)
		if err != nil {
			return fmt.Sprint(v)
		}
		return string(raw)
	default:
		// Unknown types: JSON round-trip, then re-normalize scalars.
		raw, err := json.Marshal(v)
		if err != nil {
			return fmt.Sprint(v)
		}
		if len(raw) > 0 && raw[0] == '{' {
			return string(raw)
		}
		var scalar interface{}
		if json.Unmarshal(raw, &scalar) == nil {
			return normalizeUploadValue(scalar)
		}
		return string(raw)
	}
}

// inferUploadColumns derives the column list for a dataset: source order
// first (CSV header), then any extra keys found in rows (sorted), each with
// an inferred ClickHouse type, nullability, and a first non-nil sample.
func inferUploadColumns(dataset parsedUploadDataset) []uploadDiscoveredColumn {
	order := make([]string, 0, len(dataset.ColumnOrder))
	seen := map[string]struct{}{}
	for _, name := range dataset.ColumnOrder {
		trimmed := strings.TrimSpace(name)
		if trimmed == "" {
			continue
		}
		if _, exists := seen[trimmed]; exists {
			continue
		}
		seen[trimmed] = struct{}{}
		order = append(order, trimmed)
	}
	extras := make([]string, 0)
	for _, row := range dataset.Rows {
		for key := range row {
			trimmed := strings.TrimSpace(key)
			if trimmed == "" {
				continue
			}
			if _, exists := seen[trimmed]; exists {
				continue
			}
			seen[trimmed] = struct{}{}
			extras = append(extras, trimmed)
		}
	}
	sort.Strings(extras)
	order = append(order, extras...)
	columns := make([]uploadDiscoveredColumn, 0, len(order))
	for _, name := range order {
		values := make([]interface{}, 0, len(dataset.Rows))
		sample := ""
		for _, row := range dataset.Rows {
			v, ok := row[name]
			if !ok {
				values = append(values, nil)
				continue
			}
			values = append(values, v)
			if sample == "" && v != nil {
				sample = fmt.Sprint(v)
			}
		}
		baseType, nullable := inferUploadColumnType(values)
		columnType := baseType
		if nullable {
			columnType = fmt.Sprintf("Nullable(%s)", baseType)
		}
		columns = append(columns, uploadDiscoveredColumn{
			Name:     name,
			Type:     columnType,
			Nullable: nullable,
			Sample:   sample,
		})
	}
	return columns
}

// inferUploadColumnType picks the narrowest ClickHouse type that fits every
// non-nil value, preferring Bool > Int64 > Float64 > DateTime > Date > String.
// nil (or blank-string) values only set the nullable flag.
func inferUploadColumnType(values []interface{}) (baseType string, nullable bool) {
	allBool := true
	allInt := true
	allFloat := true
	allDate := true
	allDateTime := true
	hasValue := false
	for _, raw := range values {
		if raw == nil {
			nullable = true
			continue
		}
		switch v := raw.(type) {
		case bool:
			hasValue = true
			allInt = false
			allFloat = false
			allDate = false
			allDateTime = false
		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
			hasValue = true
			allBool = false
			allDate = false
			allDateTime = false
		case float32:
			hasValue = true
			allBool = false
			allDate = false
			allDateTime = false
			if math.Trunc(float64(v)) != float64(v) {
				allInt = false
			}
		case float64:
			hasValue = true
			allBool = false
			allDate = false
			allDateTime = false
			if math.Trunc(v) != v {
				allInt = false
			}
		case string:
			trimmed := strings.TrimSpace(v)
			if trimmed == "" {
				nullable = true
				continue
			}
			hasValue = true
			if _, err := strconv.ParseBool(trimmed); err != nil && trimmed != "0" && trimmed != "1" {
				allBool = false
			}
			if _, err := strconv.ParseInt(trimmed, 10, 64); err != nil {
				allInt = false
			}
			if _, err := strconv.ParseFloat(trimmed, 64); err != nil {
				allFloat = false
			}
			if _, err := time.Parse("2006-01-02", trimmed); err != nil {
				allDate = false
			}
			if !isDateTimeString(trimmed) {
				allDateTime = false
			}
		default:
			hasValue = true
			allBool = false
			allInt = false
			allFloat = false
			allDate = false
			allDateTime = false
		}
	}
	// A column with no values at all defaults to a nullable String.
	if !hasValue {
		return "String", true
	}
	switch {
	case allBool:
		return "Bool", nullable
	case allInt:
		return "Int64", nullable
	case allFloat:
		return "Float64", nullable
	case allDateTime:
		return "DateTime", nullable
	case allDate:
		return "Date", nullable
	default:
		return "String", nullable
	}
}

// isDateTimeString reports whether value matches any supported timestamp layout.
func isDateTimeString(value string) bool {
	layouts := []string{
		time.RFC3339,
		time.RFC3339Nano,
		"2006-01-02 15:04:05",
		"2006-01-02 15:04:05.000",
		"2006-01-02T15:04:05",
		"2006-01-02T15:04:05.000",
	}
	for _, layout := range layouts {
		if _, err := time.Parse(layout, value); err == nil {
			return true
		}
	}
	return false
}

// parseUploadColumnsForm decodes the optional client-supplied "columns" JSON
// and validates each name and type against SQL-injection fragments.
// An empty form value yields (nil, nil).
func parseUploadColumnsForm(raw string) ([]uploadDiscoveredColumn, error) {
	trimmed := strings.TrimSpace(raw)
	if trimmed == "" {
		return nil, nil
	}
	var cols []uploadDiscoveredColumn
	if err := json.Unmarshal([]byte(trimmed), &cols); err != nil {
		return nil, errors.New("invalid columns payload")
	}
	result := make([]uploadDiscoveredColumn, 0, len(cols))
	for idx, col := range cols {
		name := strings.TrimSpace(col.Name)
		colType := strings.TrimSpace(col.Type)
		if err := validateSimpleObjectName(name, fmt.Sprintf("column #%d", idx+1)); err != nil {
			return nil, err
		}
		if colType == "" || isUnsafeSQLFragment(colType) {
			return nil, fmt.Errorf("invalid type for column %q", name)
		}
		result = append(result, uploadDiscoveredColumn{
			Name: name,
			Type: colType,
		})
	}
	return result, nil
}

// parseMultipartBool interprets common truthy/falsy form strings; anything
// unrecognized (including empty) yields defaultValue.
func parseMultipartBool(raw string, defaultValue bool) bool {
	trimmed := strings.TrimSpace(strings.ToLower(raw))
	if trimmed == "" {
		return defaultValue
	}
	switch trimmed {
	case "1", "true", "yes", "on":
		return true
	case "0", "false", "no", "off":
		return false
	default:
		return defaultValue
	}
}

// boolPtr returns a pointer to value.
func boolPtr(value bool) *bool {
	return &value
}

// buildCreateTableSQL assembles a CREATE TABLE statement from a validated
// request. Identifiers are escaped; free-form fragments (engine, ORDER BY,
// SETTINGS, defaults, …) are screened by isUnsafeSQLFragment but otherwise
// interpolated verbatim. MergeTree-family engines get ORDER BY tuple() when
// none is given.
func buildCreateTableSQL(req createTableRequest) (string, error) {
	dbName := strings.TrimSpace(req.Database)
	tableName := strings.TrimSpace(req.Name)
	if err := validateSimpleObjectName(dbName, "database"); err != nil {
		return "", err
	}
	if err := validateSimpleObjectName(tableName, "table"); err != nil {
		return "", err
	}
	if isSystemDatabaseName(dbName) {
		return "", errors.New("cannot create tables in system databases")
	}
	if len(req.Columns) == 0 {
		return "", errors.New("at least one column is required")
	}
	engine := strings.TrimSpace(req.Engine)
	if engine == "" {
		engine = "MergeTree"
	}
	if isUnsafeSQLFragment(engine) {
		return "", errors.New("invalid engine expression")
	}
	cluster := strings.TrimSpace(req.OnCluster)
	if cluster != "" && isUnsafeSQLFragment(cluster) {
		return "", errors.New("invalid cluster name")
	}
	columnsSQL := make([]string, 0, len(req.Columns))
	for i, col := range req.Columns {
		colName := strings.TrimSpace(col.Name)
		colType := strings.TrimSpace(col.Type)
		if err := validateSimpleObjectName(colName, fmt.Sprintf("column #%d", i+1)); err != nil {
			return "", err
		}
		if colType == "" || isUnsafeSQLFragment(colType) {
			return "", fmt.Errorf("invalid type for column %q", colName)
		}
		part := escapeIdentifier(colName) + " " + colType
		if def := strings.TrimSpace(col.DefaultExpression); def != "" {
			if isUnsafeSQLFragment(def) {
				return "", fmt.Errorf("invalid default expression for column %q", colName)
			}
			part += " DEFAULT " + def
		}
		if comment := strings.TrimSpace(col.Comment); comment != "" {
			part += " COMMENT '" + escapeLiteral(comment) + "'"
		}
		columnsSQL = append(columnsSQL, part)
	}
	orderBy := strings.TrimSpace(req.OrderBy)
	partitionBy := strings.TrimSpace(req.PartitionBy)
	primaryKey := strings.TrimSpace(req.PrimaryKey)
	sampleBy := strings.TrimSpace(req.SampleBy)
	ttl := strings.TrimSpace(req.TTL)
	settings := strings.TrimSpace(req.Settings)
	comment := strings.TrimSpace(req.Comment)
	expressions := []struct {
		name  string
		value string
	}{
		{name: "order_by", value: orderBy},
		{name: "partition_by", value: partitionBy},
		{name: "primary_key", value: primaryKey},
		{name: "sample_by", value: sampleBy},
		{name: "ttl", value: ttl},
		{name: "settings", value: settings},
	}
	for _, expr := range expressions {
		if expr.value != "" && isUnsafeSQLFragment(expr.value) {
			return "", fmt.Errorf("invalid %s expression", expr.name)
		}
	}
	if strings.Contains(strings.ToLower(engine), "mergetree") && orderBy == "" {
		orderBy = "tuple()"
	}
	// IF NOT EXISTS defaults to true when the flag is absent.
	ifNotExists := req.IfNotExists == nil || *req.IfNotExists
	var sqlBuilder strings.Builder
	sqlBuilder.WriteString("CREATE TABLE ")
	if ifNotExists {
		sqlBuilder.WriteString("IF NOT EXISTS ")
	}
	sqlBuilder.WriteString(escapeIdentifier(dbName))
	sqlBuilder.WriteString(".")
	sqlBuilder.WriteString(escapeIdentifier(tableName))
	if cluster != "" {
		sqlBuilder.WriteString(" ON CLUSTER ")
		sqlBuilder.WriteString(escapeIdentifier(cluster))
	}
	sqlBuilder.WriteString(" (\n ")
	sqlBuilder.WriteString(strings.Join(columnsSQL, ",\n "))
	sqlBuilder.WriteString("\n)")
	sqlBuilder.WriteString("\nENGINE = ")
	sqlBuilder.WriteString(engine)
	if partitionBy != "" {
		sqlBuilder.WriteString("\nPARTITION BY ")
		sqlBuilder.WriteString(partitionBy)
	}
	if orderBy != "" {
		sqlBuilder.WriteString("\nORDER BY ")
		sqlBuilder.WriteString(orderBy)
	}
	if primaryKey != "" {
		sqlBuilder.WriteString("\nPRIMARY KEY ")
		sqlBuilder.WriteString(primaryKey)
	}
	if sampleBy != "" {
		sqlBuilder.WriteString("\nSAMPLE BY ")
		sqlBuilder.WriteString(sampleBy)
	}
	if ttl != "" {
		sqlBuilder.WriteString("\nTTL ")
		sqlBuilder.WriteString(ttl)
	}
	if settings != "" {
		sqlBuilder.WriteString("\nSETTINGS ")
		sqlBuilder.WriteString(settings)
	}
	if comment != "" {
		sqlBuilder.WriteString("\nCOMMENT '")
		sqlBuilder.WriteString(escapeLiteral(comment))
		sqlBuilder.WriteString("'")
	}
	return sqlBuilder.String(), nil
}

// resolveInsertColumns decides which columns the INSERT will target. When the
// table was just created, the discovered columns are used as-is; otherwise
// the live table schema is fetched via DESCRIBE TABLE and discovered columns
// are matched to it (exact name first, then case-insensitively, deduped).
func (h *QueryHandler) resolveInsertColumns(
	session *middleware.SessionInfo,
	password string,
	databaseName string,
	tableName string,
	discovered []uploadDiscoveredColumn,
	createTable bool,
) ([]uploadInsertColumn, error) {
	if createTable {
		cols := make([]uploadInsertColumn, 0, len(discovered))
		for _, col := range discovered {
			name := strings.TrimSpace(col.Name)
			colType := strings.TrimSpace(col.Type)
			if name == "" {
				continue
			}
			if colType == "" {
				colType = "String"
			}
			cols = append(cols, uploadInsertColumn{
				Name: name,
				Type: colType,
			})
		}
		return cols, nil
	}
	result, err := h.Gateway.ExecuteQuery(
		session.ConnectionID,
		fmt.Sprintf("DESCRIBE TABLE %s.%s", escapeIdentifier(databaseName), escapeIdentifier(tableName)),
		session.ClickhouseUser,
		password,
		20*time.Second,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to read table schema: %w", err)
	}
	rows := decodeRows(result.Data)
	tableCols := make([]uploadInsertColumn, 0, len(rows))
	for _, row := range rows {
		rawName, ok := row["name"]
		if !ok {
			continue
		}
		colName := strings.TrimSpace(fmt.Sprint(rawName))
		if colName == "" {
			continue
		}
		colType := strings.TrimSpace(fmt.Sprint(row["type"]))
		if colType == "" {
			colType = "String"
		}
		tableCols = append(tableCols, uploadInsertColumn{
			Name: colName,
			Type: colType,
		})
	}
	if len(tableCols) == 0 {
		return nil, errors.New("target table has no columns")
	}
	byLower := make(map[string]uploadInsertColumn, len(tableCols))
	for _, col := range tableCols {
		byLower[strings.ToLower(col.Name)] = col
	}
	selected := make([]uploadInsertColumn, 0, len(discovered))
	used := map[string]struct{}{}
	for _, col := range discovered {
		name := strings.TrimSpace(col.Name)
		if name == "" {
			continue
		}
		var matched uploadInsertColumn
		for _, tableCol := range tableCols {
			if tableCol.Name == name {
				matched = tableCol
				break
			}
		}
		if matched.Name == "" {
			if value, ok := byLower[strings.ToLower(name)]; ok {
				matched = value
			}
		}
		if matched.Name == "" {
			continue
		}
		if _, exists := used[matched.Name]; exists {
			continue
		}
		used[matched.Name] = struct{}{}
		selected = append(selected, matched)
	}
	if len(selected) == 0 {
		return nil, errors.New("no matching columns between uploaded file and target table")
	}
	return selected, nil
}

// insertJSONEachRowBatches inserts rows in batches of 500 via JSONEachRow.
// Returns the number of rows inserted, the first (preview) INSERT statement,
// and the first error encountered; on error the counts reflect the rows
// already committed by earlier batches.
func (h *QueryHandler) insertJSONEachRowBatches(
	session *middleware.SessionInfo,
	password string,
	databaseName string,
	tableName string,
	targetColumns []uploadInsertColumn,
	rows []map[string]interface{},
) (int, string, error) {
	if len(rows) == 0 {
		return 0, "", nil
	}
	const batchSize = 500
	inserted := 0
	insertPreview := ""
	for start := 0; start < len(rows); start += batchSize {
		end := start + batchSize
		if end > len(rows) {
			end = len(rows)
		}
		query, rowCount, err := buildJSONEachRowInsertQuery(databaseName, tableName, targetColumns, rows[start:end], start)
		if err != nil {
			return inserted, insertPreview, err
		}
		if rowCount == 0 {
			continue
		}
		if insertPreview == "" {
			insertPreview = query
		}
		if _, err := h.Gateway.ExecuteQuery(
			session.ConnectionID,
			query,
			session.ClickhouseUser,
			password,
			90*time.Second,
		); err != nil {
			return inserted, insertPreview, fmt.Errorf("insert failed after %d rows: %s", inserted, humanizeUploadInsertError(err.Error()))
		}
		inserted += rowCount
	}
	return inserted, insertPreview, nil
}

// buildJSONEachRowInsertQuery renders one INSERT ... FORMAT JSONEachRow
// statement for a batch. Each source value is coerced to its target column
// type; all-nil rows are skipped. baseRowOffset makes error messages report
// the 1-based row number in the whole file, not just the batch.
func buildJSONEachRowInsertQuery(
	databaseName string,
	tableName string,
	targetColumns []uploadInsertColumn,
	sourceRows []map[string]interface{},
	baseRowOffset int,
) (string, int, error) {
	if len(targetColumns) == 0 {
		return "", 0, errors.New("insert requires at least one column")
	}
	seen := map[string]struct{}{}
	columns := make([]uploadInsertColumn, 0, len(targetColumns))
	for _, col := range targetColumns {
		name := strings.TrimSpace(col.Name)
		if name == "" {
			continue
		}
		if _, exists := seen[name]; exists {
			continue
		}
		seen[name] = struct{}{}
		typ := strings.TrimSpace(col.Type)
		if typ == "" {
			typ = "String"
		}
		columns = append(columns, uploadInsertColumn{Name: name, Type: typ})
	}
	if len(columns) == 0 {
		return "", 0, errors.New("insert requires at least one valid column")
	}
	var builder strings.Builder
	builder.WriteString("INSERT INTO ")
	builder.WriteString(escapeIdentifier(databaseName))
	builder.WriteString(".")
	builder.WriteString(escapeIdentifier(tableName))
	builder.WriteString(" (")
	for idx, col := range columns {
		if idx > 0 {
			builder.WriteString(", ")
		}
		builder.WriteString(escapeIdentifier(col.Name))
	}
	builder.WriteString(") FORMAT JSONEachRow\n")
	inserted := 0
	for rowIdx, source := range sourceRows {
		row := make(map[string]interface{}, len(columns))
		hasData := false
		for _, col := range columns {
			val, ok := source[col.Name]
			if !ok {
				row[col.Name] = nil
				continue
			}
			coerced, err := coerceUploadValueForType(val, col.Type)
			if err != nil {
				return "", inserted, fmt.Errorf("row %d column %q: %w", baseRowOffset+rowIdx+1, col.Name, err)
			}
			row[col.Name] = coerced
			if coerced != nil {
				hasData = true
			}
		}
		if !hasData {
			continue
		}
		line, err := json.Marshal(row)
		if err != nil {
			return "", inserted, fmt.Errorf("failed to encode row for insert: %w", err)
		}
		builder.Write(line)
		builder.WriteByte('\n')
		inserted++
	}
	return builder.String(), inserted, nil
}

// humanizeUploadInsertError appends a remediation hint to ClickHouse
// "cannot parse input" errors; other messages pass through trimmed.
func humanizeUploadInsertError(message string) string {
	msg := strings.TrimSpace(message)
	lower := strings.ToLower(msg)
	if strings.Contains(lower, "cannot parse input") {
		return msg + " Hint: adjust discovered column types or normalize date/time formats before upload."
	}
	return msg
}

// truncateUploadCommand caps a SQL string at limit bytes for display.
func truncateUploadCommand(sql string, limit int) string {
	if limit <= 0 || len(sql) <= limit {
		return sql
	}
	return sql[:limit] + "\n... (truncated)"
}

// coerceUploadValueForType converts a source value to a JSON representation
// compatible with the target ClickHouse column type (matched by substring on
// the unwrapped, upper-cased base type). nil passes through unchanged.
func coerceUploadValueForType(value interface{}, typeExpr string) (interface{}, error) {
	if value == nil {
		return nil, nil
	}
	baseType := normalizeClickHouseType(typeExpr)
	if baseType == "" {
		baseType = "STRING"
	}
	switch {
	case strings.Contains(baseType, "BOOL"):
		parsed, err := parseBoolUploadValue(value)
		if err != nil {
			return nil, err
		}
		return parsed, nil
	case strings.Contains(baseType, "INT"):
		parsed, err := parseIntUploadValue(value)
		if err != nil {
			return nil, err
		}
		return parsed, nil
	case strings.Contains(baseType, "FLOAT") || strings.Contains(baseType, "DECIMAL"):
		parsed, err := parseFloatUploadValue(value)
		if err != nil {
			return nil, err
		}
		return parsed, nil
	case strings.Contains(baseType, "DATE") && strings.Contains(baseType, "TIME"):
		parsed, err := parseDateTimeUploadValue(value)
		if err != nil {
			return nil, err
		}
		return parsed, nil
	case strings.HasPrefix(baseType, "DATE"):
		parsed, err := parseDateUploadValue(value)
		if err != nil {
			return nil, err
		}
		return parsed, nil
	default:
		return normalizeUploadValue(value), nil
	}
}

// normalizeClickHouseType upper-cases a type expression and strips any
// (possibly nested) Nullable(...) / LowCardinality(...) wrappers.
func normalizeClickHouseType(typeExpr string) string {
	trimmed := strings.TrimSpace(typeExpr)
	if trimmed == "" {
		return ""
	}
	upper := strings.ToUpper(trimmed)
	changed := true
	for changed {
		changed = false
		for _, wrapper := range []string{"NULLABLE(", "LOWCARDINALITY("} {
			if strings.HasPrefix(upper, wrapper) && strings.HasSuffix(upper, ")") {
				upper = strings.TrimSuffix(strings.TrimPrefix(upper, wrapper), ")")
				changed = true
			}
		}
	}
	return strings.TrimSpace(upper)
}

// parseBoolUploadValue coerces a value to bool, accepting common truthy/falsy
// strings and treating nonzero numbers as true.
func parseBoolUploadValue(value interface{}) (bool, error) {
	switch v := value.(type) {
	case bool:
		return v, nil
	case string:
		trimmed := strings.TrimSpace(strings.ToLower(v))
		switch trimmed {
		case "1", "true", "yes", "on":
			return true, nil
		case "0", "false", "no", "off":
			return false, nil
		default:
			return false, fmt.Errorf("cannot parse %q as Bool", v)
		}
	case int64:
		return v != 0, nil
	case float64:
		return v != 0, nil
	default:
		normalized := normalizeUploadValue(value)
		if s, ok := normalized.(string); ok {
			return parseBoolUploadValue(s)
		}
		return false, fmt.Errorf("cannot parse %T as Bool", value)
	}
}

// parseIntUploadValue coerces a value to int64, rejecting fractional floats.
func parseIntUploadValue(value interface{}) (int64, error) {
	switch v := value.(type) {
	case int64:
		return v, nil
	case int:
		return int64(v), nil
	case float64:
		if math.Trunc(v) != v {
			return 0, fmt.Errorf("cannot parse non-integer %v as Int64", v)
		}
		return int64(v), nil
	case string:
		trimmed := strings.TrimSpace(v)
		if trimmed == "" {
			return 0, errors.New("empty value")
		}
		i, err := strconv.ParseInt(trimmed, 10, 64)
		if err != nil {
			return 0, fmt.Errorf("cannot parse %q as Int64", v)
		}
		return i, nil
	default:
		// Normalize exotic types first, then retry on the canonical form.
		normalized := normalizeUploadValue(value)
		switch nv := normalized.(type) {
		case int64:
			return nv, nil
		case float64:
			if math.Trunc(nv) != nv {
				return 0, fmt.Errorf("cannot parse non-integer %v as Int64", nv)
			}
			return int64(nv), nil
		case string:
			return parseIntUploadValue(nv)
		default:
			return 0, fmt.Errorf("cannot parse %T as Int64", value)
		}
	}
}

// parseFloatUploadValue coerces a value to float64.
func parseFloatUploadValue(value interface{}) (float64, error) {
	switch v := value.(type) {
	case float64:
		return v, nil
	case float32:
		return float64(v), nil
	case int64:
		return float64(v), nil
	case int:
		return float64(v), nil
	case string:
		trimmed := strings.TrimSpace(v)
		if trimmed == "" {
			return 0, errors.New("empty value")
		}
		f, err := strconv.ParseFloat(trimmed, 64)
		if err != nil {
			return 0, fmt.Errorf("cannot parse %q as Float64", v)
		}
		return f, nil
	default:
		normalized := normalizeUploadValue(value)
		switch nv := normalized.(type) {
		case float64:
			return nv, nil
		case int64:
			return float64(nv), nil
		case string:
			return parseFloatUploadValue(nv)
		default:
			return 0, fmt.Errorf("cannot parse %T as Float64", value)
		}
	}
}

// parseDateUploadValue renders a flexible time value as YYYY-MM-DD.
func parseDateUploadValue(value interface{}) (string, error) {
	t, err := parseFlexibleTime(value)
	if err != nil {
		return "", err
	}
	return t.Format("2006-01-02"), nil
}

// parseDateTimeUploadValue renders a flexible time value as
// "YYYY-MM-DD hh:mm:ss" (ClickHouse DateTime text form).
func parseDateTimeUploadValue(value interface{}) (string, error) {
	t, err := parseFlexibleTime(value)
	if err != nil {
		return "", err
	}
	return t.Format("2006-01-02 15:04:05"), nil
}

// parseFlexibleTime accepts time.Time or a string in any supported layout
// (RFC3339 variants, space/T-separated timestamps, bare dates) and returns
// the instant in UTC.
func parseFlexibleTime(value interface{}) (time.Time, error) {
	switch v := value.(type) {
	case time.Time:
		return v.UTC(), nil
	case string:
		trimmed := strings.TrimSpace(v)
		if trimmed == "" {
			return time.Time{}, errors.New("empty time value")
		}
		layouts := []string{
			time.RFC3339,
			time.RFC3339Nano,
			"2006-01-02 15:04:05",
			"2006-01-02 15:04:05.000",
			"2006-01-02T15:04:05",
			"2006-01-02T15:04:05.000",
			"2006-01-02",
		}
		for _, layout := range layouts {
			if t, err := time.Parse(layout, trimmed); err == nil {
				return t.UTC(), nil
			}
		}
		return time.Time{}, fmt.Errorf("cannot parse %q as Date/DateTime", v)
	default:
		normalized := normalizeUploadValue(value)
		if s, ok := normalized.(string); ok {
			return parseFlexibleTime(s)
		}
		return time.Time{}, fmt.Errorf("cannot parse %T as Date/DateTime", value)
	}
}



================================================
FILE: internal/server/handlers/saved_queries.go
================================================
package handlers

import (
	"encoding/json"
	"log/slog"
	"net/http"
	"strings"

	"github.com/go-chi/chi/v5"

	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
)

// SavedQueriesHandler handles saved query CRUD operations.
type SavedQueriesHandler struct {
	DB *database.DB
}

// Routes registers saved query routes on the given router.
func (h *SavedQueriesHandler) Routes(r chi.Router) {
	r.Get("/", h.List)
	r.Get("/{id}", h.Get)
	r.Post("/", h.Create)
	r.Put("/{id}", h.Update)
	r.Delete("/{id}", h.Delete)
	r.Post("/{id}/duplicate", h.Duplicate)
}

// List returns all saved queries.
func (h *SavedQueriesHandler) List(w http.ResponseWriter, r *http.Request) { queries, err := h.DB.GetSavedQueries() if err != nil { slog.Error("Failed to list saved queries", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch saved queries"}) return } if queries == nil { queries = []database.SavedQuery{} } writeJSON(w, http.StatusOK, map[string]interface{}{"saved_queries": queries}) } // Get returns a single saved query by ID. func (h *SavedQueriesHandler) Get(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Query ID is required"}) return } query, err := h.DB.GetSavedQueryByID(id) if err != nil { slog.Error("Failed to get saved query", "error", err, "id", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch saved query"}) return } if query == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Saved query not found"}) return } writeJSON(w, http.StatusOK, query) } // Create creates a new saved query. 
func (h *SavedQueriesHandler) Create(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } var body struct { Name string `json:"name"` Description string `json:"description"` Query string `json:"query"` ConnectionID string `json:"connection_id"` } if err := json.NewDecoder(r.Body).Decode(&body); err != nil { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"}) return } name := strings.TrimSpace(body.Name) sqlQuery := strings.TrimSpace(body.Query) if name == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Name is required"}) return } if sqlQuery == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Query is required"}) return } connectionID := strings.TrimSpace(body.ConnectionID) if connectionID == "" { connectionID = session.ConnectionID } id, err := h.DB.CreateSavedQuery(database.CreateSavedQueryParams{ Name: name, Description: strings.TrimSpace(body.Description), Query: sqlQuery, ConnectionID: connectionID, CreatedBy: session.ClickhouseUser, }) if err != nil { slog.Error("Failed to create saved query", "error", err) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create saved query"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "saved_query.created", Username: strPtr(session.ClickhouseUser), Details: strPtr(name), }) created, err := h.DB.GetSavedQueryByID(id) if err != nil || created == nil { writeJSON(w, http.StatusCreated, map[string]string{"id": id}) return } writeJSON(w, http.StatusCreated, created) } // Update updates an existing saved query. 
func (h *SavedQueriesHandler) Update(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Query ID is required"})
		return
	}
	existing, err := h.DB.GetSavedQueryByID(id)
	if err != nil {
		slog.Error("Failed to get saved query for update", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch saved query"})
		return
	}
	if existing == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Saved query not found"})
		return
	}
	// Pointer fields implement PATCH-like semantics over PUT: only keys
	// present in the JSON body overwrite the existing values.
	var body struct {
		Name         *string `json:"name"`
		Description  *string `json:"description"`
		Query        *string `json:"query"`
		ConnectionID *string `json:"connection_id"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	// Seed the working values from the existing record; nullable columns
	// collapse to "" for the UpdateSavedQuery call.
	name := existing.Name
	description := ""
	if existing.Description != nil {
		description = *existing.Description
	}
	query := existing.Query
	connectionID := ""
	if existing.ConnectionID != nil {
		connectionID = *existing.ConnectionID
	}
	changed := false
	if body.Name != nil {
		n := strings.TrimSpace(*body.Name)
		if n == "" {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Name cannot be empty"})
			return
		}
		name = n
		changed = true
	}
	if body.Description != nil {
		// Description may legitimately be cleared to "".
		description = strings.TrimSpace(*body.Description)
		changed = true
	}
	if body.Query != nil {
		q := strings.TrimSpace(*body.Query)
		if q == "" {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Query cannot be empty"})
			return
		}
		query = q
		changed = true
	}
	if body.ConnectionID != nil {
		connectionID = strings.TrimSpace(*body.ConnectionID)
		changed = true
	}
	if !changed {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "No valid fields to update"})
		return
	}
	if err := h.DB.UpdateSavedQuery(id, name, description, query, connectionID); err != nil {
		slog.Error("Failed to update saved query", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update saved query"})
		return
	}
	// Best-effort audit entry; failure to log is not surfaced to the caller.
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:   "saved_query.updated",
		Username: strPtr(session.ClickhouseUser),
		Details:  strPtr(name),
	})
	// Return the updated record when readable; otherwise a bare success flag.
	updated, err := h.DB.GetSavedQueryByID(id)
	if err != nil || updated == nil {
		writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
		return
	}
	writeJSON(w, http.StatusOK, updated)
}

// Delete removes a saved query.
// The record is fetched first so the 404 case is distinguished from a
// storage failure and so the audit log can record the query's name.
func (h *SavedQueriesHandler) Delete(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Query ID is required"})
		return
	}
	existing, err := h.DB.GetSavedQueryByID(id)
	if err != nil {
		slog.Error("Failed to get saved query for delete", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch saved query"})
		return
	}
	if existing == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Saved query not found"})
		return
	}
	if err := h.DB.DeleteSavedQuery(id); err != nil {
		slog.Error("Failed to delete saved query", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete saved query"})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:   "saved_query.deleted",
		Username: strPtr(session.ClickhouseUser),
		Details:  strPtr(existing.Name),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
}

// Duplicate creates a copy of an existing saved query.
func (h *SavedQueriesHandler) Duplicate(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Query ID is required"})
		return
	}
	original, err := h.DB.GetSavedQueryByID(id)
	if err != nil {
		slog.Error("Failed to get saved query for duplicate", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch saved query"})
		return
	}
	if original == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Saved query not found"})
		return
	}
	// The copy keeps everything but the name, which gets a " (copy)" suffix.
	// Ownership (CreatedBy) switches to the user performing the duplication.
	newName := strings.TrimSpace(original.Name + " (copy)")
	description := ""
	if original.Description != nil {
		description = *original.Description
	}
	connectionID := ""
	if original.ConnectionID != nil {
		connectionID = *original.ConnectionID
	}
	newID, err := h.DB.CreateSavedQuery(database.CreateSavedQueryParams{
		Name:         newName,
		Description:  description,
		Query:        original.Query,
		ConnectionID: connectionID,
		CreatedBy:    session.ClickhouseUser,
	})
	if err != nil {
		slog.Error("Failed to duplicate saved query", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to duplicate saved query"})
		return
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:   "saved_query.duplicated",
		Username: strPtr(session.ClickhouseUser),
		Details:  strPtr("Duplicated from " + id),
	})
	// Return the full new record when possible; otherwise just the new ID.
	duplicated, err := h.DB.GetSavedQueryByID(newID)
	if err != nil || duplicated == nil {
		writeJSON(w, http.StatusCreated, map[string]string{"id": newID})
		return
	}
	writeJSON(w, http.StatusCreated, duplicated)
}

================================================
FILE: internal/server/handlers/schedules.go
================================================
package handlers

import (
	"encoding/json"
	"fmt"
	"log/slog"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/go-chi/chi/v5"

	"github.com/caioricciuti/ch-ui/internal/config"
	"github.com/caioricciuti/ch-ui/internal/crypto"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/scheduler"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
	"github.com/caioricciuti/ch-ui/internal/tunnel"
)

// SchedulesHandler handles scheduled job CRUD and execution.
type SchedulesHandler struct {
	DB      *database.DB
	Gateway *tunnel.Gateway
	Config  *config.Config
}

// Routes registers schedule routes on the given router.
func (h *SchedulesHandler) Routes(r chi.Router) {
	r.Get("/", h.List)
	r.Get("/{id}", h.Get)
	r.Post("/", h.Create)
	r.Put("/{id}", h.Update)
	r.Delete("/{id}", h.Delete)
	r.Get("/{id}/runs", h.ListRuns)
	r.Post("/{id}/run", h.ManualRun)
}

// List returns all scheduled jobs.
func (h *SchedulesHandler) List(w http.ResponseWriter, r *http.Request) {
	schedules, err := h.DB.GetSchedules()
	if err != nil {
		slog.Error("Failed to list schedules", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch schedules"})
		return
	}
	// Normalize a nil slice so the JSON payload serializes as [] rather than null.
	if schedules == nil {
		schedules = []database.Schedule{}
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{"schedules": schedules})
}

// Get returns a single scheduled job by ID.
func (h *SchedulesHandler) Get(w http.ResponseWriter, r *http.Request) {
	id := chi.URLParam(r, "id")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Schedule ID is required"})
		return
	}
	schedule, err := h.DB.GetScheduleByID(id)
	if err != nil {
		slog.Error("Failed to get schedule", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch schedule"})
		return
	}
	if schedule == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Schedule not found"})
		return
	}
	writeJSON(w, http.StatusOK, schedule)
}

// Create creates a new scheduled job.
func (h *SchedulesHandler) Create(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	var body struct {
		Name         string `json:"name"`
		Cron         string `json:"cron"`
		SavedQueryID string `json:"saved_query_id"`
		ConnectionID string `json:"connection_id"`
		Timezone     string `json:"timezone"`
		TimeoutMs    *int   `json:"timeout_ms"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	name := strings.TrimSpace(body.Name)
	cronExpr := strings.TrimSpace(body.Cron)
	savedQueryID := strings.TrimSpace(body.SavedQueryID)
	if name == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Name is required"})
		return
	}
	if cronExpr == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Cron expression is required"})
		return
	}
	// Validate the cron expression up front so a bad schedule never reaches storage.
	if !scheduler.ValidateCron(cronExpr) {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid cron expression"})
		return
	}
	if savedQueryID == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Saved query ID is required"})
		return
	}
	// Verify saved query exists
	savedQuery, err := h.DB.GetSavedQueryByID(savedQueryID)
	if err != nil {
		slog.Error("Failed to verify saved query", "error", err, "saved_query_id", savedQueryID)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to verify saved query"})
		return
	}
	if savedQuery == nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Saved query not found"})
		return
	}
	// Defaults: UTC timezone, 60s timeout.
	timezone := strings.TrimSpace(body.Timezone)
	if timezone == "" {
		timezone = "UTC"
	}
	timeoutMs := 60000
	if body.TimeoutMs != nil && *body.TimeoutMs > 0 {
		timeoutMs = *body.TimeoutMs
	}
	// Connection resolution order: explicit body value, then the saved
	// query's own connection, then the caller's session connection.
	connectionID := strings.TrimSpace(body.ConnectionID)
	if connectionID == "" {
		if savedQuery.ConnectionID != nil {
			connectionID = *savedQuery.ConnectionID
		}
		if connectionID == "" {
			connectionID = session.ConnectionID
		}
	}
	id, err := h.DB.CreateSchedule(name, savedQueryID, connectionID, cronExpr, timezone, session.ClickhouseUser, timeoutMs)
	if err != nil {
		slog.Error("Failed to create schedule", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to create schedule"})
		return
	}
	// Set next run time
	next := scheduler.ComputeNextRun(cronExpr, time.Now().UTC())
	if next != nil {
		h.DB.UpdateScheduleStatus(id, "", "", next)
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:   "schedule.created",
		Username: strPtr(session.ClickhouseUser),
		Details:  strPtr(name),
	})
	// Return the full record when readable, else just the new ID.
	schedule, err := h.DB.GetScheduleByID(id)
	if err != nil || schedule == nil {
		writeJSON(w, http.StatusCreated, map[string]string{"id": id})
		return
	}
	writeJSON(w, http.StatusCreated, schedule)
}

// Update updates an existing scheduled job.
// Pointer body fields give PATCH-like semantics: only keys present in the
// JSON body overwrite stored values. Disabling a schedule clears its next
// run time; any other change recomputes it.
func (h *SchedulesHandler) Update(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Schedule ID is required"})
		return
	}
	existing, err := h.DB.GetScheduleByID(id)
	if err != nil {
		slog.Error("Failed to get schedule for update", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch schedule"})
		return
	}
	if existing == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Scheduled job not found"})
		return
	}
	var body struct {
		Name      *string `json:"name"`
		Cron      *string `json:"cron"`
		Timezone  *string `json:"timezone"`
		Enabled   *bool   `json:"enabled"`
		TimeoutMs *int    `json:"timeout_ms"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid request body"})
		return
	}
	// Start from the stored values; each present body field overrides one.
	name := existing.Name
	cron := existing.Cron
	timezone := existing.Timezone
	enabled := existing.Enabled
	timeoutMs := existing.TimeoutMs
	changed := false
	if body.Name != nil {
		n := strings.TrimSpace(*body.Name)
		if n == "" {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Name is required"})
			return
		}
		name = n
		changed = true
	}
	if body.Cron != nil {
		c := strings.TrimSpace(*body.Cron)
		if c == "" || !scheduler.ValidateCron(c) {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Invalid cron expression"})
			return
		}
		cron = c
		changed = true
	}
	if body.Timezone != nil {
		// An explicit empty timezone falls back to UTC rather than erroring.
		tz := strings.TrimSpace(*body.Timezone)
		if tz == "" {
			tz = "UTC"
		}
		timezone = tz
		changed = true
	}
	if body.Enabled != nil {
		enabled = *body.Enabled
		changed = true
	}
	if body.TimeoutMs != nil {
		if *body.TimeoutMs <= 0 {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": "timeout_ms must be greater than 0"})
			return
		}
		timeoutMs = *body.TimeoutMs
		changed = true
	}
	if !changed {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "No valid fields to update"})
		return
	}
	if err := h.DB.UpdateSchedule(id, name, cron, timezone, enabled, timeoutMs); err != nil {
		slog.Error("Failed to update schedule", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to update schedule"})
		return
	}
	// Recompute next run
	if enabled {
		next := scheduler.ComputeNextRun(cron, time.Now().UTC())
		h.DB.UpdateScheduleStatus(id, "", "", next)
	} else {
		h.DB.UpdateScheduleStatus(id, "", "", nil)
	}
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:   "schedule.updated",
		Username: strPtr(session.ClickhouseUser),
		Details:  strPtr(name),
	})
	writeJSON(w, http.StatusOK, map[string]interface{}{"success": true})
}

// Delete removes a scheduled job.
func (h *SchedulesHandler) Delete(w http.ResponseWriter, r *http.Request) { session := middleware.GetSession(r) if session == nil { writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"}) return } id := chi.URLParam(r, "id") if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Schedule ID is required"}) return } existing, err := h.DB.GetScheduleByID(id) if err != nil { slog.Error("Failed to get schedule for delete", "error", err, "id", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch schedule"}) return } if existing == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Scheduled job not found"}) return } if err := h.DB.DeleteSchedule(id); err != nil { slog.Error("Failed to delete schedule", "error", err, "id", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to delete schedule"}) return } h.DB.CreateAuditLog(database.AuditLogParams{ Action: "schedule.deleted", Username: strPtr(session.ClickhouseUser), Details: strPtr(existing.Name), }) writeJSON(w, http.StatusOK, map[string]interface{}{"success": true}) } // ListRuns returns the execution history for a scheduled job. 
func (h *SchedulesHandler) ListRuns(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") if id == "" { writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Schedule ID is required"}) return } schedule, err := h.DB.GetScheduleByID(id) if err != nil { slog.Error("Failed to get schedule for runs", "error", err, "id", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch schedule"}) return } if schedule == nil { writeJSON(w, http.StatusNotFound, map[string]string{"error": "Scheduled job not found"}) return } limit := 50 if raw := r.URL.Query().Get("limit"); raw != "" { if parsed, err := strconv.Atoi(raw); err == nil && parsed > 0 && parsed <= 500 { limit = parsed } } offset := 0 if raw := r.URL.Query().Get("offset"); raw != "" { if parsed, err := strconv.Atoi(raw); err == nil && parsed >= 0 && parsed <= 1000000 { offset = parsed } } runs, err := h.DB.GetScheduleRuns(id, limit+1, offset) if err != nil { slog.Error("Failed to list schedule runs", "error", err, "schedule", id) writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch runs"}) return } if runs == nil { runs = []database.ScheduleRun{} } hasMore := len(runs) > limit if hasMore { runs = runs[:limit] } writeJSON(w, http.StatusOK, map[string]interface{}{ "runs": runs, "has_more": hasMore, "next_offset": offset + len(runs), }) } // ManualRun triggers a manual execution of a scheduled job. 
func (h *SchedulesHandler) ManualRun(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	id := chi.URLParam(r, "id")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Schedule ID is required"})
		return
	}
	schedule, err := h.DB.GetScheduleByID(id)
	if err != nil {
		slog.Error("Failed to get schedule for manual run", "error", err, "id", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch schedule"})
		return
	}
	if schedule == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "Scheduled job not found"})
		return
	}
	// Get the saved query
	savedQuery, err := h.DB.GetSavedQueryByID(schedule.SavedQueryID)
	if err != nil {
		slog.Error("Failed to fetch saved query for manual run", "error", err, "saved_query_id", schedule.SavedQueryID)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to fetch saved query"})
		return
	}
	if savedQuery == nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "Saved query not found"})
		return
	}
	// Determine connection: the schedule's own connection wins over the session's.
	connectionID := session.ConnectionID
	if schedule.ConnectionID != nil && *schedule.ConnectionID != "" {
		connectionID = *schedule.ConnectionID
	}
	// Decrypt credentials — the manual run executes with the CALLER's
	// ClickHouse credentials, not the schedule owner's.
	password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to decrypt password for manual run", "error", err, "schedule", id)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to decrypt credentials"})
		return
	}
	timeout := time.Duration(schedule.TimeoutMs) * time.Millisecond
	if timeout <= 0 {
		timeout = 60 * time.Second
	}
	// Create a run record; a failure here is logged but does not block the run
	// (runID stays "" and the later update is skipped).
	runID, err := h.DB.CreateScheduleRun(id, "running")
	if err != nil {
		slog.Error("Failed to create schedule run", "error", err, "schedule", id)
	}
	// Execute query synchronously and time it.
	start := time.Now()
	result, execErr := h.Gateway.ExecuteQuery(connectionID, savedQuery.Query, session.ClickhouseUser, password, timeout)
	elapsed := time.Since(start).Milliseconds()
	status := "success"
	var runErr string
	rowCount := 0
	if execErr != nil {
		status = "error"
		runErr = execErr.Error()
	} else if result != nil {
		rowCount = countRows(result.Data)
	}
	// Update run record
	if runID != "" {
		h.DB.UpdateScheduleRun(runID, status, rowCount, int(elapsed), runErr)
	}
	// Update schedule status; next run is only recomputed for enabled schedules.
	var nextRun *time.Time
	if schedule.Enabled {
		nextRun = scheduler.ComputeNextRun(schedule.Cron, time.Now().UTC())
	}
	h.DB.UpdateScheduleStatus(id, status, runErr, nextRun)
	h.DB.CreateAuditLog(database.AuditLogParams{
		Action:   "schedule.manual_run",
		Username: strPtr(session.ClickhouseUser),
		Details:  strPtr(fmt.Sprintf("status=%s elapsed=%dms", status, elapsed)),
	})
	// NOTE: a failed query still responds 200 with success=false — the HTTP
	// status reflects that the run itself was performed.
	if execErr != nil {
		writeJSON(w, http.StatusOK, map[string]interface{}{
			"success": false,
			"run_id":  runID,
			"status":  "error",
			"error":   execErr.Error(),
			"elapsed": elapsed,
		})
		return
	}
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"success": true,
		"run_id":  runID,
		"status":  "success",
		"elapsed": elapsed,
	})
}

================================================
FILE: internal/server/handlers/view_graph.go
================================================
package handlers

import (
	"encoding/json"
	"log/slog"
	"net/http"
	"regexp"
	"strings"
	"time"

	"github.com/caioricciuti/ch-ui/internal/crypto"
	"github.com/caioricciuti/ch-ui/internal/governance"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
	"github.com/google/uuid"
)

// viewEntry holds a row from system.tables for a View or MaterializedView.
type viewEntry struct {
	Database         string `json:"database"`
	Name             string `json:"name"`
	Engine           string `json:"engine"`
	CreateTableQuery string `json:"create_table_query"`
}

// Regex patterns for parsing SQL table references in view definitions.
// These mirror the patterns in governance/lineage.go but are local to avoid
// exporting internal regex details.
// vgTableRef matches an optionally backtick-quoted, optionally
// database-qualified identifier (`db`.`table`, db.table, or just table)
// and captures the whole reference in group 1.
const vgTableRef = "(" + "(?:`[^`]+`|[a-zA-Z_][a-zA-Z0-9_]*)" + "(?:\\.(?:`[^`]+`|[a-zA-Z_][a-zA-Z0-9_]*))?)"

var (
	vgFromRe = regexp.MustCompile(`(?i)\bFROM\s+` + vgTableRef)
	vgJoinRe = regexp.MustCompile(`(?i)\bJOIN\s+` + vgTableRef)
	vgToRe   = regexp.MustCompile(`(?i)\bTO\s+` + vgTableRef)
)

// GetViewGraph queries ClickHouse for all materialized views and views,
// parses their CREATE statements to build a structural dependency graph,
// and returns it in the same LineageGraph format used by the lineage endpoints.
func (h *GovernanceHandler) GetViewGraph(w http.ResponseWriter, r *http.Request) {
	session := middleware.GetSession(r)
	if session == nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
		return
	}
	// Fail fast when the agent tunnel for this connection is down.
	if !h.Gateway.IsTunnelOnline(session.ConnectionID) {
		writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "Tunnel is offline"})
		return
	}
	password, err := crypto.Decrypt(session.EncryptedPassword, h.Config.AppSecretKey)
	if err != nil {
		slog.Error("Failed to decrypt password", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to decrypt credentials"})
		return
	}
	// System databases are excluded at the SQL level; user databases only.
	sql := `SELECT database, name, engine, create_table_query FROM system.tables WHERE engine IN ('MaterializedView', 'View') AND database NOT IN ('system', 'INFORMATION_SCHEMA', 'information_schema') FORMAT JSON`
	result, err := h.Gateway.ExecuteQuery(
		session.ConnectionID,
		sql,
		session.ClickhouseUser,
		password,
		60*time.Second,
	)
	if err != nil {
		slog.Warn("Failed to query system.tables for views", "error", err, "connection", session.ConnectionID)
		writeJSON(w, http.StatusBadGateway, map[string]string{"error": err.Error()})
		return
	}
	// Parse ClickHouse JSON response: round-trip result.Data through JSON
	// to decode it into the typed []viewEntry slice.
	raw, err := json.Marshal(result.Data)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to marshal result"})
		return
	}
	var rows []viewEntry
	if err := json.Unmarshal(raw, &rows); err != nil {
		slog.Error("Failed to parse view entries", "error", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Failed to parse view data"})
		return
	}
	graph := buildViewGraph(rows)
	writeJSON(w, http.StatusOK, map[string]interface{}{
		"graph": graph,
	})
}

// buildViewGraph parses CREATE statements from view entries and constructs
// a LineageGraph with nodes (tables, views, MVs) and edges (data flow).
// Edge directions: source table -> view ("view_dependency"), and for
// materialized views additionally view -> TO target ("materialized_to").
func buildViewGraph(rows []viewEntry) governance.LineageGraph {
	nodeMap := make(map[string]governance.LineageNode)
	var edges []governance.LineageEdge
	for _, row := range rows {
		viewKey := row.Database + "." + row.Name
		nodeType := "materialized_view"
		if strings.EqualFold(row.Engine, "View") {
			nodeType = "view"
		}
		nodeMap[viewKey] = governance.LineageNode{
			ID:       viewKey,
			Database: row.Database,
			Table:    row.Name,
			Type:     nodeType,
		}
		// Collapse whitespace so the regexes see a single-line statement.
		query := normaliseWS(row.CreateTableQuery)
		// For materialized views: extract the TO target table
		var toTarget *parsedRef
		if strings.EqualFold(row.Engine, "MaterializedView") {
			toTarget = extractToTarget(query)
		}
		// Extract source tables from FROM/JOIN clauses in the AS SELECT part
		sources := extractViewSources(query)
		// Create edges: source → view
		for _, src := range sources {
			if isSystemDB(src.db) {
				continue
			}
			srcKey := src.key()
			if srcKey == viewKey {
				continue // skip self-reference
			}
			// Ensure source node exists
			if _, ok := nodeMap[srcKey]; !ok {
				nodeMap[srcKey] = governance.LineageNode{
					ID:       srcKey,
					Database: src.db,
					Table:    src.table,
					Type:     "source",
				}
			}
			edges = append(edges, governance.LineageEdge{
				ID:             uuid.New().String(),
				ConnectionID:   "",
				SourceDatabase: src.db,
				SourceTable:    src.table,
				TargetDatabase: row.Database,
				TargetTable:    row.Name,
				EdgeType:       "view_dependency",
			})
		}
		// Create edge: MV → TO target
		if toTarget != nil && !isSystemDB(toTarget.db) {
			tgtKey := toTarget.key()
			if tgtKey != viewKey {
				if _, ok := nodeMap[tgtKey]; !ok {
					nodeMap[tgtKey] = governance.LineageNode{
						ID:       tgtKey,
						Database: toTarget.db,
						Table:    toTarget.table,
						Type:     "target",
					}
				}
				edges = append(edges, governance.LineageEdge{
					ID:             uuid.New().String(),
					ConnectionID:   "",
					SourceDatabase: row.Database,
					SourceTable:    row.Name,
					TargetDatabase: toTarget.db,
					TargetTable:    toTarget.table,
					EdgeType:       "materialized_to",
				})
			}
		}
	}
	nodes := make([]governance.LineageNode, 0, len(nodeMap))
	for _, n := range nodeMap {
		nodes = append(nodes, n)
	}
	// Keep edges serializable as [] rather than null.
	if edges == nil {
		edges = []governance.LineageEdge{}
	}
	return governance.LineageGraph{
		Nodes: nodes,
		Edges: edges,
	}
}

// parsedRef is a database.table reference extracted from SQL.
type parsedRef struct {
	db    string
	table string
}

// key renders the reference as "db.table", or just "table" when unqualified.
func (r parsedRef) key() string {
	if r.db == "" {
		return r.table
	}
	return r.db + "." + r.table
}

// extractToTarget extracts the TO target table from a materialized view definition.
// Returns nil when the definition has no TO clause (inner-table MVs).
func extractToTarget(query string) *parsedRef {
	m := vgToRe.FindStringSubmatch(query)
	if len(m) < 2 {
		return nil
	}
	db, tbl := splitRef(m[1])
	return &parsedRef{db: db, table: tbl}
}

// extractViewSources finds all FROM and JOIN table references in a view definition.
// System-database references are dropped and duplicates are collapsed while
// preserving first-seen order.
func extractViewSources(query string) []parsedRef {
	seen := map[string]bool{}
	var results []parsedRef
	collect := func(re *regexp.Regexp) {
		for _, m := range re.FindAllStringSubmatch(query, -1) {
			if len(m) < 2 {
				continue
			}
			db, tbl := splitRef(m[1])
			if isSystemDB(db) {
				continue
			}
			key := db + "." + tbl
			if seen[key] {
				continue
			}
			seen[key] = true
			results = append(results, parsedRef{db: db, table: tbl})
		}
	}
	collect(vgFromRe)
	collect(vgJoinRe)
	return results
}

// splitRef splits a possibly qualified table reference into (database, table).
// An unqualified reference yields an empty database component.
func splitRef(raw string) (string, string) {
	raw = stripBT(raw)
	parts := strings.SplitN(raw, ".", 2)
	if len(parts) == 2 {
		return stripBT(parts[0]), stripBT(parts[1])
	}
	return "", stripBT(parts[0])
}

// stripBT removes surrounding backticks.
func stripBT(s string) string { if len(s) >= 2 && s[0] == '`' && s[len(s)-1] == '`' { return s[1 : len(s)-1] } return s } // normaliseWS collapses whitespace runs into single spaces. func normaliseWS(s string) string { return strings.Join(strings.Fields(s), " ") } // isSystemDB returns true for ClickHouse system databases. func isSystemDB(db string) bool { switch strings.ToLower(db) { case "system", "information_schema": return true } return false } ================================================ FILE: internal/server/middleware/context.go ================================================ package middleware import ( "context" "net/http" ) type contextKey string const ( sessionKey contextKey = "session" ) // SessionInfo holds session data stored in the request context. type SessionInfo struct { ID string ConnectionID string ClickhouseUser string EncryptedPassword string UserRole string } // SetSession stores the session in the request context. func SetSession(ctx context.Context, session *SessionInfo) context.Context { return context.WithValue(ctx, sessionKey, session) } // GetSession retrieves the session from the request context. func GetSession(r *http.Request) *SessionInfo { s, _ := r.Context().Value(sessionKey).(*SessionInfo) return s } ================================================ FILE: internal/server/middleware/cors.go ================================================ package middleware import ( "net/http" "net/url" "strings" ) // CORSConfig holds CORS configuration. type CORSConfig struct { AllowedOrigins []string AppURL string DevMode bool } // CORS returns a middleware that handles CORS headers. 
func CORS(cfg CORSConfig) func(http.Handler) http.Handler { var appOrigin string if cfg.AppURL != "" { if u, err := url.Parse(cfg.AppURL); err == nil { appOrigin = u.Scheme + "://" + u.Host } } allowedSet := make(map[string]bool, len(cfg.AllowedOrigins)) for _, o := range cfg.AllowedOrigins { allowedSet[o] = true } return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { origin := r.Header.Get("Origin") if origin != "" { allowed := false // In dev mode, allow any localhost origin if cfg.DevMode { if strings.HasPrefix(origin, "http://localhost:") || strings.HasPrefix(origin, "http://127.0.0.1:") { allowed = true } } // Check explicit allowed origins if !allowed && allowedSet[origin] { allowed = true } // Check against APP_URL origin if !allowed && appOrigin != "" { cleaned := strings.TrimSuffix(origin, "/") if cleaned == appOrigin || cleaned == strings.TrimSuffix(appOrigin, "/") { allowed = true } } if allowed { w.Header().Set("Access-Control-Allow-Origin", origin) w.Header().Set("Access-Control-Allow-Credentials", "true") w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS") w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, Cookie") w.Header().Set("Vary", "Origin") } } // Handle preflight if r.Method == http.MethodOptions { w.WriteHeader(http.StatusNoContent) return } next.ServeHTTP(w, r) }) } } ================================================ FILE: internal/server/middleware/license.go ================================================ package middleware import ( "net/http" "github.com/caioricciuti/ch-ui/internal/config" ) // RequirePro returns a middleware that blocks access unless the installation // has an active Pro license. Returns 402 Payment Required otherwise. 
func RequirePro(cfg *config.Config) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// License state is checked per request, so activating a license
			// takes effect without rebuilding the middleware chain.
			if !cfg.IsPro() {
				writeJSON(w, http.StatusPaymentRequired, map[string]string{
					"error": "Pro license required",
				})
				return
			}
			next.ServeHTTP(w, r)
		})
	}
}

================================================ FILE: internal/server/middleware/logging.go ================================================
package middleware

import (
	"bufio"
	"log/slog"
	"net"
	"net/http"
	"time"
)

// responseWriter wraps http.ResponseWriter to capture the status code.
// It also forwards the optional Hijacker/Flusher/Pusher interfaces below so
// wrapping does not hide capabilities of the underlying writer.
type responseWriter struct {
	http.ResponseWriter
	status int
}

// WriteHeader records the status code before delegating to the wrapped writer.
func (rw *responseWriter) WriteHeader(code int) {
	rw.status = code
	rw.ResponseWriter.WriteHeader(code)
}

// Hijack proxies websocket/upgrade hijacking to the underlying writer.
func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	hj, ok := rw.ResponseWriter.(http.Hijacker)
	if !ok {
		return nil, nil, http.ErrNotSupported
	}
	return hj.Hijack()
}

// Flush proxies streaming flushes to the underlying writer when available.
func (rw *responseWriter) Flush() {
	if f, ok := rw.ResponseWriter.(http.Flusher); ok {
		f.Flush()
	}
}

// Push proxies HTTP/2 server push when available.
func (rw *responseWriter) Push(target string, opts *http.PushOptions) error {
	if p, ok := rw.ResponseWriter.(http.Pusher); ok {
		return p.Push(target, opts)
	}
	return http.ErrNotSupported
}

// Logger returns a middleware that logs each request.
func Logger(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		// Default to 200: handlers that never call WriteHeader implicitly
		// respond with 200 OK.
		rw := &responseWriter{ResponseWriter: w, status: 200}
		next.ServeHTTP(rw, r)
		slog.Debug("request",
			"method", r.Method,
			"path", r.URL.Path,
			"status", rw.status,
			"duration", time.Since(start).String(),
			"ip", r.RemoteAddr,
		)
	})
}

================================================ FILE: internal/server/middleware/ratelimit.go ================================================
package middleware

import (
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/caioricciuti/ch-ui/internal/database"
)

// RateLimiter provides rate limiting backed by SQLite.
type RateLimiter struct {
	db *database.DB
}

// NewRateLimiter creates a new rate limiter.
func NewRateLimiter(db *database.DB) *RateLimiter {
	return &RateLimiter{db: db}
}

// RateLimitResult holds the result of a rate limit check.
type RateLimitResult struct {
	Allowed     bool
	RetryAfter  time.Duration
	Attempts    int
	MaxAttempts int
}

// lockoutSchedule is the progressive lockout ladder: first lockout 3m,
// second 5m, third and all subsequent lockouts capped at 10m.
var lockoutSchedule = []time.Duration{
	3 * time.Minute,
	5 * time.Minute,
	10 * time.Minute,
}

// CheckAuthRateLimit checks if a login attempt is allowed.
// Returns whether the attempt is allowed, and if not, how long to wait.
func (rl *RateLimiter) CheckAuthRateLimit(identifier, limitType string, maxAttempts int, windowDuration time.Duration) RateLimitResult {
	entry, err := rl.db.GetRateLimit(identifier)
	if err != nil {
		// On error, allow the request (fail open rather than locking all
		// users out when the database is unavailable).
		return RateLimitResult{Allowed: true, MaxAttempts: maxAttempts}
	}
	now := time.Now()
	if entry != nil {
		// The stored type encodes the escalation level as "base:level".
		baseType, lockLevel := parseLimitTypeAndLockLevel(entry.Type, limitType)
		entryType := formatLimitTypeWithLockLevel(baseType, lockLevel)
		// Check if locked out
		if entry.LockedUntil != nil {
			lockedUntil, err := time.Parse(time.RFC3339, *entry.LockedUntil)
			if err == nil && now.Before(lockedUntil) {
				// Compatibility: normalize legacy long locks to the new capped schedule.
				activeLevel := lockLevel
				if activeLevel <= 0 {
					activeLevel = 1
				}
				maxDuration := lockoutDurationForLevel(activeLevel)
				if remaining := time.Until(lockedUntil); remaining > maxDuration {
					// Shrink the persisted lock so old installs don't keep
					// hours-long lockouts from a previous schedule.
					normalizedUntil := now.Add(maxDuration)
					lockedUntil = normalizedUntil
					rl.db.UpsertRateLimit(
						identifier,
						formatLimitTypeWithLockLevel(baseType, activeLevel),
						entry.Attempts,
						now,
						&normalizedUntil,
					)
				}
				return RateLimitResult{
					Allowed:     false,
					RetryAfter:  time.Until(lockedUntil),
					Attempts:    entry.Attempts,
					MaxAttempts: maxAttempts,
				}
			}
			// Lock expired: keep escalation level, reset attempt window.
			rl.db.UpsertRateLimit(identifier, entryType, 0, now, nil)
			return RateLimitResult{Allowed: true, MaxAttempts: maxAttempts}
		}
		// Check if window expired
		firstAttempt, err := time.Parse(time.RFC3339, entry.FirstAttemptAt)
		if err == nil && now.Sub(firstAttempt) > windowDuration {
			// Window expired, reset
			// Escalation is reset once the attempts window is clean.
			rl.db.UpsertRateLimit(identifier, limitType, 0, now, nil)
			return RateLimitResult{Allowed: true, MaxAttempts: maxAttempts}
		}
		// Check attempts
		if entry.Attempts >= maxAttempts {
			// Lock out
			// NOTE(review): if FirstAttemptAt failed to parse above,
			// firstAttempt is the zero time here — confirm UpsertRateLimit
			// tolerates a zero first-attempt timestamp.
			nextLevel := nextLockoutLevel(lockLevel)
			lockoutDuration := lockoutDurationForLevel(nextLevel)
			lockedUntil := now.Add(lockoutDuration)
			rl.db.UpsertRateLimit(
				identifier,
				formatLimitTypeWithLockLevel(baseType, nextLevel),
				entry.Attempts,
				firstAttempt,
				&lockedUntil,
			)
			return RateLimitResult{
				Allowed:     false,
				RetryAfter:  lockoutDuration,
				Attempts:    entry.Attempts,
				MaxAttempts: maxAttempts,
			}
		}
	}
	return RateLimitResult{Allowed: true, MaxAttempts: maxAttempts}
}

// RecordAttempt records a failed login attempt.
func (rl *RateLimiter) RecordAttempt(identifier, limitType string) {
	entry, _ := rl.db.GetRateLimit(identifier)
	now := time.Now()
	if entry == nil {
		// First failure for this identifier: open a fresh attempts window.
		rl.db.UpsertRateLimit(identifier, limitType, 1, now, nil)
		return
	}
	// Preserve the stored escalation level ("base:level") across attempts.
	baseType, lockLevel := parseLimitTypeAndLockLevel(entry.Type, limitType)
	firstAttempt, err := time.Parse(time.RFC3339, entry.FirstAttemptAt)
	if err != nil {
		// Unparsable timestamp: restart the window at the current time.
		firstAttempt = now
	}
	rl.db.UpsertRateLimit(
		identifier,
		formatLimitTypeWithLockLevel(baseType, lockLevel),
		entry.Attempts+1,
		firstAttempt,
		nil,
	)
}

// ResetLimit resets the rate limit for an identifier.
func (rl *RateLimiter) ResetLimit(identifier string) {
	rl.db.DeleteRateLimit(identifier)
}

// parseLimitTypeAndLockLevel splits a stored type of the form "base:level"
// into its base type and numeric escalation level. It falls back to
// `fallback` when the base is empty, and to level 0 when the level part is
// missing, non-numeric, or negative.
func parseLimitTypeAndLockLevel(storedType, fallback string) (string, int) {
	trimmed := strings.TrimSpace(storedType)
	if trimmed == "" {
		return fallback, 0
	}
	parts := strings.SplitN(trimmed, ":", 2)
	base := strings.TrimSpace(parts[0])
	if base == "" {
		base = fallback
	}
	if len(parts) == 1 {
		return base, 0
	}
	level, err := strconv.Atoi(strings.TrimSpace(parts[1]))
	if err != nil || level < 0 {
		return base, 0
	}
	return base, level
}

// formatLimitTypeWithLockLevel renders "base:level", or just "base" when
// the level is zero (no escalation yet).
func formatLimitTypeWithLockLevel(base string, level int) string {
	if level <= 0 {
		return base
	}
	return fmt.Sprintf("%s:%d", base, level)
}

// nextLockoutLevel advances the escalation level by one, clamped to the
// schedule length so repeated lockouts stay at the maximum duration.
func nextLockoutLevel(current int) int {
	next := current + 1
	if next < 1 {
		next = 1
	}
	if next > len(lockoutSchedule) {
		next = len(lockoutSchedule)
	}
	return next
}

// lockoutDurationForLevel maps a 1-based escalation level onto
// lockoutSchedule, clamping out-of-range levels to the schedule bounds.
func lockoutDurationForLevel(level int) time.Duration {
	if level <= 1 {
		return lockoutSchedule[0]
	}
	if level > len(lockoutSchedule) {
		return lockoutSchedule[len(lockoutSchedule)-1]
	}
	return lockoutSchedule[level-1]
}

================================================ FILE: internal/server/middleware/ratelimit_test.go ================================================
package middleware

import (
	"path/filepath"
	"testing"
	"time"

	"github.com/caioricciuti/ch-ui/internal/database"
)

func TestProgressiveLockoutSchedule(t *testing.T) {
	dbPath := filepath.Join(t.TempDir(), "rate-limit.db")
	db, err :=
database.Open(dbPath) if err != nil { t.Fatalf("open db: %v", err) } defer db.Close() rl := NewRateLimiter(db) identifier := "user:test" maxAttempts := 3 window := 15 * time.Minute expectLocked := func(expectedMin time.Duration, expectedMax time.Duration, expectedType string) { t.Helper() res := rl.CheckAuthRateLimit(identifier, "user", maxAttempts, window) if res.Allowed { t.Fatalf("expected blocked result for %s", expectedType) } if res.RetryAfter < expectedMin || res.RetryAfter > expectedMax { t.Fatalf("retryAfter out of range: got=%s want=[%s,%s]", res.RetryAfter, expectedMin, expectedMax) } entry, err := db.GetRateLimit(identifier) if err != nil { t.Fatalf("get rate limit: %v", err) } if entry == nil { t.Fatalf("expected persisted rate limit entry") } if entry.Type != expectedType { t.Fatalf("unexpected entry type: got=%q want=%q", entry.Type, expectedType) } if entry.LockedUntil == nil { t.Fatalf("expected locked_until to be set") } } expireLock := func(expectedType string) { t.Helper() now := time.Now() expired := now.Add(-1 * time.Second) if err := db.UpsertRateLimit(identifier, expectedType, maxAttempts, now.Add(-2*time.Minute), &expired); err != nil { t.Fatalf("upsert expired lock: %v", err) } res := rl.CheckAuthRateLimit(identifier, "user", maxAttempts, window) if !res.Allowed { t.Fatalf("expected allowed result after lock expiry, got blocked retryAfter=%s", res.RetryAfter) } entry, err := db.GetRateLimit(identifier) if err != nil { t.Fatalf("get rate limit after expiry: %v", err) } if entry == nil { t.Fatalf("expected rate limit entry after lock expiry") } if entry.Type != expectedType { t.Fatalf("unexpected type after expiry: got=%q want=%q", entry.Type, expectedType) } if entry.Attempts != 0 { t.Fatalf("attempts should reset after expiry: got=%d", entry.Attempts) } if entry.LockedUntil != nil { t.Fatalf("expected lock to be cleared after expiry") } } if err := db.UpsertRateLimit(identifier, "user", maxAttempts, time.Now(), nil); err != nil { 
t.Fatalf("seed first lock: %v", err) } expectLocked(3*time.Minute-5*time.Second, 3*time.Minute+5*time.Second, "user:1") expireLock("user:1") if err := db.UpsertRateLimit(identifier, "user:1", maxAttempts, time.Now(), nil); err != nil { t.Fatalf("seed second lock: %v", err) } expectLocked(5*time.Minute-5*time.Second, 5*time.Minute+5*time.Second, "user:2") expireLock("user:2") if err := db.UpsertRateLimit(identifier, "user:2", maxAttempts, time.Now(), nil); err != nil { t.Fatalf("seed third lock: %v", err) } expectLocked(10*time.Minute-5*time.Second, 10*time.Minute+5*time.Second, "user:3") expireLock("user:3") if err := db.UpsertRateLimit(identifier, "user:3", maxAttempts, time.Now(), nil); err != nil { t.Fatalf("seed capped lock: %v", err) } expectLocked(10*time.Minute-5*time.Second, 10*time.Minute+5*time.Second, "user:3") } func TestLegacyLongLockIsCappedToCurrentSchedule(t *testing.T) { dbPath := filepath.Join(t.TempDir(), "rate-limit-legacy.db") db, err := database.Open(dbPath) if err != nil { t.Fatalf("open db: %v", err) } defer db.Close() rl := NewRateLimiter(db) identifier := "user:legacy" now := time.Now() legacyUntil := now.Add(2 * time.Hour) if err := db.UpsertRateLimit(identifier, "user:3", 3, now.Add(-1*time.Minute), &legacyUntil); err != nil { t.Fatalf("seed legacy lock: %v", err) } res := rl.CheckAuthRateLimit(identifier, "user", 3, 15*time.Minute) if res.Allowed { t.Fatalf("expected request to remain blocked during capped lock window") } if res.RetryAfter > 10*time.Minute+5*time.Second { t.Fatalf("legacy lock should be capped to 10m, got retryAfter=%s", res.RetryAfter) } entry, err := db.GetRateLimit(identifier) if err != nil { t.Fatalf("get rate limit: %v", err) } if entry == nil || entry.LockedUntil == nil { t.Fatalf("expected normalized lock to be persisted") } normalizedUntil, err := time.Parse(time.RFC3339, *entry.LockedUntil) if err != nil { t.Fatalf("parse locked_until: %v", err) } if normalizedUntil.After(time.Now().Add(10*time.Minute + 
5*time.Second)) {
		t.Fatalf("persisted lock should be capped near now+10m, got %s", normalizedUntil)
	}
}

================================================ FILE: internal/server/middleware/security.go ================================================
package middleware

import "net/http"

// SecurityHeaders adds security headers to responses.
// When isProduction is true, HSTS is also emitted (kept out of dev so plain
// HTTP on localhost is not pinned to HTTPS by the browser).
func SecurityHeaders(isProduction bool) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("X-Content-Type-Options", "nosniff")
			w.Header().Set("X-Frame-Options", "DENY")
			w.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin")
			w.Header().Set("Permissions-Policy", "geolocation=(), microphone=(), camera=()")
			// CSP is applied in all modes to mitigate XSS attacks.
			w.Header().Set("Content-Security-Policy", "default-src 'self'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' data: blob:; connect-src 'self' https://api.openai.com; font-src 'self' data:; frame-ancestors 'none'; base-uri 'self'; form-action 'self';")
			if isProduction {
				w.Header().Set("Strict-Transport-Security", "max-age=31536000; includeSubDomains; preload")
			}
			next.ServeHTTP(w, r)
		})
	}
}

================================================ FILE: internal/server/middleware/session.go ================================================
package middleware

import (
	"encoding/json"
	"log/slog"
	"net/http"

	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/tunnel"
)

// writeJSON writes a JSON response with the given status code.
func writeJSON(w http.ResponseWriter, status int, v interface{}) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	json.NewEncoder(w).Encode(v)
}

// Session returns a middleware that validates the chui_session cookie.
func Session(db *database.DB, _ *tunnel.Gateway) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			cookie, err := r.Cookie("chui_session")
			if err != nil || cookie.Value == "" {
				writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Not authenticated"})
				return
			}
			session, err := db.GetSession(cookie.Value)
			if err != nil {
				// Storage failure is distinct from "no such session": report
				// 500, not 401.
				writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Session lookup failed"})
				return
			}
			if session == nil {
				writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "Session expired or invalid"})
				return
			}
			// Resolve the effective role: an explicit per-user override wins,
			// then the role stored on the session, then the "viewer" default.
			// If the override lookup errors, the request proceeds with the
			// default (the session role is NOT consulted in that case).
			role := "viewer"
			overrideRole, err := db.GetUserRole(session.ClickhouseUser)
			if err != nil {
				slog.Warn("Failed to resolve explicit user role", "user", session.ClickhouseUser, "error", err)
			} else if overrideRole != "" {
				role = overrideRole
			} else if session.UserRole != nil && *session.UserRole != "" {
				role = *session.UserRole
			}
			info := &SessionInfo{
				ID:                session.ID,
				ConnectionID:      session.ConnectionID,
				ClickhouseUser:    session.ClickhouseUser,
				EncryptedPassword: session.EncryptedPassword,
				UserRole:          role,
			}
			// Downstream handlers read the session via GetSession.
			ctx := SetSession(r.Context(), info)
			next.ServeHTTP(w, r.WithContext(ctx))
		})
	}
}

// RequireAdmin returns a middleware that requires admin role.
func RequireAdmin(db *database.DB) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// Must run after the Session middleware; with no session in the
			// context there is no user to check, so deny.
			session := GetSession(r)
			if session == nil {
				writeJSON(w, http.StatusForbidden, map[string]string{"error": "Admin access required"})
				return
			}
			isAdmin, err := db.IsUserRole(session.ClickhouseUser, "admin")
			if err != nil {
				writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "Role check failed"})
				return
			}
			if !isAdmin {
				writeJSON(w, http.StatusForbidden, map[string]string{"error": "Admin access required"})
				return
			}
			next.ServeHTTP(w, r)
		})
	}
}

================================================ FILE: internal/server/server.go ================================================
package server

import (
	"context"
	"fmt"
	"io/fs"
	"log/slog"
	"net/http"
	"strings"
	"time"

	"github.com/caioricciuti/ch-ui/internal/alerts"
	"github.com/caioricciuti/ch-ui/internal/config"
	"github.com/caioricciuti/ch-ui/internal/crypto"
	"github.com/caioricciuti/ch-ui/internal/database"
	"github.com/caioricciuti/ch-ui/internal/governance"
	"github.com/caioricciuti/ch-ui/internal/langfuse"
	"github.com/caioricciuti/ch-ui/internal/models"
	"github.com/caioricciuti/ch-ui/internal/pipelines"
	"github.com/caioricciuti/ch-ui/internal/scheduler"
	"github.com/caioricciuti/ch-ui/internal/server/handlers"
	"github.com/caioricciuti/ch-ui/internal/server/middleware"
	"github.com/caioricciuti/ch-ui/internal/tunnel"
	"github.com/go-chi/chi/v5"
)

// Server is the main HTTP server. It owns the router, the embedded frontend
// filesystem, and the background runners (scheduler, pipelines, models,
// governance sync, alerts, Langfuse) started/stopped alongside it.
type Server struct {
	cfg            *config.Config
	db             *database.DB
	gateway        *tunnel.Gateway
	scheduler      *scheduler.Runner
	pipelineRunner *pipelines.Runner
	modelRunner    *models.Runner
	modelScheduler *models.Scheduler
	govSyncer      *governance.Syncer
	guardrails     *governance.GuardrailService
	alerts         *alerts.Dispatcher
	langfuse       *langfuse.Client
	router         chi.Router
	http           *http.Server
	frontendFS     fs.FS
}

// New creates a new Server with all routes configured.
func New(cfg *config.Config, db *database.DB, frontendFS fs.FS) *Server { r := chi.NewRouter() gw := tunnel.NewGateway(db) sched := scheduler.NewRunner(db, gw, cfg.AppSecretKey) pipeRunner := pipelines.NewRunner(db, gw, cfg) modelRunner := models.NewRunner(db, gw, cfg.AppSecretKey) modelScheduler := models.NewScheduler(db, modelRunner) govStore := governance.NewStore(db) govSyncer := governance.NewSyncer(govStore, db, gw, cfg.AppSecretKey) alertDispatcher := alerts.NewDispatcher(db, cfg) lfClient := langfuse.New() // Load Langfuse config from database settings (if configured via admin UI) if lfCfg, err := loadLangfuseConfigFromDB(db, cfg.AppSecretKey); err != nil { slog.Warn("Failed to load Langfuse config from database", "error", err) } else if lfCfg.Enabled() { lfClient.Reconfigure(lfCfg) } s := &Server{ cfg: cfg, db: db, gateway: gw, scheduler: sched, pipelineRunner: pipeRunner, modelRunner: modelRunner, modelScheduler: modelScheduler, govSyncer: govSyncer, guardrails: governance.NewGuardrailService(govStore, db), alerts: alertDispatcher, langfuse: lfClient, router: r, frontendFS: frontendFS, } s.setupRoutes() s.http = &http.Server{ Addr: fmt.Sprintf(":%d", cfg.Port), Handler: r, ReadTimeout: 30 * time.Second, WriteTimeout: 5 * time.Minute, // Long for streaming SSE/queries IdleTimeout: 120 * time.Second, } return s } func (s *Server) setupRoutes() { r := s.router cfg := s.cfg db := s.db gw := s.gateway // ── Global middleware ──────────────────────────────────────────────── r.Use(middleware.Logger) r.Use(middleware.SecurityHeaders(!cfg.DevMode)) r.Use(middleware.CORS(middleware.CORSConfig{ DevMode: cfg.DevMode, AllowedOrigins: cfg.AllowedOrigins, AppURL: cfg.AppURL, })) // ── Health check (no auth) ────────────────────────────────────────── healthHandler := &handlers.HealthHandler{} r.Get("/health", healthHandler.Health) // ── WebSocket tunnel endpoint (agent authenticates via token) ──────── r.HandleFunc("/connect", gw.HandleWebSocket) // ── Rate limiter 
(shared across handlers) ─────────────────────────── rateLimiter := middleware.NewRateLimiter(db) // ── Webhook endpoint for pipelines (no session — uses token auth) ── r.Post("/api/pipelines/webhook/{id}", pipelines.HandleWebhook) // ── API routes ───────────────────────────────────────────────────── r.Route("/api", func(api chi.Router) { // Auth routes (no session required, login creates the session) authHandler := &handlers.AuthHandler{ DB: db, Gateway: gw, RateLimiter: rateLimiter, Config: cfg, } api.Route("/auth", authHandler.Routes) // License status (no session required) licenseHandler := &handlers.LicenseHandler{DB: db, Config: cfg} api.Get("/license", licenseHandler.GetLicense) // All routes below require a valid session api.Group(func(protected chi.Router) { protected.Use(middleware.Session(db, gw)) // License activation (requires session) protected.Post("/license/activate", licenseHandler.ActivateLicense) protected.Post("/license/deactivate", licenseHandler.DeactivateLicense) // Query execution (community) queryHandler := &handlers.QueryHandler{DB: db, Gateway: gw, Config: cfg, Guardrails: s.guardrails} protected.Route("/query", queryHandler.Routes) // Connections management (community) connectionsHandler := &handlers.ConnectionsHandler{DB: db, Gateway: gw, Config: cfg} protected.Route("/connections", func(cr chi.Router) { cr.Get("/", connectionsHandler.List) cr.Post("/", connectionsHandler.Create) cr.Get("/{id}", connectionsHandler.Get) cr.Delete("/{id}", connectionsHandler.Delete) cr.Post("/{id}/test", connectionsHandler.TestConnection) cr.Get("/{id}/token", connectionsHandler.GetToken) cr.Post("/{id}/regenerate-token", connectionsHandler.RegenerateToken) }) // Saved queries (community) savedQueriesHandler := &handlers.SavedQueriesHandler{DB: db} protected.Route("/saved-queries", savedQueriesHandler.Routes) // ── Community features ───────────────────────────────────── // Dashboards dashboardsHandler := &handlers.DashboardsHandler{DB: db, Gateway: gw, 
Config: cfg} protected.Mount("/dashboards", dashboardsHandler.Routes()) // Pipelines pipelinesHandler := &handlers.PipelinesHandler{DB: db, Gateway: gw, Config: cfg, Runner: s.pipelineRunner} protected.Mount("/pipelines", pipelinesHandler.Routes()) // Models (dbt-like SQL transformations) modelsHandler := &handlers.ModelsHandler{DB: db, Gateway: gw, Config: cfg, Runner: s.modelRunner} protected.Mount("/models", modelsHandler.Routes()) // Brain AI assistant brainHandler := &handlers.BrainHandler{DB: db, Gateway: gw, Config: cfg, Langfuse: s.langfuse} protected.Route("/brain", brainHandler.Routes) // Admin routes (require admin role) adminHandler := &handlers.AdminHandler{ DB: db, Gateway: gw, Config: cfg, Langfuse: s.langfuse, GovSyncer: s.govSyncer, } protected.Route("/admin", func(ar chi.Router) { adminHandler.Routes(ar) }) // ── Pro-only features ────────────────────────────────────── protected.Group(func(pro chi.Router) { pro.Use(middleware.RequirePro(cfg)) // Scheduled jobs schedulesHandler := &handlers.SchedulesHandler{DB: db, Gateway: gw, Config: cfg} pro.Route("/schedules", schedulesHandler.Routes) // Governance govHandler := &handlers.GovernanceHandler{ DB: db, Gateway: gw, Config: cfg, Store: s.govSyncer.GetStore(), Syncer: s.govSyncer, } pro.Mount("/governance", govHandler.Routes()) }) }) }) // ── SPA fallback (serve embedded frontend) ────────────────────────── if s.frontendFS != nil { // Check whether the frontend was actually built and embedded. if _, err := s.frontendFS.Open("index.html"); err != nil { slog.Warn("Frontend assets not embedded; build the frontend first or use a release binary") r.Get("/*", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(http.StatusNotFound) fmt.Fprintln(w, "Frontend assets not available. 
Build the frontend first or use a release binary.") }) } else { fileServer := http.FileServer(http.FS(s.frontendFS)) r.Get("/*", func(w http.ResponseWriter, r *http.Request) { // Try to serve the file directly path := r.URL.Path[1:] // strip leading / f, err := s.frontendFS.Open(path) if err != nil { // File not found — serve index.html for SPA routing w.Header().Set("Cache-Control", "no-cache") r.URL.Path = "/" } else { f.Close() if strings.HasPrefix(path, "assets/") { w.Header().Set("Cache-Control", "public, max-age=31536000, immutable") } else { w.Header().Set("Cache-Control", "no-cache") } } fileServer.ServeHTTP(w, r) }) } } slog.Info("Routes configured") } // Start starts the HTTP server. func (s *Server) Start() error { s.scheduler.Start() s.pipelineRunner.Start() s.modelScheduler.Start() switch { case !s.cfg.IsPro(): slog.Info("Governance background sync disabled (requires Pro license)") case !s.db.GovernanceSyncEnabled(): slog.Info("Governance background sync disabled (opt-in required; enable in Governance → Settings)") default: s.govSyncer.StartBackground() } s.alerts.Start() s.langfuse.Start() slog.Info("Server listening", "addr", s.http.Addr) return s.http.ListenAndServe() } // Shutdown gracefully stops the server. func (s *Server) Shutdown(ctx context.Context) error { slog.Info("Graceful shutdown initiated") s.scheduler.Stop() s.pipelineRunner.Stop() s.modelScheduler.Stop() s.govSyncer.Stop() s.alerts.Stop() s.langfuse.Stop() s.gateway.Stop() return s.http.Shutdown(ctx) } // loadLangfuseConfigFromDB reads Langfuse configuration from the settings table. 
func loadLangfuseConfigFromDB(db *database.DB, appSecretKey string) (langfuse.Config, error) {
	var cfg langfuse.Config
	publicKey, err := db.GetSetting("langfuse.public_key")
	if err != nil {
		return cfg, err
	}
	cfg.PublicKey = publicKey
	encryptedSecret, err := db.GetSetting("langfuse.secret_key")
	if err != nil {
		return cfg, err
	}
	if encryptedSecret != "" {
		// The secret key is stored encrypted at rest; decrypt it with the
		// application secret before use.
		decrypted, err := crypto.Decrypt(encryptedSecret, appSecretKey)
		if err != nil {
			return cfg, fmt.Errorf("decrypt langfuse secret: %w", err)
		}
		cfg.SecretKey = decrypted
	}
	baseURL, err := db.GetSetting("langfuse.base_url")
	if err != nil {
		return cfg, err
	}
	cfg.BaseURL = baseURL
	cfg.NormalizeBaseURL()
	return cfg, nil
}

================================================ FILE: internal/tunnel/api.go ================================================
package tunnel

import (
	"encoding/json"
	"errors"
	"time"

	"github.com/google/uuid"
	"github.com/gorilla/websocket"
)

// IsTunnelOnline checks if a tunnel connection is currently active.
func (g *Gateway) IsTunnelOnline(connectionID string) bool {
	_, ok := g.tunnels.Load(connectionID)
	return ok
}

// GetTunnelStatus returns the online status and last seen time for a connection.
// When the tunnel is offline, lastSeen is the zero time.
func (g *Gateway) GetTunnelStatus(connectionID string) (online bool, lastSeen time.Time) {
	val, ok := g.tunnels.Load(connectionID)
	if !ok {
		return false, time.Time{}
	}
	t := val.(*ConnectedTunnel)
	return true, t.LastSeen
}

// GetConnectedCount returns the number of currently connected tunnels.
func (g *Gateway) GetConnectedCount() int {
	count := 0
	g.tunnels.Range(func(_, _ any) bool {
		count++
		return true
	})
	return count
}

// ExecuteQuery sends a SQL query to the agent via the tunnel and waits for a result.
func (g *Gateway) ExecuteQuery(connectionID, sql, user, password string, timeout time.Duration) (*QueryResult, error) {
	val, ok := g.tunnels.Load(connectionID)
	if !ok {
		return nil, errors.New("tunnel not connected")
	}
	t := val.(*ConnectedTunnel)
	requestID := uuid.NewString()
	// Register a pending slot keyed by request ID so the gateway's read loop
	// can route the agent's response back here. Channels are buffered (cap 1)
	// so the read loop never blocks on an abandoned waiter.
	pending := &PendingRequest{
		ResultCh: make(chan json.RawMessage, 1),
		ErrorCh:  make(chan error, 1),
	}
	t.Pending.Store(requestID, pending)
	defer t.Pending.Delete(requestID)
	// Both SQL/Query and ID/QueryID carry the same values — presumably for
	// compatibility with agents reading either field; confirm against the
	// agent protocol.
	msg := GatewayMessage{
		Type:     "query",
		ID:       requestID,
		QueryID:  requestID,
		SQL:      sql,
		Query:    sql,
		User:     user,
		Password: password,
	}
	data, _ := json.Marshal(msg)
	// Serialize writes to the shared WebSocket connection.
	t.mu.Lock()
	err := t.WS.WriteMessage(websocket.TextMessage, data)
	t.mu.Unlock()
	if err != nil {
		return nil, err
	}
	select {
	case payload := <-pending.ResultCh:
		var result QueryResult
		if err := json.Unmarshal(payload, &result); err != nil {
			return nil, err
		}
		return &result, nil
	case err := <-pending.ErrorCh:
		return nil, err
	case <-time.After(timeout):
		// Send cancel to agent
		cancel := GatewayMessage{
			Type:    "cancel_query",
			ID:      requestID,
			QueryID: requestID,
		}
		cancelData, _ := json.Marshal(cancel)
		t.mu.Lock()
		t.WS.WriteMessage(websocket.TextMessage, cancelData)
		t.mu.Unlock()
		return nil, errors.New("query timeout")
	}
}

// ExecuteQueryWithFormat sends a SQL query with a specific output format and returns the raw result.
// The format parameter (e.g. "JSONCompact") is passed to the agent, which appends FORMAT to the query.
// Returns the raw ClickHouse response as-is (no intermediate parse/reserialize).
func (g *Gateway) ExecuteQueryWithFormat(connectionID, sql, user, password, format string, timeout time.Duration) (json.RawMessage, error) { val, ok := g.tunnels.Load(connectionID) if !ok { return nil, errors.New("tunnel not connected") } t := val.(*ConnectedTunnel) requestID := uuid.NewString() pending := &PendingRequest{ ResultCh: make(chan json.RawMessage, 1), ErrorCh: make(chan error, 1), } t.Pending.Store(requestID, pending) defer t.Pending.Delete(requestID) msg := GatewayMessage{ Type: "query", ID: requestID, QueryID: requestID, SQL: sql, Query: sql, User: user, Password: password, Format: format, } data, _ := json.Marshal(msg) t.mu.Lock() err := t.WS.WriteMessage(websocket.TextMessage, data) t.mu.Unlock() if err != nil { return nil, err } select { case payload := <-pending.ResultCh: // The payload is a marshaled QueryResult{Data, Meta, Stats}. // For format-aware queries the agent puts the raw CH response in Data. var result QueryResult if err := json.Unmarshal(payload, &result); err != nil { return payload, nil // fallback: return as-is } if len(result.Data) > 0 { return result.Data, nil } return payload, nil case err := <-pending.ErrorCh: return nil, err case <-time.After(timeout): cancel := GatewayMessage{ Type: "cancel_query", ID: requestID, QueryID: requestID, } cancelData, _ := json.Marshal(cancel) t.mu.Lock() t.WS.WriteMessage(websocket.TextMessage, cancelData) t.mu.Unlock() return nil, errors.New("query timeout") } } // ExecuteStreamQuery sends a streaming query to the agent and returns channels for progressive consumption. // The caller must range over stream.ChunkCh, then select on stream.DoneCh/ErrorCh. // Call CleanupStream when done to release resources. 
func (g *Gateway) ExecuteStreamQuery(connectionID, sql, user, password string, settings map[string]string) (requestID string, stream *PendingStreamRequest, err error) { val, ok := g.tunnels.Load(connectionID) if !ok { return "", nil, errors.New("tunnel not connected") } t := val.(*ConnectedTunnel) requestID = uuid.NewString() stream = &PendingStreamRequest{ MetaCh: make(chan json.RawMessage, 1), ChunkCh: make(chan json.RawMessage, 8), DoneCh: make(chan json.RawMessage, 1), ErrorCh: make(chan error, 1), } t.Pending.Store(requestID, stream) msg := GatewayMessage{ Type: "query_stream", ID: requestID, QueryID: requestID, SQL: sql, Query: sql, User: user, Password: password, Settings: settings, } data, _ := json.Marshal(msg) t.mu.Lock() wsErr := t.WS.WriteMessage(websocket.TextMessage, data) t.mu.Unlock() if wsErr != nil { t.Pending.Delete(requestID) return "", nil, wsErr } return requestID, stream, nil } // CleanupStream removes a pending stream request from the tunnel's pending map. // Call this when the HTTP handler finishes (completion, error, or client disconnect). func (g *Gateway) CleanupStream(connectionID, requestID string) { val, ok := g.tunnels.Load(connectionID) if !ok { return } t := val.(*ConnectedTunnel) t.Pending.Delete(requestID) } // TestConnection tests a ClickHouse connection through the tunnel. 
func (g *Gateway) TestConnection(connectionID, user, password string, timeout time.Duration) (*TestResult, error) { val, ok := g.tunnels.Load(connectionID) if !ok { return nil, errors.New("tunnel not connected") } t := val.(*ConnectedTunnel) requestID := uuid.NewString() pending := &PendingRequest{ ResultCh: make(chan json.RawMessage, 1), ErrorCh: make(chan error, 1), } t.Pending.Store(requestID, pending) defer t.Pending.Delete(requestID) msg := GatewayMessage{ Type: "test_connection", ID: requestID, QueryID: requestID, User: user, Password: password, } data, _ := json.Marshal(msg) t.mu.Lock() err := t.WS.WriteMessage(websocket.TextMessage, data) t.mu.Unlock() if err != nil { return nil, err } select { case payload := <-pending.ResultCh: var result TestResult if err := json.Unmarshal(payload, &result); err != nil { return nil, err } return &result, nil case err := <-pending.ErrorCh: return nil, err case <-time.After(timeout): return nil, errors.New("connection test timeout") } } ================================================ FILE: internal/tunnel/gateway.go ================================================ package tunnel import ( "context" "encoding/json" "errors" "log/slog" "net" "net/http" "net/url" "strings" "sync" "time" "github.com/gorilla/websocket" "github.com/caioricciuti/ch-ui/internal/database" ) var upgrader = websocket.Upgrader{ CheckOrigin: func(r *http.Request) bool { origin := r.Header.Get("Origin") // Non-browser clients (like ch-ui-agent) typically do not send Origin. 
if origin == "" { return true } originURL, err := url.Parse(origin) if err != nil { return false } originHost, originPort := splitHostPort(originURL.Host) requestHost, requestPort := splitHostPort(r.Host) if originHost == "" || requestHost == "" { return false } samePort := originPort == "" || requestPort == "" || originPort == requestPort if samePort && strings.EqualFold(originHost, requestHost) { return true } // Local development friendliness: consider localhost and loopback IPs equivalent. if samePort && isLoopbackHost(originHost) && isLoopbackHost(requestHost) { return true } return false }, EnableCompression: true, } func splitHostPort(hostport string) (string, string) { hostport = strings.TrimSpace(hostport) if hostport == "" { return "", "" } host, port, err := net.SplitHostPort(hostport) if err == nil { return strings.Trim(host, "[]"), strings.TrimSpace(port) } // Host without explicit port. return strings.Trim(hostport, "[]"), "" } func isLoopbackHost(host string) bool { if strings.EqualFold(host, "localhost") { return true } ip := net.ParseIP(host) return ip != nil && ip.IsLoopback() } // PendingRequest represents a request waiting for a response from the agent. type PendingRequest struct { ResultCh chan json.RawMessage // receives the full response payload ErrorCh chan error } // PendingStreamRequest represents a streaming query waiting for chunked responses. type PendingStreamRequest struct { MetaCh chan json.RawMessage // receives query_stream_start meta ChunkCh chan json.RawMessage // receives query_stream_chunk data (buffered) DoneCh chan json.RawMessage // receives query_stream_end statistics ErrorCh chan error } // ConnectedTunnel represents an active tunnel agent connection. type ConnectedTunnel struct { ConnectionID string ConnectionName string WS *websocket.Conn LastSeen time.Time Pending sync.Map // map[requestID]*PendingRequest mu sync.Mutex } // Gateway manages WebSocket connections from tunnel agents. 
type Gateway struct {
	db      *database.DB
	tunnels sync.Map // map[connectionID]*ConnectedTunnel
	stopCh  chan struct{}
}

// NewGateway creates a new tunnel gateway.
// It also starts the background heartbeat goroutine; call Stop to end it.
func NewGateway(db *database.DB) *Gateway {
	g := &Gateway{
		db:     db,
		stopCh: make(chan struct{}),
	}
	go g.heartbeatLoop()
	slog.Info("Tunnel gateway initialized")
	return g
}

// Stop stops the gateway heartbeat.
// NOTE(review): closing stopCh twice would panic — Stop is not idempotent.
func (g *Gateway) Stop() {
	close(g.stopCh)
}

// HandleWebSocket handles the WebSocket upgrade and read loop for a tunnel agent.
func (g *Gateway) HandleWebSocket(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		slog.Error("WebSocket upgrade failed", "error", err)
		return
	}
	slog.Debug("New tunnel WebSocket connection")
	// Read loop
	go g.readLoop(conn)
}

// readLoop is the single reader for one agent connection. It authenticates
// the agent on the first "auth" message and then dispatches every subsequent
// message synchronously to the matching handler.
func (g *Gateway) readLoop(conn *websocket.Conn) {
	var connID string // set after auth
	conn.SetPingHandler(func(appData string) error {
		if connID != "" {
			g.touchTunnel(connID)
		}
		// Keep Gorilla's default behavior: reply with a Pong control frame.
		return conn.WriteControl(websocket.PongMessage, []byte(appData), time.Now().Add(5*time.Second))
	})
	conn.SetPongHandler(func(_ string) error {
		if connID != "" {
			g.touchTunnel(connID)
		}
		return nil
	})
	defer func() {
		// Tear down registry state only for authenticated connections.
		if connID != "" {
			g.handleDisconnect(connID, conn)
		}
		conn.Close()
	}()
	for {
		_, message, err := conn.ReadMessage()
		if err != nil {
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseNormalClosure) {
				slog.Debug("Tunnel WebSocket read error", "error", err)
			}
			return
		}
		var msg AgentMessage
		if err := json.Unmarshal(message, &msg); err != nil {
			slog.Warn("Failed to parse tunnel message", "error", err)
			conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.ClosePolicyViolation, "Invalid message format"))
			return
		}
		// Any valid message means the tunnel is alive.
		if connID != "" {
			g.touchTunnel(connID)
		}
		switch msg.Type {
		case "auth":
			authConnID := g.handleAuth(conn, msg.Token, msg.Takeover)
			if authConnID == "" {
				return // auth failed, connection closed
			}
			connID = authConnID
		case "pong":
			g.handlePong(connID)
		case "query_result":
			g.handleQueryResult(connID, &msg)
		case "query_error":
			g.handleQueryError(connID, &msg)
		case "test_result":
			g.handleTestResult(connID, &msg)
		case "host_info":
			g.handleHostInfo(connID, &msg)
		case "query_stream_start":
			g.handleStreamStart(connID, &msg)
		case "query_stream_chunk":
			g.handleStreamChunk(connID, &msg)
		case "query_stream_end":
			g.handleStreamEnd(connID, &msg)
		case "query_stream_error":
			g.handleStreamError(connID, &msg)
		default:
			slog.Warn("Unknown tunnel message type", "type", msg.Type)
		}
	}
}

// handleAuth validates the agent's token, resolves duplicate sessions
// (rejecting a second healthy agent unless takeover is set), registers the
// tunnel, and replies with auth_ok. Returns the connection ID on success,
// "" on failure (in which case the socket has been sent a close frame).
func (g *Gateway) handleAuth(conn *websocket.Conn, token string, takeover bool) string {
	remoteAddr := ""
	if conn != nil && conn.RemoteAddr() != nil {
		remoteAddr = conn.RemoteAddr().String()
	}
	// Bound the DB lookup so a slow backend cannot wedge the read loop.
	authCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	tc, err := g.db.GetConnectionByTokenCtx(authCtx, token)
	if err != nil {
		// Lookup error (e.g. busy backend) is distinct from an invalid token:
		// tell the agent to retry rather than give up.
		slog.Warn("Tunnel auth failed: token lookup error", "remote_addr", remoteAddr, "error", err)
		g.sendJSON(conn, GatewayMessage{Type: "auth_error", Message: "Tunnel auth temporarily unavailable. Please retry."})
		conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseTryAgainLater, "Auth backend busy"))
		return ""
	}
	if tc == nil {
		slog.Debug("Tunnel auth failed: invalid token", "remote_addr", remoteAddr)
		g.sendJSON(conn, GatewayMessage{Type: "auth_error", Message: "Invalid tunnel token"})
		conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.ClosePolicyViolation, "Invalid token"))
		return ""
	}
	// If a healthy connection already exists for this token, reject duplicates.
	// This avoids two agent processes evicting each other in a reconnect loop.
	if existing, ok := g.tunnels.Load(tc.ID); ok {
		t := existing.(*ConnectedTunnel)
		isHealthy := time.Since(t.LastSeen) < 45*time.Second
		if isHealthy && !takeover {
			slog.Warn("Tunnel auth rejected: connection already active", "name", tc.Name)
			g.sendJSON(conn, GatewayMessage{Type: "auth_error", Message: "Tunnel token already connected (use --takeover to replace it)"})
			conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.ClosePolicyViolation, "Token already connected"))
			return ""
		}
		if isHealthy && takeover {
			slog.Warn("Replacing active tunnel connection via takeover", "name", tc.Name)
		}
		// Replace stale session.
		t.mu.Lock()
		_ = t.WS.Close()
		t.mu.Unlock()
		g.handleDisconnect(tc.ID, t.WS)
		slog.Warn("Replaced stale tunnel connection", "name", tc.Name)
	}
	tunnel := &ConnectedTunnel{
		ConnectionID:   tc.ID,
		ConnectionName: tc.Name,
		WS:             conn,
		LastSeen:       time.Now(),
	}
	g.tunnels.Store(tc.ID, tunnel)
	g.db.UpdateConnectionStatus(tc.ID, "connected")
	g.sendJSON(conn, GatewayMessage{
		Type:           "auth_ok",
		ConnectionID:   tc.ID,
		ConnectionName: tc.Name,
	})
	slog.Info("Tunnel agent authenticated", "name", tc.Name, "connection_id", tc.ID)
	g.db.CreateAuditLog(database.AuditLogParams{
		Action:       "tunnel.connected",
		ConnectionID: strPtr(tc.ID),
	})
	return tc.ID
}

// handlePong records agent liveness and refreshes the stored status.
func (g *Gateway) handlePong(connID string) {
	if connID == "" {
		return
	}
	g.touchTunnel(connID)
	g.db.UpdateConnectionStatus(connID, "connected")
}

// touchTunnel refreshes LastSeen for the given connection, if registered.
// NOTE(review): LastSeen is written here without a lock while pingAll and
// handleAuth read it — technically a data race.
func (g *Gateway) touchTunnel(connID string) {
	if connID == "" {
		return
	}
	if val, ok := g.tunnels.Load(connID); ok {
		t := val.(*ConnectedTunnel)
		t.LastSeen = time.Now()
	}
}

// handleQueryResult routes a completed (non-streaming) query result to the
// waiting PendingRequest, if any.
func (g *Gateway) handleQueryResult(connID string, msg *AgentMessage) {
	id := msg.GetMessageID()
	if connID == "" || id == "" {
		return
	}
	val, ok := g.tunnels.Load(connID)
	if !ok {
		return
	}
	t := val.(*ConnectedTunnel)
	// LoadAndDelete guarantees each request is answered at most once.
	pendingVal, ok := t.Pending.LoadAndDelete(id)
	if !ok {
		slog.Warn("Query result for unknown request", "id", id)
		return
	}
	pending := pendingVal.(*PendingRequest)
	// Build the result payload
	result := QueryResult{
		Data:  msg.Data,
		Meta:  msg.Meta,
		Stats: msg.GetStats(),
	}
	payload, _ := json.Marshal(result)
	// Non-blocking send: the waiter may have timed out and gone away.
	select {
	case pending.ResultCh <- payload:
	default:
	}
}

// handleQueryError routes an agent-reported query failure to the waiter.
func (g *Gateway) handleQueryError(connID string, msg *AgentMessage) {
	id := msg.GetMessageID()
	if connID == "" || id == "" {
		return
	}
	val, ok := g.tunnels.Load(connID)
	if !ok {
		return
	}
	t := val.(*ConnectedTunnel)
	pendingVal, ok := t.Pending.LoadAndDelete(id)
	if !ok {
		return
	}
	pending := pendingVal.(*PendingRequest)
	select {
	case pending.ErrorCh <- errors.New(msg.Error):
	default:
	}
}

// handleTestResult routes a connection-test outcome (success or failure) to
// the waiting TestConnection call.
func (g *Gateway) handleTestResult(connID string, msg *AgentMessage) {
	id := msg.GetMessageID()
	if connID == "" || id == "" {
		return
	}
	val, ok := g.tunnels.Load(connID)
	if !ok {
		return
	}
	t := val.(*ConnectedTunnel)
	pendingVal, ok := t.Pending.LoadAndDelete(id)
	if !ok {
		return
	}
	pending := pendingVal.(*PendingRequest)
	if msg.IsTestSuccess() {
		result := TestResult{Success: true, Version: msg.Version}
		payload, _ := json.Marshal(result)
		select {
		case pending.ResultCh <- payload:
		default:
		}
	} else {
		errMsg := msg.Error
		if errMsg == "" {
			errMsg = "Connection test failed"
		}
		select {
		case pending.ErrorCh <- errors.New(errMsg):
		default:
		}
	}
}

// handleHostInfo persists machine info reported by the agent.
func (g *Gateway) handleHostInfo(connID string, msg *AgentMessage) {
	if connID == "" || len(msg.HostInfo) == 0 {
		return
	}
	var info database.HostInfo
	if err := json.Unmarshal(msg.HostInfo, &info); err != nil {
		slog.Warn("Failed to parse host info", "error", err)
		return
	}
	g.db.UpdateConnectionHostInfo(connID, info)
	slog.Debug("Host info received", "connection", connID, "hostname", info.Hostname)
}

// handleStreamStart delivers the stream's metadata to the consumer. The
// pending entry is intentionally kept (Load, not LoadAndDelete): chunks and
// the end marker for the same id follow.
func (g *Gateway) handleStreamStart(connID string, msg *AgentMessage) {
	id := msg.GetMessageID()
	if connID == "" || id == "" {
		return
	}
	val, ok := g.tunnels.Load(connID)
	if !ok {
		return
	}
	t := val.(*ConnectedTunnel)
	pendingVal, ok := t.Pending.Load(id)
	if !ok {
		slog.Warn("Stream start for unknown request", "id", id)
		return
	}
	pending, ok := pendingVal.(*PendingStreamRequest)
	if !ok {
		return
	}
	select {
	case pending.MetaCh <- msg.Meta:
	default:
	}
}

// handleStreamChunk forwards one chunk of streamed rows to the consumer.
// NOTE(review): the blocking send below applies backpressure, but it also
// stalls this tunnel's entire read loop (pings and other in-flight requests
// on the same agent) while the consumer is slow.
func (g *Gateway) handleStreamChunk(connID string, msg *AgentMessage) {
	id := msg.GetMessageID()
	if connID == "" || id == "" {
		return
	}
	val, ok := g.tunnels.Load(connID)
	if !ok {
		return
	}
	t := val.(*ConnectedTunnel)
	pendingVal, ok := t.Pending.Load(id)
	if !ok {
		return
	}
	pending, ok := pendingVal.(*PendingStreamRequest)
	if !ok {
		return
	}
	pending.ChunkCh <- msg.Data // backpressure: blocks if consumer is slow
}

// handleStreamEnd finalizes a stream: removes the pending entry, closes the
// chunk channel, and delivers the trailing statistics.
func (g *Gateway) handleStreamEnd(connID string, msg *AgentMessage) {
	id := msg.GetMessageID()
	if connID == "" || id == "" {
		return
	}
	val, ok := g.tunnels.Load(connID)
	if !ok {
		return
	}
	t := val.(*ConnectedTunnel)
	pendingVal, ok := t.Pending.LoadAndDelete(id)
	if !ok {
		return
	}
	pending, ok := pendingVal.(*PendingStreamRequest)
	if !ok {
		return
	}
	done := StreamDone{
		Statistics: msg.GetStats(),
		TotalRows:  msg.TotalRows,
	}
	payload, _ := json.Marshal(done)
	// Closing ChunkCh signals end-of-stream to the consumer's range loop.
	close(pending.ChunkCh)
	select {
	case pending.DoneCh <- payload:
	default:
	}
}

// handleStreamError aborts a stream: removes the pending entry, closes the
// chunk channel, and reports the agent's error to the consumer.
func (g *Gateway) handleStreamError(connID string, msg *AgentMessage) {
	id := msg.GetMessageID()
	if connID == "" || id == "" {
		return
	}
	val, ok := g.tunnels.Load(connID)
	if !ok {
		return
	}
	t := val.(*ConnectedTunnel)
	pendingVal, ok := t.Pending.LoadAndDelete(id)
	if !ok {
		return
	}
	pending, ok := pendingVal.(*PendingStreamRequest)
	if !ok {
		return
	}
	close(pending.ChunkCh)
	select {
	case pending.ErrorCh <- errors.New(msg.Error):
	default:
	}
}

// handleDisconnect tears down a tunnel: removes it from the registry, fails
// all pending requests, and records status + audit log. The ws argument
// guards against tearing down a connection that was already replaced by a
// newer session for the same connection ID.
func (g *Gateway) handleDisconnect(connID string, ws *websocket.Conn) {
	val, ok := g.tunnels.Load(connID)
	if !ok {
		return
	}
	t := val.(*ConnectedTunnel)
	if ws != nil && t.WS != ws {
		// A newer connection already replaced this one.
		return
	}
	// CompareAndDelete makes teardown run at most once per tunnel even when
	// the read loop and pingAll race to disconnect the same entry.
	if !g.tunnels.CompareAndDelete(connID, t) {
		return
	}
	// Reject all pending requests
	t.Pending.Range(func(key, value any) bool {
		switch p := value.(type) {
		case *PendingRequest:
			select {
			case p.ErrorCh <- errors.New("tunnel disconnected"):
			default:
			}
		case *PendingStreamRequest:
			close(p.ChunkCh)
			select {
			case p.ErrorCh <- errors.New("tunnel disconnected"):
			default:
			}
		}
		t.Pending.Delete(key)
		return true
	})
	g.db.UpdateConnectionStatus(connID, "disconnected")
	slog.Info("Tunnel disconnected", "name", t.ConnectionName, "connection_id", connID)
	g.db.CreateAuditLog(database.AuditLogParams{
		Action:       "tunnel.disconnected",
		ConnectionID: strPtr(connID),
	})
}

// strPtr returns a pointer to s (for optional audit-log fields).
func strPtr(s string) *string {
	return &s
}

// sendJSON best-effort-writes msg to conn, ignoring marshal/write errors.
// NOTE(review): does not take the tunnel write mutex — currently only used
// during the auth phase while conn is not yet shared; verify before reusing
// it elsewhere.
func (g *Gateway) sendJSON(conn *websocket.Conn, msg GatewayMessage) {
	data, _ := json.Marshal(msg)
	conn.WriteMessage(websocket.TextMessage, data)
}

// heartbeatLoop pings all connected agents every 30 seconds.
func (g *Gateway) heartbeatLoop() {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-g.stopCh:
			return
		case <-ticker.C:
			g.pingAll()
		}
	}
}

// pingAll evicts tunnels idle beyond the stale threshold and sends an
// application-level "ping" message to the rest; a failed write also triggers
// disconnect.
func (g *Gateway) pingAll() {
	now := time.Now()
	staleThreshold := 3 * time.Minute
	g.tunnels.Range(func(key, value any) bool {
		connID := key.(string)
		t := value.(*ConnectedTunnel)
		if now.Sub(t.LastSeen) > staleThreshold {
			slog.Warn("Tunnel connection stale, removing", "name", t.ConnectionName, "lastSeen", t.LastSeen)
			t.mu.Lock()
			t.WS.Close()
			t.mu.Unlock()
			g.handleDisconnect(connID, t.WS)
			return true
		}
		ping := GatewayMessage{Type: "ping"}
		data, _ := json.Marshal(ping)
		t.mu.Lock()
		err := t.WS.WriteMessage(websocket.TextMessage, data)
		t.mu.Unlock()
		if err != nil {
			slog.Warn("Ping failed", "name", t.ConnectionName, "error", err)
			g.handleDisconnect(connID, t.WS)
		}
		return true
	})
}


================================================
FILE: internal/tunnel/protocol.go
================================================
package tunnel

import "encoding/json"

// AgentMessage represents messages from the tunnel
agent to the gateway. type AgentMessage struct { Type string `json:"type"` Token string `json:"token,omitempty"` // auth Takeover bool `json:"takeover,omitempty"` // auth (replace active session) ID string `json:"id,omitempty"` // legacy JS agent QueryID string `json:"query_id,omitempty"` // Go agent Data json.RawMessage `json:"data,omitempty"` // query_result, query_stream_chunk Meta json.RawMessage `json:"meta,omitempty"` // query_result, query_stream_start Stats json.RawMessage `json:"stats,omitempty"` // query_result (legacy) Statistics json.RawMessage `json:"statistics,omitempty"` // query_result (Go agent), query_stream_end Error string `json:"error,omitempty"` // query_error, query_stream_error Success *bool `json:"success,omitempty"` // test_result (legacy) Online *bool `json:"online,omitempty"` // test_result (Go agent) Version string `json:"version,omitempty"` // test_result HostInfo json.RawMessage `json:"host_info,omitempty"` // host_info Seq int `json:"seq,omitempty"` // query_stream_chunk sequence number TotalRows int64 `json:"total_rows,omitempty"` // query_stream_end total row count } // GetMessageID returns the message ID from either legacy or Go agent format. func (m *AgentMessage) GetMessageID() string { if m.QueryID != "" { return m.QueryID } return m.ID } // GetStats returns stats from either legacy or Go agent format. func (m *AgentMessage) GetStats() json.RawMessage { if len(m.Stats) > 0 { return m.Stats } return m.Statistics } // IsTestSuccess normalizes the test result between legacy and Go agent. func (m *AgentMessage) IsTestSuccess() bool { if m.Success != nil { return *m.Success } if m.Online != nil { return *m.Online } return false } // GatewayMessage represents messages from the gateway to the tunnel agent. 
type GatewayMessage struct { Type string `json:"type"` ConnectionID string `json:"connectionId,omitempty"` // auth_ok ConnectionName string `json:"connectionName,omitempty"` // auth_ok Message string `json:"message,omitempty"` // auth_error ID string `json:"id,omitempty"` // legacy JS agent QueryID string `json:"query_id,omitempty"` // Go agent SQL string `json:"sql,omitempty"` // query (legacy) Query string `json:"query,omitempty"` // query (Go agent) User string `json:"user,omitempty"` // query, test Password string `json:"password,omitempty"` // query, test Format string `json:"format,omitempty"` // query Settings map[string]string `json:"settings,omitempty"` // ClickHouse query settings (URL params) } // QueryResult represents a ClickHouse query result returned from the agent. type QueryResult struct { Data json.RawMessage `json:"data"` Meta json.RawMessage `json:"meta"` Stats json.RawMessage `json:"stats"` } // TestResult represents a connection test result returned from the agent. type TestResult struct { Success bool `json:"success"` Error string `json:"error,omitempty"` Version string `json:"version,omitempty"` } // StreamDone represents the final payload of a streaming query. type StreamDone struct { Statistics json.RawMessage `json:"statistics"` TotalRows int64 `json:"total_rows"` } // HostInfo represents machine info from the agent. 
type HostInfo struct { Hostname string `json:"hostname"` OS string `json:"os"` Arch string `json:"arch"` CPUCores int `json:"cpu_cores"` MemoryTotal int64 `json:"memory_total"` MemoryFree int64 `json:"memory_free"` DiskTotal int64 `json:"disk_total"` DiskFree int64 `json:"disk_free"` GoVersion string `json:"go_version"` AgentUptime float64 `json:"agent_uptime"` CollectedAt string `json:"collected_at"` } ================================================ FILE: internal/version/version.go ================================================ package version var ( Version = "dev" Commit = "none" BuildDate = "unknown" ) func Set(v, c, d string) { Version = v Commit = c BuildDate = d } ================================================ FILE: license/public.pem ================================================ -----BEGIN PUBLIC KEY----- MCowBQYDK2VwAyEA62CBTMWey4wS4Fknr/5Sfk7k1J7+4MYpBfxBPvKXRFg= -----END PUBLIC KEY----- ================================================ FILE: main.go ================================================ package main import ( "github.com/caioricciuti/ch-ui/cmd" "github.com/caioricciuti/ch-ui/internal/version" ) var ( Version = "dev" Commit = "none" BuildDate = "unknown" ) func main() { version.Set(Version, Commit, BuildDate) cmd.FrontendFS = frontendFS() cmd.Execute() } ================================================ FILE: ui/.gitignore ================================================ # Logs logs *.log npm-debug.log* yarn-debug.log* yarn-error.log* pnpm-debug.log* lerna-debug.log* node_modules dist dist-ssr *.local # Editor directories and files .vscode/* !.vscode/extensions.json .idea .DS_Store *.suo *.ntvs* *.njsproj *.sln *.sw? ================================================ FILE: ui/README.md ================================================ # Svelte + TS + Vite This template should help get you started developing with Svelte and TypeScript in Vite. 
## Recommended IDE Setup [VS Code](https://code.visualstudio.com/) + [Svelte](https://marketplace.visualstudio.com/items?itemName=svelte.svelte-vscode). ## Need an official Svelte framework? Check out [SvelteKit](https://github.com/sveltejs/kit#readme), which is also powered by Vite. Deploy anywhere with its serverless-first approach and adapt to various platforms, with out of the box support for TypeScript, SCSS, and Less, and easily-added support for mdsvex, GraphQL, PostCSS, Tailwind CSS, and more. ## Technical considerations **Why use this over SvelteKit?** - It brings its own routing solution which might not be preferable for some users. - It is first and foremost a framework that just happens to use Vite under the hood, not a Vite app. This template contains as little as possible to get started with Vite + TypeScript + Svelte, while taking into account the developer experience with regards to HMR and intellisense. It demonstrates capabilities on par with the other `create-vite` templates and is a good starting point for beginners dipping their toes into a Vite + Svelte project. Should you later need the extended capabilities and extensibility provided by SvelteKit, the template has been structured similarly to SvelteKit so that it is easy to migrate. **Why `global.d.ts` instead of `compilerOptions.types` inside `jsconfig.json` or `tsconfig.json`?** Setting `compilerOptions.types` shuts out all other types not explicitly listed in the configuration. Using triple-slash references keeps the default TypeScript setting of accepting type information from the entire workspace, while also adding `svelte` and `vite/client` type information. **Why include `.vscode/extensions.json`?** Other templates indirectly recommend extensions via the README, but this file allows VS Code to prompt the user to install the recommended extension upon opening the project. 
**Why enable `allowJs` in the TS template?** While `allowJs: false` would indeed prevent the use of `.js` files in the project, it does not prevent the use of JavaScript syntax in `.svelte` files. In addition, it would force `checkJs: false`, bringing the worst of both worlds: not being able to guarantee the entire codebase is TypeScript, and also having worse typechecking for the existing JavaScript. In addition, there are valid use cases in which a mixed codebase may be relevant. **Why is HMR not preserving my local component state?** HMR state preservation comes with a number of gotchas! It has been disabled by default in both `svelte-hmr` and `@sveltejs/vite-plugin-svelte` due to its often surprising behavior. You can read the details [here](https://github.com/rixo/svelte-hmr#svelte-hmr). If you have state that's important to retain within a component, consider creating an external store which would not be replaced by HMR. ```ts // store.ts // An extremely simple external store import { writable } from 'svelte/store' export default writable(0) ``` ================================================ FILE: ui/index.html ================================================ CH-UI
================================================ FILE: ui/package.json ================================================ { "name": "ui", "private": true, "version": "0.0.0", "type": "module", "scripts": { "dev": "vite", "build": "vite build", "preview": "vite preview", "check": "svelte-check --tsconfig ./tsconfig.app.json && tsc -p tsconfig.node.json", "test": "vitest run --config vitest.config.ts" }, "devDependencies": { "@sveltejs/vite-plugin-svelte": "^6.2.4", "@tsconfig/svelte": "^5.0.8", "@types/json-bigint": "^1.0.4", "@types/node": "^24.10.13", "json-bigint": "^1.0.0", "svelte": "^5.53.2", "svelte-check": "^4.4.3", "typescript": "~5.9.3", "vite": "^7.3.1", "vitest": "^2.1.9" }, "dependencies": { "@codemirror/autocomplete": "^6.20.0", "@codemirror/commands": "^6.10.2", "@codemirror/lang-sql": "^6.10.0", "@codemirror/language": "^6.12.1", "@codemirror/search": "^6.6.0", "@codemirror/state": "^6.5.4", "@codemirror/theme-one-dark": "^6.1.3", "@codemirror/view": "^6.39.15", "@tailwindcss/vite": "^4.2.0", "@xyflow/svelte": "^1.5.1", "lucide-svelte": "^0.563.0", "marked": "^15.0.7", "svelte-sonner": "^1.0.7", "tailwindcss": "^4.2.0", "uplot": "^1.6.32" }, "overrides": { "@codemirror/state": "$@codemirror/state", "@codemirror/view": "$@codemirror/view" } } ================================================ FILE: ui/src/App.svelte ================================================ {#if loading}
CH-UI

Loading CH-UI workspace...

{:else if !authenticated} {:else} {/if} ================================================ FILE: ui/src/app.css ================================================ @import "tailwindcss"; @custom-variant dark (&:where(.dark, .dark *)); @theme { --font-sans: "Inter", "SF Pro Text", -apple-system, BlinkMacSystemFont, sans-serif; --font-mono: "JetBrains Mono", "Fira Code", ui-monospace, monospace; /* Compatibility token still referenced in components */ --color-ch-blue: #f97316; --color-ch-orange: #f97316; --color-ch-green: #10b981; /* Neutral grayscale (no blue cast) */ --color-gray-50: #f7f7f8; --color-gray-100: #efeff1; --color-gray-200: #dfdfe2; --color-gray-300: #c8c9ce; --color-gray-400: #a2a5ad; --color-gray-500: #7a7e88; --color-gray-600: #5f646d; --color-gray-700: #494d55; --color-gray-800: #33363d; --color-gray-900: #1f2126; --color-gray-950: #121316; } @theme inline { --radius-sm: calc(var(--radius) - 4px); --radius-md: calc(var(--radius) - 2px); --radius-lg: var(--radius); --radius-xl: calc(var(--radius) + 4px); --color-background: var(--background); --color-foreground: var(--foreground); --color-card: var(--card); --color-card-foreground: var(--card-foreground); --color-popover: var(--popover); --color-popover-foreground: var(--popover-foreground); --color-primary: var(--primary); --color-primary-foreground: var(--primary-foreground); --color-secondary: var(--secondary); --color-secondary-foreground: var(--secondary-foreground); --color-muted: var(--muted); --color-muted-foreground: var(--muted-foreground); --color-accent: var(--accent); --color-accent-foreground: var(--accent-foreground); --color-destructive: var(--destructive); --color-border: var(--border); --color-input: var(--input); --color-ring: var(--ring); } :root { --radius: 0.425rem; --background: #f7f7f8; --foreground: #1b1d22; --card: #ffffff; --card-foreground: #1b1d22; --popover: #ffffff; --popover-foreground: #1b1d22; --primary: #f97316; --primary-foreground: #fffaf5; --secondary: #efeff1; 
--secondary-foreground: #2a2d33; --muted: #efeff1; --muted-foreground: #626773; --accent: #efeff1; --accent-foreground: #2a2d33; --destructive: #dc2626; --border: #d6d7dc; --input: #d6d7dc; --ring: #fb923c; } .dark { --background: #131418; --foreground: #f3f4f6; --card: #1b1d22; --card-foreground: #f3f4f6; --popover: #1b1d22; --popover-foreground: #f3f4f6; --primary: #fb923c; --primary-foreground: #1a1613; --secondary: #282b32; --secondary-foreground: #f3f4f6; --muted: #282b32; --muted-foreground: #a4a8b2; --accent: #282b32; --accent-foreground: #f3f4f6; --destructive: #ef4444; --border: rgba(255, 255, 255, 0.11); --input: rgba(255, 255, 255, 0.14); --ring: #fdba74; } html, body { overflow: hidden; } * { border-color: var(--border); outline-color: color-mix(in oklab, var(--ring), transparent 50%); scrollbar-width: thin; scrollbar-color: rgba(123, 123, 123, 0.45) transparent; } body { background: var(--background); color: var(--foreground); font-family: var(--font-sans); font-weight: 450; -webkit-font-smoothing: antialiased; text-rendering: optimizeLegibility; } h1, h2, h3 { letter-spacing: -0.012em; } code, pre, .font-mono { font-family: var(--font-mono); } .surface-card { background: color-mix(in oklab, var(--card), transparent 1%); border: 1px solid color-mix(in oklab, var(--border), transparent 8%); box-shadow: 0 8px 24px rgba(10, 10, 10, 0.18); } .dark .surface-card { box-shadow: 0 10px 28px rgba(0, 0, 0, 0.34); } .brand-pill { background: color-mix(in oklab, #f97316 16%, transparent); border: 1px solid color-mix(in oklab, #f97316 38%, transparent); color: #f97316; } .dark .brand-pill { background: color-mix(in oklab, #fb923c 18%, transparent); border-color: color-mix(in oklab, #fb923c 40%, transparent); color: #fdba74; } ::-webkit-scrollbar { width: 8px; height: 8px; } ::-webkit-scrollbar-track { background: transparent; } ::-webkit-scrollbar-thumb { background-color: rgba(123, 123, 123, 0.45); border-radius: 10px; } ::-webkit-scrollbar-thumb:hover { 
background-color: rgba(123, 123, 123, 0.62); } @layer components { .ds-page-header { @apply flex items-center justify-between px-4 py-3 border-b border-gray-200 dark:border-gray-800; } .ds-page-title { @apply text-xl font-semibold text-gray-900 dark:text-gray-100; } .ds-page-subtitle { @apply text-xs text-gray-500 dark:text-gray-400; } .ds-tabs { @apply flex gap-1 px-4 pt-2 border-b border-gray-200 dark:border-gray-800; } .ds-tab { @apply px-3.5 py-2.5 text-[13px] font-semibold rounded-t transition-colors text-gray-500 hover:text-gray-700 dark:hover:text-gray-300; } .ds-tab-active { @apply text-ch-blue border-b-2 border-ch-blue bg-orange-50 dark:bg-orange-950/20; } .ds-card { @apply rounded-lg border border-gray-200 dark:border-gray-800 bg-transparent; } .ds-panel { @apply rounded-lg border border-gray-200 dark:border-gray-800 bg-gray-50 dark:bg-gray-900; box-shadow: 0 8px 24px rgba(10, 10, 10, 0.18); } .dark .ds-panel { box-shadow: 0 10px 28px rgba(0, 0, 0, 0.34); } .ds-panel-muted { @apply rounded-lg border border-gray-200 dark:border-gray-800 bg-gray-100/70 dark:bg-gray-900/70; } .ds-stat-card { @apply rounded-lg border border-gray-200 dark:border-gray-800 bg-gray-50 dark:bg-gray-900 p-4; } .ds-form-label { @apply block text-xs font-medium text-gray-700 dark:text-gray-300 mb-1; } .ds-input { @apply w-full rounded border border-gray-300 dark:border-gray-700 bg-transparent px-3 py-2 text-[14px] text-gray-700 dark:text-gray-300; @apply focus:outline-none focus:ring-2 focus:ring-ch-blue/40 focus:border-ch-blue; } .ds-input-sm { @apply w-full rounded border border-gray-300 dark:border-gray-700 bg-transparent px-2.5 py-1.5 text-[13px] text-gray-700 dark:text-gray-300; @apply focus:outline-none focus:ring-2 focus:ring-ch-blue/40 focus:border-ch-blue; } .ds-select { @apply rounded border border-gray-300 dark:border-gray-700 bg-transparent px-2.5 py-1.5 text-[13px] text-gray-700 dark:text-gray-300; @apply focus:outline-none focus:ring-2 focus:ring-ch-blue/40 
focus:border-ch-blue; } .ds-textarea { @apply w-full rounded border border-gray-300 dark:border-gray-700 bg-transparent px-3 py-2 text-sm text-gray-700 dark:text-gray-300; @apply focus:outline-none focus:ring-2 focus:ring-ch-blue/40 focus:border-ch-blue; } .ds-btn-primary { @apply inline-flex items-center justify-center gap-1.5 rounded px-3 py-1.5 text-[13px] font-medium text-white bg-ch-blue border border-orange-500 transition-colors; @apply hover:bg-orange-600; } .ds-btn-ghost { @apply inline-flex items-center justify-center gap-1 rounded p-1.5 text-gray-500 transition-colors; @apply hover:text-ch-blue hover:bg-gray-200 dark:hover:bg-gray-800; } .ds-btn-outline { @apply inline-flex items-center justify-center gap-1 rounded border border-gray-300 dark:border-gray-700 px-2.5 py-1.5 text-[13px] font-medium text-gray-700 dark:text-gray-300 transition-colors; @apply hover:border-ch-blue hover:text-ch-blue; } .ds-icon-btn { @apply inline-flex items-center justify-center rounded p-1.5 text-gray-500 transition-colors; @apply hover:text-ch-blue hover:bg-gray-200 dark:hover:bg-gray-800; } .ds-segment { @apply inline-flex rounded-lg border border-gray-300/80 dark:border-gray-700/80 bg-gray-100/70 dark:bg-gray-900/65 p-1; } .ds-segment-btn { @apply px-2.5 h-7 rounded-md text-xs transition-colors text-gray-500 hover:text-gray-700 dark:hover:text-gray-300; } .ds-segment-btn-active { @apply bg-orange-100 dark:bg-orange-500/15 text-ch-orange; } .ds-table-wrap { @apply overflow-x-auto; } .ds-table { @apply w-full text-sm; } .ds-table-head-row { @apply border-b border-gray-200 dark:border-gray-800; } .ds-table-th { @apply text-left py-2 px-3 text-gray-500 font-medium; } .ds-table-th-right { @apply text-right py-2 px-3 text-gray-500 font-medium; } .ds-table-th-compact { @apply text-left py-1.5 px-2 text-[11px] text-gray-500 font-medium; } .ds-table-th-right-compact { @apply text-right py-1.5 px-2 text-[11px] text-gray-500 font-medium; } .ds-table-row { @apply border-b 
border-gray-100 dark:border-gray-900 hover:bg-gray-50 dark:hover:bg-gray-900/50 transition-colors; } .ds-table-row-static { @apply border-b border-gray-100 dark:border-gray-900; } .ds-td { @apply py-2 px-3 text-gray-500 dark:text-gray-400; } .ds-td-strong { @apply py-2 px-3 text-gray-800 dark:text-gray-200 font-medium; } .ds-td-right { @apply py-2 px-3 text-right text-gray-500 dark:text-gray-400; } .ds-td-mono { @apply py-2 px-3 text-xs text-gray-500 font-mono; } .ds-td-compact { @apply py-1.5 px-2 text-xs text-gray-500 dark:text-gray-400; } .ds-td-compact-strong { @apply py-1.5 px-2 text-xs text-gray-800 dark:text-gray-200 font-medium; } .ds-badge { @apply inline-flex items-center px-1.5 py-0.5 rounded text-[11px] font-medium; } .ds-badge-neutral { @apply bg-gray-200 dark:bg-gray-800 text-gray-700 dark:text-gray-300; } .ds-badge-success { @apply bg-green-100 dark:bg-green-900/30 text-green-700 dark:text-green-300; } .ds-badge-danger { @apply bg-red-100 dark:bg-red-900/30 text-red-700 dark:text-red-300; } .ds-badge-warn { @apply bg-yellow-100 dark:bg-yellow-900/30 text-yellow-800 dark:text-yellow-300; } .ds-badge-brand { @apply border border-orange-200 dark:border-orange-700/60 bg-orange-100 dark:bg-orange-500/15 text-orange-900 dark:text-orange-200; } .ds-empty { @apply text-center py-10 rounded-lg border border-gray-200 dark:border-gray-800 bg-gray-50 dark:bg-gray-900; } .ds-checkbox { @apply h-4 w-4 rounded border border-gray-300 dark:border-gray-700 bg-white dark:bg-gray-900 accent-ch-blue; @apply focus:outline-none focus:ring-2 focus:ring-ch-blue/35 focus:ring-offset-0; } .ds-checkbox-sm { @apply h-3.5 w-3.5; } .ds-checkbox-label { @apply inline-flex items-center gap-2 text-sm text-gray-700 dark:text-gray-300 select-none; } .ds-radio { @apply h-4 w-4 border border-gray-300 dark:border-gray-700 bg-white dark:bg-gray-900 accent-ch-blue; @apply focus:outline-none focus:ring-2 focus:ring-ch-blue/35 focus:ring-offset-0; } /* Brain chat markdown output styling */ 
.prose-brain h1 { @apply text-lg font-bold mt-4 mb-2; } .prose-brain h2 { @apply text-base font-bold mt-3 mb-1.5; } .prose-brain h3 { @apply text-sm font-semibold mt-3 mb-1; } .prose-brain h4, .prose-brain h5, .prose-brain h6 { @apply text-sm font-semibold mt-2 mb-1; } .prose-brain p { @apply my-1.5 leading-relaxed; } .prose-brain ul { @apply list-disc pl-5 my-1.5 space-y-0.5; } .prose-brain ol { @apply list-decimal pl-5 my-1.5 space-y-0.5; } .prose-brain li { @apply leading-relaxed; } .prose-brain a { @apply text-ch-blue hover:underline; } .prose-brain blockquote { @apply border-l-2 border-gray-300 dark:border-gray-600 pl-3 my-2 text-gray-600 dark:text-gray-400 italic; } .prose-brain pre { @apply bg-gray-100 dark:bg-gray-800 rounded-lg p-3 my-2 overflow-x-auto text-xs font-mono; } .prose-brain code { @apply bg-gray-200 dark:bg-gray-700 px-1 py-0.5 rounded text-xs font-mono; } .prose-brain pre code { @apply bg-transparent p-0; } .prose-brain table { @apply min-w-full text-xs my-2 border border-gray-200 dark:border-gray-700 rounded; } .prose-brain th { @apply px-2 py-1 text-left bg-gray-100 dark:bg-gray-800 border-b border-gray-200 dark:border-gray-700 font-semibold; } .prose-brain td { @apply px-2 py-1 border-b border-gray-200/70 dark:border-gray-700/70; } .prose-brain hr { @apply my-3 border-gray-200 dark:border-gray-700; } .prose-brain strong { @apply font-semibold; } .prose-brain em { @apply italic; } } ================================================ FILE: ui/src/lib/api/alerts.ts ================================================ import { apiDel, apiGet, apiPost, apiPut } from './client' import type { AlertChannel, AlertEvent, AlertRule } from '../types/alerts' const BASE = '/api/governance/alerts' export type AlertRuleRoutePayload = { channel_id: string recipients: string[] is_active: boolean delivery_mode?: 'immediate' | 'digest' digest_window_minutes?: number escalation_channel_id?: string escalation_recipients?: string[] escalation_after_failures?: number } 
export async function adminListAlertChannels(): Promise { const res = await apiGet<{ channels: AlertChannel[] }>(`${BASE}/channels`) return res.channels ?? [] } export async function adminCreateAlertChannel(payload: { name: string channel_type: 'smtp' | 'resend' | 'brevo' is_active: boolean config: Record }): Promise { await apiPost(`${BASE}/channels`, payload) } export async function adminUpdateAlertChannel(id: string, payload: { name?: string channel_type?: 'smtp' | 'resend' | 'brevo' is_active?: boolean config?: Record }): Promise { await apiPut(`${BASE}/channels/${encodeURIComponent(id)}`, payload) } export async function adminDeleteAlertChannel(id: string): Promise { await apiDel(`${BASE}/channels/${encodeURIComponent(id)}`) } export async function adminTestAlertChannel(id: string, payload: { recipients: string[] subject?: string message?: string }): Promise<{ provider_message_id?: string }> { return apiPost<{ provider_message_id?: string }>(`${BASE}/channels/${encodeURIComponent(id)}/test`, payload) } export async function adminListAlertRules(): Promise { const res = await apiGet<{ rules: AlertRule[] }>(`${BASE}/rules`) return res.rules ?? 
[] } export async function adminCreateAlertRule(payload: { name: string event_type: string severity_min: string enabled: boolean cooldown_seconds: number max_attempts: number subject_template?: string body_template?: string routes: AlertRuleRoutePayload[] }): Promise { await apiPost(`${BASE}/rules`, payload) } export async function adminUpdateAlertRule(id: string, payload: { name?: string event_type?: string severity_min?: string enabled?: boolean cooldown_seconds?: number max_attempts?: number subject_template?: string body_template?: string routes?: AlertRuleRoutePayload[] }): Promise { await apiPut(`${BASE}/rules/${encodeURIComponent(id)}`, payload) } export async function adminDeleteAlertRule(id: string): Promise { await apiDel(`${BASE}/rules/${encodeURIComponent(id)}`) } export async function adminListAlertEvents(params: { limit?: number; eventType?: string; status?: string } = {}): Promise { const q = new URLSearchParams() if (params.limit) q.set('limit', String(params.limit)) if (params.eventType) q.set('event_type', params.eventType) if (params.status) q.set('status', params.status) const url = `${BASE}/events${q.toString() ? `?${q.toString()}` : ''}` const res = await apiGet<{ events: AlertEvent[] }>(url) return res.events ?? 
[] } ================================================ FILE: ui/src/lib/api/auth.ts ================================================ import { apiGet, apiPost } from './client' import type { Session, Connection } from '../types/api' interface LoginParams { connectionId: string username: string password: string } interface LoginResponse { success: boolean session: Session } interface ConnectionsResponse { success: boolean connections: Connection[] } interface SessionResponse { success: boolean session: Session } /** Log in to a ClickHouse connection */ export function login(params: LoginParams): Promise { return apiPost('/api/auth/login', params) } /** Log out and destroy the session */ export function logout(): Promise { return apiPost('/api/auth/logout') } /** Check if a valid session exists */ export async function checkSession(): Promise { try { const res = await apiGet<{ authenticated: boolean; session?: Session }>('/api/auth/session') if (!res.authenticated) return null return res.session ?? null } catch { return null } } /** List all connections (with online/offline status) */ export async function listConnections(): Promise { const res = await apiGet('/api/auth/connections') return res.connections ?? [] } ================================================ FILE: ui/src/lib/api/brain.ts ================================================ import { withBase } from '../basePath' import { apiDel, apiGet, apiPost, apiPut } from './client' import type { BrainArtifact, BrainChat, BrainMessage, BrainModelOption, BrainProviderAdmin, BrainSkill, } from '../types/brain' export async function listBrainModels(): Promise { const res = await apiGet<{ success: boolean; models: BrainModelOption[] }>('/api/brain/models') return res.models ?? [] } export async function listBrainChats(includeArchived = false): Promise { const res = await apiGet<{ success: boolean; chats: BrainChat[] }>(`/api/brain/chats?includeArchived=${includeArchived}`) return res.chats ?? 
[] } export async function createBrainChat(payload: { title?: string; modelId?: string }): Promise { const res = await apiPost<{ success: boolean; chat: BrainChat }>('/api/brain/chats', payload) return res.chat } export async function updateBrainChat(chatId: string, payload: { title?: string; archived?: boolean; modelId?: string; contextDatabase?: string; contextTable?: string; contextTables?: string }): Promise { await apiPut(`/api/brain/chats/${encodeURIComponent(chatId)}`, payload) } export async function deleteBrainChat(chatId: string): Promise { await apiDel(`/api/brain/chats/${encodeURIComponent(chatId)}`) } export async function listBrainMessages(chatId: string): Promise { const res = await apiGet<{ success: boolean; messages: BrainMessage[] }>(`/api/brain/chats/${encodeURIComponent(chatId)}/messages`) return res.messages ?? [] } export async function listBrainArtifacts(chatId: string): Promise { const res = await apiGet<{ success: boolean; artifacts: BrainArtifact[] }>(`/api/brain/chats/${encodeURIComponent(chatId)}/artifacts`) return res.artifacts ?? [] } export async function runBrainQueryArtifact(chatId: string, payload: { query: string; title?: string; messageId?: string; timeout?: number }): Promise { return apiPost(`/api/brain/chats/${encodeURIComponent(chatId)}/artifacts/query`, payload) } export interface StreamEvent { type: 'delta' | 'done' | 'error' delta?: string error?: string messageId?: string chatId?: string } export async function streamBrainMessage( chatId: string, payload: { content: string; modelId?: string; schemaContext?: any; schemaContexts?: any[] }, onEvent: (event: StreamEvent) => void, ): Promise { const response = await fetch(withBase(`/api/brain/chats/${encodeURIComponent(chatId)}/messages/stream`), { method: 'POST', credentials: 'include', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(payload), }) if (!response.ok) { const body = await response.json().catch(() => ({})) throw new Error(body.error ?? 
`Request failed (${response.status})`) } const reader = response.body?.getReader() if (!reader) throw new Error('No response body') const decoder = new TextDecoder() let buffer = '' while (true) { const { done, value } = await reader.read() if (done) break buffer += decoder.decode(value, { stream: true }) const lines = buffer.split('\n') buffer = lines.pop() ?? '' for (const line of lines) { if (!line.startsWith('data: ')) continue const raw = line.slice(6) if (!raw) continue try { const parsed = JSON.parse(raw) as StreamEvent onEvent(parsed) } catch { // ignore malformed chunks } } } } // -------- Admin endpoints -------- export async function adminListBrainProviders(): Promise { const res = await apiGet<{ success: boolean; providers: BrainProviderAdmin[] }>('/api/admin/brain/providers') return res.providers ?? [] } export async function adminCreateBrainProvider(payload: { name: string kind: string baseUrl?: string apiKey?: string isActive?: boolean isDefault?: boolean }): Promise { await apiPost('/api/admin/brain/providers', payload) } export async function adminUpdateBrainProvider(id: string, payload: { name?: string kind?: string baseUrl?: string apiKey?: string isActive?: boolean isDefault?: boolean }): Promise { await apiPut(`/api/admin/brain/providers/${encodeURIComponent(id)}`, payload) } export async function adminDeleteBrainProvider(id: string): Promise { await apiDel(`/api/admin/brain/providers/${encodeURIComponent(id)}`) } export async function adminSyncBrainProviderModels(id: string): Promise { await apiPost(`/api/admin/brain/providers/${encodeURIComponent(id)}/sync-models`) } export async function adminListBrainModels(): Promise { const res = await apiGet<{ success: boolean; models: BrainModelOption[] }>('/api/admin/brain/models') return res.models ?? 
[] } export async function adminUpdateBrainModel(id: string, payload: { displayName?: string isActive?: boolean isDefault?: boolean }): Promise { await apiPut(`/api/admin/brain/models/${encodeURIComponent(id)}`, payload) } export async function adminBulkUpdateBrainModels(payload: { providerId: string action: 'deactivate_all' | 'activate_all' | 'activate_recommended' }): Promise { await apiPost('/api/admin/brain/models/bulk', payload) } export async function adminListBrainSkills(): Promise { const res = await apiGet<{ success: boolean; skills: BrainSkill[] }>('/api/admin/brain/skills') return res.skills ?? [] } export async function adminCreateBrainSkill(payload: { name: string content: string isActive?: boolean isDefault?: boolean }): Promise { await apiPost('/api/admin/brain/skills', payload) } export async function adminUpdateBrainSkill(id: string, payload: { name?: string content?: string isActive?: boolean isDefault?: boolean }): Promise { await apiPut(`/api/admin/brain/skills/${encodeURIComponent(id)}`, payload) } ================================================ FILE: ui/src/lib/api/client.ts ================================================ import { withBase } from '../basePath' import { safeParse } from '../utils/safe-json' /** Base fetch wrapper with credentials and error handling */ async function parseResponseBody(res: Response): Promise { const contentType = (res.headers.get('content-type') || '').toLowerCase() if (contentType.includes('application/json')) { try { const text = await res.text() return safeParse(text) } catch { return null } } const text = await res.text().catch(() => '') if (!text) return null return { message: text } } function buildErrorMessage(status: number, body: any): string { const raw = body && typeof body === 'object' ? String(body.message || body.error || '').trim() : '' if (status === 429) { const retryAfter = body && typeof body === 'object' ? 
Number(body.retryAfter || body.retry_after || 0) : 0 if (raw && retryAfter > 0) return `${raw} (retry in ${retryAfter}s)` if (raw) return raw return 'Too many requests' } if (raw) return raw return `Request failed (${status})` } async function request( url: string, options: RequestInit = {}, ): Promise { const isFormDataBody = typeof FormData !== 'undefined' && options.body instanceof FormData const res = await fetch(withBase(url), { credentials: 'include', headers: { ...(isFormDataBody ? {} : { 'Content-Type': 'application/json' }), ...options.headers, }, ...options, }) const body = await parseResponseBody(res) const isAuthEndpoint = url.startsWith('/api/auth/') if (res.status === 401 && !isAuthEndpoint) { // Session expired — redirect to login window.location.href = withBase('/login') throw new Error('Session expired') } if (res.status === 402) { throw new Error(buildErrorMessage(res.status, body) || 'Pro license required') } if (!res.ok || (body && body.success === false)) { throw new Error(buildErrorMessage(res.status, body)) } return body as T } export function apiGet(url: string): Promise { return request(url) } export function apiPost(url: string, data?: unknown): Promise { return request(url, { method: 'POST', body: data != null ? JSON.stringify(data) : undefined, }) } export function apiPostForm(url: string, data: FormData): Promise { return request(url, { method: 'POST', body: data, }) } export function apiPut(url: string, data?: unknown): Promise { return request(url, { method: 'PUT', body: data != null ? 
JSON.stringify(data) : undefined, }) } export function apiDel(url: string): Promise { return request(url, { method: 'DELETE' }) } ================================================ FILE: ui/src/lib/api/governance.ts ================================================ import { apiGet, apiPost, apiPut, apiDel } from './client' import type { GovernanceOverview, GovernanceSettings, SyncResult, SyncState, GovDatabase, GovTable, GovColumn, SchemaChange, QueryLogEntry, TopQuery, LineageGraph, TagEntry, ChUser, ChRole, AccessMatrixEntry, OverPermission, Policy, PolicyViolation, GovernanceObjectComment, GovernanceIncident, GovernanceIncidentComment, } from '../types/governance' const BASE = '/api/governance' // ── Overview / Sync ───────────────────────────────────────────── export function fetchOverview() { return apiGet<{ overview?: GovernanceOverview } | GovernanceOverview>(`${BASE}/overview`) .then((res: any) => res?.overview ?? res) } export function triggerSync() { return apiPost(`${BASE}/sync`) } export function triggerSingleSync(type: 'metadata' | 'query_log' | 'access') { return apiPost(`${BASE}/sync/${type}`) } export function fetchSyncStatus() { return apiGet<{ sync_states: SyncState[] }>(`${BASE}/sync/status`) } // ── Settings (admin) ──────────────────────────────────────────── export function fetchGovernanceSettings() { return apiGet('/api/admin/governance/settings') } export function updateGovernanceSettings(payload: { sync_enabled?: boolean banner_dismissed?: boolean }) { return apiPut('/api/admin/governance/settings', payload) } // ── Metadata ──────────────────────────────────────────────────── export function fetchDatabases() { return apiGet<{ databases: GovDatabase[] }>(`${BASE}/databases`) } export function fetchTables(params?: { database?: string; tag?: string; search?: string }) { const qs = new URLSearchParams() if (params?.database) qs.set('database', params.database) if (params?.tag) qs.set('tag', params.tag) if (params?.search) qs.set('search', 
params.search) const q = qs.toString() return apiGet<{ tables: GovTable[] }>(`${BASE}/tables${q ? '?' + q : ''}`) } export function fetchTableDetail(database: string, table: string) { return apiGet<{ table: GovTable columns: GovColumn[] tags: TagEntry[] recent_queries?: QueryLogEntry[] queries?: QueryLogEntry[] incoming: any[] outgoing: any[] }>(`${BASE}/tables/${encodeURIComponent(database)}/${encodeURIComponent(table)}`) .then((res: any) => ({ ...res, recent_queries: res?.recent_queries ?? res?.queries ?? [], })) } export function fetchTableNotes(database: string, table: string) { return apiGet<{ notes: GovernanceObjectComment[] }>(`${BASE}/tables/${encodeURIComponent(database)}/${encodeURIComponent(table)}/notes`) } export function fetchColumnNotes(database: string, table: string, column: string) { return apiGet<{ notes: GovernanceObjectComment[] }>(`${BASE}/tables/${encodeURIComponent(database)}/${encodeURIComponent(table)}/columns/${encodeURIComponent(column)}/notes`) } export function createTableNote(database: string, table: string, commentText: string) { return apiPost<{ id: string }>(`${BASE}/tables/${encodeURIComponent(database)}/${encodeURIComponent(table)}/notes`, { comment_text: commentText }) } export function createColumnNote(database: string, table: string, column: string, commentText: string) { return apiPost<{ id: string }>(`${BASE}/tables/${encodeURIComponent(database)}/${encodeURIComponent(table)}/columns/${encodeURIComponent(column)}/notes`, { comment_text: commentText }) } export function deleteObjectNote(id: string) { return apiDel(`${BASE}/notes/${encodeURIComponent(id)}`) } export function fetchSchemaChanges(limit = 50) { return apiGet<{ changes: SchemaChange[] }>(`${BASE}/schema-changes?limit=${limit}`) } // ── Query Log ─────────────────────────────────────────────────── export function fetchQueryLog(params?: { user?: string; table?: string; limit?: number; offset?: number }) { const qs = new URLSearchParams() if (params?.user) 
qs.set('user', params.user) if (params?.table) qs.set('table', params.table) if (params?.limit) qs.set('limit', String(params.limit)) if (params?.offset) qs.set('offset', String(params.offset)) const q = qs.toString() return apiGet<{ entries: QueryLogEntry[]; total: number }>(`${BASE}/query-log${q ? '?' + q : ''}`) } export function fetchTopQueries(limit = 20) { return apiGet<{ queries?: TopQuery[]; top_queries?: any[] }>(`${BASE}/query-log/top?limit=${limit}`) .then((res: any) => { const normalized = (res?.queries ?? res?.top_queries ?? []).map((q: any) => ({ normalized_hash: q?.normalized_hash ?? '', count: Number(q?.count ?? q?.execution_count ?? 0), avg_duration_ms: Number(q?.avg_duration_ms ?? q?.avg_duration ?? 0), total_read_rows: Number(q?.total_read_rows ?? 0), sample_query: q?.sample_query ?? q?.normalized_query ?? '', last_seen: q?.last_seen ?? '', })) as TopQuery[] return { queries: normalized } }) } // ── Lineage ───────────────────────────────────────────────────── export function fetchLineage(database: string, table: string) { return apiGet<{ graph?: LineageGraph } | LineageGraph>(`${BASE}/lineage?database=${encodeURIComponent(database)}&table=${encodeURIComponent(table)}`) .then((res: any) => res?.graph ?? res) } export function fetchLineageGraph(includeColumns = false) { const qs = includeColumns ? '?include_columns=true' : '' return apiGet<{ graph?: LineageGraph } | LineageGraph>(`${BASE}/lineage/graph${qs}`) .then((res: any) => res?.graph ?? res) } export function fetchViewGraph() { return apiGet<{ graph?: LineageGraph } | LineageGraph>(`${BASE}/view-graph`) .then((res: any) => res?.graph ?? res) } export function fetchLineageWithColumns(database: string, table: string) { return apiGet<{ graph?: LineageGraph } | LineageGraph>( `${BASE}/lineage?database=${encodeURIComponent(database)}&table=${encodeURIComponent(table)}&include_columns=true` ).then((res: any) => res?.graph ?? 
res) } export function fetchQueryByQueryID(queryId: string) { return apiGet<{ entry: QueryLogEntry }>(`${BASE}/query-log/${encodeURIComponent(queryId)}`) } // ── Tags ──────────────────────────────────────────────────────── export function fetchTags(params?: { database?: string; table?: string }) { const qs = new URLSearchParams() if (params?.database) qs.set('database', params.database) if (params?.table) qs.set('table', params.table) const q = qs.toString() return apiGet<{ tags: TagEntry[] }>(`${BASE}/tags${q ? '?' + q : ''}`) } export function createTag(data: { object_type: string; database_name: string; table_name: string; column_name?: string; tag: string }) { return apiPost<{ id: string }>(`${BASE}/tags`, data) } export function deleteTag(id: string) { return apiDel(`${BASE}/tags/${id}`) } // ── Access ────────────────────────────────────────────────────── export function fetchAccessUsers() { return apiGet<{ users: ChUser[] }>(`${BASE}/access/users`) } export function fetchAccessRoles() { return apiGet<{ roles: ChRole[] }>(`${BASE}/access/roles`) } export function fetchAccessMatrix(user?: string) { const q = user ? 
`?user=${encodeURIComponent(user)}` : '' return apiGet<{ matrix: AccessMatrixEntry[] }>(`${BASE}/access/matrix${q}`) } export function fetchOverPermissions(days = 30) { return apiGet<{ over_permissions: OverPermission[] }>(`${BASE}/access/over-permissions?days=${days}`) } // ── Policies ──────────────────────────────────────────────────── export function fetchPolicies() { return apiGet<{ policies: Policy[] }>(`${BASE}/policies`) } export function createPolicy(data: Partial) { return apiPost<{ id: string }>(`${BASE}/policies`, data) } export function getPolicy(id: string) { return apiGet(`${BASE}/policies/${id}`) } export function updatePolicy(id: string, data: Partial) { return apiPut(`${BASE}/policies/${id}`, data) } export function deletePolicy(id: string) { return apiDel(`${BASE}/policies/${id}`) } // ── Violations ────────────────────────────────────────────────── export function fetchViolations(params?: { policy_id?: string; limit?: number }) { const qs = new URLSearchParams() if (params?.policy_id) qs.set('policy_id', params.policy_id) if (params?.limit) qs.set('limit', String(params.limit)) const q = qs.toString() return apiGet<{ violations: PolicyViolation[] }>(`${BASE}/violations${q ? '?' + q : ''}`) } export function promoteViolationToIncident(id: string) { return apiPost<{ incident_id: string; created: boolean }>(`${BASE}/violations/${encodeURIComponent(id)}/incident`) } export function fetchIncidents(params?: { status?: string; severity?: string; limit?: number }) { const qs = new URLSearchParams() if (params?.status) qs.set('status', params.status) if (params?.severity) qs.set('severity', params.severity) if (params?.limit) qs.set('limit', String(params.limit)) const q = qs.toString() return apiGet<{ incidents: GovernanceIncident[] }>(`${BASE}/incidents${q ? '?' 
+ q : ''}`) } export function getIncident(id: string) { return apiGet<{ incident: GovernanceIncident }>(`${BASE}/incidents/${encodeURIComponent(id)}`) } export function createIncident(data: { source_type?: string source_ref?: string title: string severity?: string status?: string assignee?: string details?: string }) { return apiPost<{ id: string }>(`${BASE}/incidents`, data) } export function updateIncident(id: string, data: { title?: string severity?: string status?: string assignee?: string details?: string resolution_note?: string }) { return apiPut(`${BASE}/incidents/${encodeURIComponent(id)}`, data) } export function fetchIncidentComments(id: string) { return apiGet<{ comments: GovernanceIncidentComment[] }>(`${BASE}/incidents/${encodeURIComponent(id)}/comments`) } export function createIncidentComment(id: string, commentText: string) { return apiPost<{ id: string }>(`${BASE}/incidents/${encodeURIComponent(id)}/comments`, { comment_text: commentText }) } ================================================ FILE: ui/src/lib/api/models.ts ================================================ import { apiGet, apiPost, apiPut, apiDel } from './client' import type { Model, ModelRun, ModelRunResult, ModelDAG, ValidationResult, ModelSchedule, Pipeline } from '../types/models' const BASE = '/api/models' export function listModels() { return apiGet<{ models: Model[] }>(BASE) } export function createModel(data: { name: string description?: string target_database: string materialization: string sql_body: string table_engine?: string order_by?: string }) { return apiPost<{ model: Model }>(BASE, data) } export function getModel(id: string) { return apiGet<{ model: Model }>(`${BASE}/${id}`) } export function updateModel(id: string, data: Partial>) { return apiPut<{ model: Model }>(`${BASE}/${id}`, data) } export function deleteModel(id: string) { return apiDel(`${BASE}/${id}`) } export function getDAG() { return apiGet(`${BASE}/dag`) } export function validateModels() { return 
apiGet(`${BASE}/validate`) } export function runAllModels() { return apiPost<{ run_id: string }>(`${BASE}/run`) } export function runSingleModel(id: string) { return apiPost<{ run_id: string }>(`${BASE}/${id}/run`) } export function listModelRuns(limit = 20, offset = 0) { return apiGet<{ runs: ModelRun[] }>(`${BASE}/runs?limit=${limit}&offset=${offset}`) } export function getModelRun(runId: string) { return apiGet<{ run: ModelRun; results: ModelRunResult[] }>(`${BASE}/runs/${runId}`) } export function listPipelines() { return apiGet<{ pipelines: Pipeline[] }>(`${BASE}/pipelines`) } export function runPipeline(anchorId: string) { return apiPost<{ run_id: string }>(`${BASE}/pipelines/${anchorId}/run`) } export function getPipelineSchedule(anchorId: string) { return apiGet<{ schedule: ModelSchedule | null }>(`${BASE}/schedule/${anchorId}`) } export function upsertPipelineSchedule(anchorId: string, data: { cron: string; enabled: boolean }) { return apiPut<{ schedule: ModelSchedule }>(`${BASE}/schedule/${anchorId}`, data) } export function deletePipelineSchedule(anchorId: string) { return apiDel(`${BASE}/schedule/${anchorId}`) } ================================================ FILE: ui/src/lib/api/pipelines.ts ================================================ import { apiGet, apiPost, apiPut, apiDel } from './client' import type { Pipeline, PipelineGraph, PipelineRun, PipelineRunLog } from '../types/pipelines' const BASE = '/api/pipelines' export function listPipelines() { return apiGet<{ pipelines: Pipeline[] }>(BASE) } export function createPipeline(data: { name: string; description?: string; connection_id?: string }) { return apiPost<{ pipeline: Pipeline }>(BASE, data) } export function getPipeline(id: string) { return apiGet<{ pipeline: Pipeline; graph: PipelineGraph }>(`${BASE}/${id}`) } export function updatePipeline(id: string, data: { name: string; description?: string }) { return apiPut<{ pipeline: Pipeline }>(`${BASE}/${id}`, data) } export function 
deletePipeline(id: string) { return apiDel(`${BASE}/${id}`) } export function saveGraph(id: string, graph: { nodes: { id: string; node_type: string; label: string; position_x: number; position_y: number; config: Record }[] edges: { id: string; source_node_id: string; target_node_id: string; source_handle?: string; target_handle?: string }[] viewport?: { x: number; y: number; zoom: number } }) { return apiPut<{ success: string }>(`${BASE}/${id}/graph`, graph) } export function startPipeline(id: string) { return apiPost<{ success: string }>(`${BASE}/${id}/start`) } export function stopPipeline(id: string) { return apiPost<{ success: string }>(`${BASE}/${id}/stop`) } export function getPipelineStatus(id: string) { return apiGet<{ pipeline_id: string status: string last_error: string | null rows_ingested?: number bytes_ingested?: number batches_sent?: number errors_count?: number }>(`${BASE}/${id}/status`) } export function listRuns(id: string, limit = 20, offset = 0) { return apiGet<{ runs: PipelineRun[] }>(`${BASE}/${id}/runs?limit=${limit}&offset=${offset}`) } export function getRunLogs(id: string, runId: string, limit = 200) { return apiGet<{ logs: PipelineRunLog[] }>(`${BASE}/${id}/runs/${runId}/logs?limit=${limit}`) } ================================================ FILE: ui/src/lib/api/query.ts ================================================ import { apiGet, apiPost } from './client' import type { LegacyQueryResult, ExplorerDataResponse, QueryPlanResult, QueryProfileResult, QueryEstimateResult, SampleQueryResult, } from '../types/query' import type { Column } from '../types/schema' interface RunQueryParams { query: string timeout?: number } function escapeLiteral(value: string): string { // Reject null bytes which can truncate strings in some SQL engines if (value.includes('\0')) throw new Error('Invalid character in identifier') return value.replace(/\\/g, '\\\\').replace(/'/g, "\\'") } function escapeIdentifier(value: string): string { if 
(value.includes('\0')) throw new Error('Invalid character in identifier') return '`' + value.replace(/`/g, '``') + '`' } /** Execute a query (legacy JSON format) */ export function runQuery(params: RunQueryParams): Promise { return apiPost('/api/query/run', params) } /** Format a SQL query */ export async function formatSQL(query: string): Promise { const res = await apiPost<{ formatted: string }>('/api/query/format', { query }) return res.formatted } /** Get EXPLAIN output for a query */ export function explainQuery(query: string): Promise { return apiPost('/api/query/explain', { query }) } /** Get parsed query plan (tree + raw lines) */ export function fetchQueryPlan(query: string): Promise { return apiPost('/api/query/plan', { query }) } /** Get inline profiling row from system.query_log for a query */ export function fetchQueryProfile(query: string): Promise { return apiPost('/api/query/profile', { query }) } /** Get query cost estimate via EXPLAIN ESTIMATE */ export function estimateQuery(query: string): Promise { return apiPost('/api/query/estimate', { query }) } /** Execute sampling query: first N rows per shard with fallback to global sample */ export function runSampleQuery(params: { query: string per_shard?: number shard_by?: string timeout?: number }): Promise { return apiPost('/api/query/sample', { query: params.query, per_shard: params.per_shard ?? 25, shard_by: params.shard_by ?? '_shard_num', timeout: params.timeout ?? 45, }) } /** Fetch paginated explorer data (JSONCompact format) */ export function fetchExplorerData(params: { database: string table: string page?: number page_size?: number sort_column?: string sort_dir?: string }): Promise { return apiPost('/api/query/explorer-data', { database: params.database, table: params.table, page: params.page ?? 0, page_size: params.page_size ?? 100, sort_column: params.sort_column ?? '', sort_dir: params.sort_dir ?? 
'asc', }) } /** List databases */ export async function listDatabases(): Promise { const res = await apiGet<{ databases: string[] }>('/api/query/databases') return res.databases ?? [] } /** Fetch autocomplete data (functions + keywords) */ export async function fetchCompletions(): Promise<{ functions: string[]; keywords: string[] }> { const res = await apiGet<{ functions: string[]; keywords: string[] }>('/api/query/completions') return { functions: res.functions ?? [], keywords: res.keywords ?? [] } } /** List tables in a database */ export async function listTables(database: string): Promise { const res = await apiGet<{ tables: Array<{ name: string; engine: string }> }>(`/api/query/tables?database=${encodeURIComponent(database)}`) return (res.tables ?? []).map(t => t.name) } /** List columns for a table */ export async function listColumns(database: string, table: string): Promise { const res = await apiGet<{ columns: Column[] }>( `/api/query/columns?database=${encodeURIComponent(database)}&table=${encodeURIComponent(table)}`, ) return res.columns ?? 
[] } /** Fetch table metadata from system.tables */ export async function fetchTableInfo(database: string, table: string): Promise> { const db = escapeLiteral(database) const tbl = escapeLiteral(table) const query = `SELECT database, name, engine, total_rows, total_bytes, lifetime_rows, lifetime_bytes, metadata_modification_time, create_table_query, partition_key, sorting_key, primary_key, sampling_key FROM system.tables WHERE database = '${db}' AND name = '${tbl}'` const res = await runQuery({ query }) if (res.data?.length > 0) { const row = res.data[0] if (Array.isArray(row)) { const obj: Record = {} res.meta.forEach((col: any, i: number) => { obj[col.name] = row[i] }) return obj } return row as Record } return {} } /** Fetch table schema via DESCRIBE */ export async function fetchTableSchema(database: string, table: string): Promise { return runQuery({ query: `DESCRIBE TABLE ${escapeIdentifier(database)}.${escapeIdentifier(table)}` }) } /** Fetch database metadata and aggregate stats */ export async function fetchDatabaseInfo(database: string): Promise> { const db = escapeLiteral(database) const query = `SELECT d.name, d.engine, d.data_path, d.metadata_path, count(t.name) AS table_count, sumOrNull(t.total_rows) AS total_rows, sumOrNull(t.total_bytes) AS total_bytes, maxOrNull(t.metadata_modification_time) AS last_modified FROM system.databases d LEFT JOIN system.tables t ON t.database = d.name WHERE d.name = '${db}' GROUP BY d.name, d.engine, d.data_path, d.metadata_path` const res = await runQuery({ query }) if (res.data?.length > 0) { const row = res.data[0] if (Array.isArray(row)) { const obj: Record = {} res.meta.forEach((col: any, i: number) => { obj[col.name] = row[i] }) return obj } return row as Record } return {} } /** Fetch tables list and table-level stats for a database */ export async function fetchDatabaseTables(database: string): Promise { const db = escapeLiteral(database) const query = `SELECT name, engine, total_rows, total_bytes, 
metadata_modification_time FROM system.tables WHERE database = '${db}' ORDER BY name` return runQuery({ query }) } ================================================ FILE: ui/src/lib/api/stream.ts ================================================ import { withBase } from '../basePath' import type { ColumnMeta, QueryStats, StreamMessage } from '../types/query' import { safeParse } from '../utils/safe-json' /** Execute a streaming query via NDJSON. Calls the provided callbacks as data arrives. */ export async function executeStreamQuery( sql: string, maxResultRows: number, onMeta: (meta: ColumnMeta[]) => void, onChunk: (rows: unknown[][], seq: number) => void, onDone: (stats: QueryStats | undefined, totalRows: number) => void, onError: (error: string) => void, signal?: AbortSignal, ): Promise { const res = await fetch(withBase('/api/query/stream'), { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ query: sql, maxResultRows }), credentials: 'include', signal, }) if (!res.ok) { const body = await res.json().catch(() => ({ error: `HTTP ${res.status}` })) onError(body.error || `Request failed (${res.status})`) return } const reader = res.body!.getReader() const decoder = new TextDecoder() let buf = '' while (true) { const { done, value } = await reader.read() if (done) break buf += decoder.decode(value, { stream: true }) const lines = buf.split('\n') buf = lines.pop()! 
for (const line of lines) { if (!line.trim()) continue try { const msg: StreamMessage = safeParse(line) switch (msg.type) { case 'meta': onMeta(msg.meta) break case 'chunk': onChunk(msg.data, msg.seq) break case 'done': onDone(msg.statistics, msg.total_rows) break case 'error': onError(msg.error) break } } catch { // Skip malformed lines } } } // Process remaining buffer if (buf.trim()) { try { const msg: StreamMessage = safeParse(buf) if (msg.type === 'done') onDone(msg.statistics, msg.total_rows) else if (msg.type === 'error') onError(msg.error) } catch { // ignore } } } ================================================ FILE: ui/src/lib/basePath.ts ================================================ /** * Base path utility for subpath deployments. * * Supports deploying CH-UI behind a reverse proxy at a subpath. * Checks runtime window.env first (for Docker inject-env), falls back to * build-time import.meta.env.BASE_URL (set via VITE_BASE_PATH). */ declare global { interface Window { env?: { VITE_BASE_PATH?: string } } } /** Returns the base path without trailing slash (empty string for root). */ export function getBasePath(): string { if (typeof window !== 'undefined' && window.env?.VITE_BASE_PATH) { return window.env.VITE_BASE_PATH.replace(/\/$/, '') } const base = import.meta.env.BASE_URL ?? '/' return base.endsWith('/') ? base.slice(0, -1) : base } /** Prepends the base path to an absolute path (path should start with /). */ export function withBase(path: string): string { const base = getBasePath() return base ? base + path : path } /** Strips the base path prefix from a pathname. */ export function stripBase(path: string): string { const base = getBasePath() if (!base) return path return path.startsWith(base) ? (path.slice(base.length) || '/') : path } ================================================ FILE: ui/src/lib/components/brain/BrainArtifactCard.svelte ================================================
{#if chartConfig && expanded}
{/if} {#if payload?.query}
View query
{payload.query}
{/if}
View raw payload
{artifact.content}
{:else}
View payload
{artifact.content}
{/if} {/if}
================================================ FILE: ui/src/lib/components/brain/BrainEmptyState.svelte ================================================

Start a chat to generate SQL and insights.

================================================ FILE: ui/src/lib/components/brain/BrainHeader.svelte ================================================

Brain

onDbChange(v)} />
{#if selectedDb}
onTableChange(v)} />
{/if}
onModelChange(v)} />
================================================ FILE: ui/src/lib/components/brain/BrainInput.svelte ================================================
{#if contexts.length > 0}
{#each contexts as ctx (`${ctx.database}.${ctx.table}`)} {ctx.database}.{ctx.table} {/each} {#if contexts.length > 1 && onClearAllContexts} {/if}
{/if}
{#if mentionActive} { mentionActive = false; mentionQuery = '' }} bind:this={dropdownRef} /> {/if}
================================================ FILE: ui/src/lib/components/brain/BrainMentionDropdown.svelte ================================================ {#if filteredOptions.length > 0}
{#each filteredOptions as opt, i (opt.label)} {/each}
{/if} ================================================ FILE: ui/src/lib/components/brain/BrainMessage.svelte ================================================ {#if message.role === 'user'}
{message.content}
{:else}
{#if message.content} {#each segments as seg} {#if seg.type === 'markdown'}
{@html seg.html}
{:else if seg.type === 'sql'} {/if} {/each} {#if orphanArtifacts.length > 0} {#each orphanArtifacts as art} {/each} {/if} {:else if streaming && isLastMessage}
{/if}
{/if} ================================================ FILE: ui/src/lib/components/brain/BrainSidebar.svelte ================================================ ================================================ FILE: ui/src/lib/components/brain/BrainSqlBlock.svelte ================================================
SQL
{@html highlightSQL(sql)}
{#if artifact} {/if} ================================================ FILE: ui/src/lib/components/brain/brain-markdown.ts ================================================ import { Marked } from 'marked' export interface MessageSegment { type: 'markdown' | 'sql' content: string html?: string } const SQL_FENCE_RE = /```sql\n([\s\S]*?)```/g const marked = new Marked({ breaks: true, gfm: true, }) /** Render a markdown string to HTML using marked. */ export function renderMarkdown(content: string): string { return marked.parse(content) as string } /** * Split assistant message content into alternating markdown and sql segments. * SQL segments are extracted from ```sql fences so they can be rendered * as interactive Svelte components instead of static HTML. */ export function parseMessageSegments(content: string): MessageSegment[] { const segments: MessageSegment[] = [] let lastIndex = 0 for (const match of content.matchAll(SQL_FENCE_RE)) { const matchStart = match.index! // Markdown text before this sql block if (matchStart > lastIndex) { const md = content.slice(lastIndex, matchStart) segments.push({ type: 'markdown', content: md, html: renderMarkdown(md) }) } // The sql block itself segments.push({ type: 'sql', content: match[1].trim() }) lastIndex = matchStart + match[0].length } // Trailing markdown after the last sql block if (lastIndex < content.length) { const md = content.slice(lastIndex) segments.push({ type: 'markdown', content: md, html: renderMarkdown(md) }) } return segments } /** Extract raw SQL strings from ```sql fences. 
*/ export function extractSqlBlocks(content: string): string[] { const blocks: string[] = [] for (const match of content.matchAll(SQL_FENCE_RE)) { blocks.push(match[1].trim()) } return blocks } const SQL_KEYWORDS = new Set([ 'SELECT','FROM','WHERE','JOIN','LEFT','RIGHT','INNER','OUTER','CROSS','FULL', 'ON','AND','OR','NOT','IN','IS','NULL','LIKE','BETWEEN','EXISTS', 'GROUP','BY','ORDER','ASC','DESC','LIMIT','OFFSET','HAVING', 'INSERT','INTO','VALUES','UPDATE','SET','DELETE','CREATE','ALTER','DROP', 'TABLE','INDEX','VIEW','AS','WITH','UNION','ALL','DISTINCT','CASE','WHEN', 'THEN','ELSE','END','CAST','IF','ARRAY','MAP','TUPLE', 'FORMAT','USING','ENGINE','PARTITION','SAMPLE','PREWHERE','GLOBAL', 'ANY','ANTI','SEMI','MATERIALIZED','FINAL','SETTINGS', 'TRUE','FALSE','COUNT','SUM','AVG','MIN','MAX','UNIQ', ]) /** Lightweight SQL syntax highlighting — returns HTML with colored spans. */ export function highlightSQL(sql: string): string { return sql.replace( /('(?:[^'\\]|\\.)*')|("(?:[^"\\]|\\.)*")|(--[^\n]*)|(\b\d+(?:\.\d+)?\b)|(\b[A-Za-z_]\w*\b)/g, (match, singleStr: string, doubleStr: string, comment: string, num: string, word: string) => { if (singleStr || doubleStr) return `${escapeHtml(match)}` if (comment) return `${escapeHtml(match)}` if (num) return `${escapeHtml(match)}` if (word && SQL_KEYWORDS.has(word.toUpperCase())) return `${escapeHtml(match)}` return escapeHtml(match) }, ) } function escapeHtml(s: string): string { return s.replace(/&/g, '&').replace(//g, '>').replace(/"/g, '"') } ================================================ FILE: ui/src/lib/components/common/Button.svelte ================================================ ================================================ FILE: ui/src/lib/components/common/Combobox.svelte ================================================
{#if open}
{ e.preventDefault(); closeMenu() }} onkeydown={(e) => (e.key === 'Escape' || e.key === 'Enter') && closeMenu()} >
{#if filtered.length === 0}
{emptyText}
{:else} {#each filtered as opt, idx (opt.value)} {/each} {/if}
{/if}
================================================ FILE: ui/src/lib/components/common/ConfirmDialog.svelte ================================================
{#if description}

{description}

{/if}
================================================ FILE: ui/src/lib/components/common/ContextMenu.svelte ================================================ {#if open}
(e.key === 'Escape' || e.key === 'Enter') && closeMenu()} oncontextmenu={(e) => { e.preventDefault() closeMenu() }} >
{/if} ================================================ FILE: ui/src/lib/components/common/HelpTip.svelte ================================================ {#if open} {/if} ================================================ FILE: ui/src/lib/components/common/InputDialog.svelte ================================================
{#if description}

{description}

{/if}
================================================ FILE: ui/src/lib/components/common/MiniTrendChart.svelte ================================================
================================================ FILE: ui/src/lib/components/common/Modal.svelte ================================================ {#if open}
{/if} ================================================ FILE: ui/src/lib/components/common/ProRequired.svelte ================================================
CH-UI logo

Pro license required

{feature} is part of CH-UI Pro.

Activate a Pro license in License to unlock this feature, or get a license if you don't have one yet.

What you unlock with Pro

    {#each proFeatures as feat}
  • {feat}
  • {/each}
Get a License
================================================ FILE: ui/src/lib/components/common/Sheet.svelte ================================================ {#if open} {/if} ================================================ FILE: ui/src/lib/components/common/Spinner.svelte ================================================ ================================================ FILE: ui/src/lib/components/common/Toast.svelte ================================================ ================================================ FILE: ui/src/lib/components/dashboard/ChartPanel.svelte ================================================
================================================ FILE: ui/src/lib/components/dashboard/DashboardGrid.svelte ================================================ {#if panels.length === 0}

No panels yet

Add a panel with a SQL query to visualize data

{:else}
{#each panels as panel (panel.id)} {@const layout = displayLayouts.find(l => l.id === panel.id)} {@const pos = layout && colW > 0 ? gridToPixel(layout, colW) : null} {@const result = panelResults.get(panel.id)} {@const cfg = parsePanelConfig(panel.config)} {@const isActive = (mode === 'dragging' || mode === 'resizing') && activeId === panel.id} {#if pos}
handleDragStart(panel.id, e)} >
{panel.name}
{#if !result || result.loading}
{:else if result.error}

{result.error}

{:else if panel.panel_type === 'stat'}
{getStatValue(result.data, result.meta)}
{:else if panel.panel_type === 'timeseries' || panel.panel_type === 'bar'} {:else} {#if result.meta.length > 0}
{#each result.meta as col} {/each} {#each result.data.slice(0, 100) as row} {#each result.meta as col} {/each} {/each}
{col.name}
{row[col.name] ?? '--'}
{:else}

No data

{/if} {/if}
handleResizeStart(panel.id, e)} >
{/if} {/each} {#if ghostLayout && mode !== 'idle' && colW > 0} {@const ghostPos = gridToPixel(ghostLayout, colW)}
{/if}
{/if} ================================================ FILE: ui/src/lib/components/dashboard/PanelEditor.svelte ================================================

{panel?.id ? 'Edit Panel' : 'New Panel'}

query = v} />
{#if running}
{:else if queryError}

{queryError}

{:else if queryData.length === 0 && queryMeta.length === 0}
Run a query to see results
{:else if chartType === 'table'} {:else if chartType === 'stat'}
{getStatValue(queryData, queryMeta)}
{:else} {/if}
Available Variables
$__timestamp(col) — DateTime range filter
$__timeFilter(col) — Epoch range filter
$__interval — Aggregation interval (seconds)
$__timeFrom / $__timeTo — Range boundaries

Visualization

{#each vizTypes as vt} {@const Icon = vt.icon} {/each}
{#if chartType === 'timeseries' || chartType === 'bar'}

Time Scope

Uses dashboard picker: {dashboardRangeLabel}

xColumn = v} placeholder="Select column..." />

Y-Axis Columns

{#if queryMeta.length === 0}

Run a query first

{:else}
{#each queryMeta.filter(m => m.name !== xColumn) as col} {/each}
{/if}
{#if yColumns.length > 0}

Series Colors

{#each yColumns as yCol, i}
updateColor(i, (e.target as HTMLInputElement).value)} class="w-6 h-6 rounded border border-gray-300 dark:border-gray-600 cursor-pointer" /> {yCol}
{/each}
{/if}
legendPosition = v as 'bottom' | 'right' | 'none'} placeholder="Legend position" />
{/if}
================================================ FILE: ui/src/lib/components/dashboard/TimeRangeSelector.svelte ================================================
{#if open}
{#if rangeDescription}
{rangeDescription}
{/if}
fromTime = v} /> toTime = v} /> timezone = v} />
{/if}
================================================ FILE: ui/src/lib/components/dashboard/time-picker/CalendarMonth.svelte ================================================
{title}
{#each WEEKDAYS as day}
{day}
{/each}
{#each grid as week}
{#each week as cell} {#if cell} {:else}
{/if} {/each}
{/each}
================================================ FILE: ui/src/lib/components/dashboard/time-picker/DualCalendar.svelte ================================================
================================================ FILE: ui/src/lib/components/dashboard/time-picker/PresetList.svelte ================================================
{#each recentPresets as preset} {/each}
{#each namedPresets as preset} {/each}
{#each durationPresets as preset} {/each}
================================================ FILE: ui/src/lib/components/dashboard/time-picker/TimeInput.svelte ================================================
{label}:
update('h', e.currentTarget.value)} onfocus={selectOnFocus} /> : update('m', e.currentTarget.value)} onfocus={selectOnFocus} /> : update('s', e.currentTarget.value)} onfocus={selectOnFocus} />
================================================ FILE: ui/src/lib/components/dashboard/time-picker/TimezoneSelect.svelte ================================================
Timezone:
================================================ FILE: ui/src/lib/components/editor/InsightsPanel.svelte ================================================
Runtime
{elapsedMs > 0 ? formatElapsed(elapsedSeconds) : '\u2014'}
{formatNumber(data.length)} rows returned
Inline Profiling
{rowsPerSec > 0 ? formatNumber(Math.round(rowsPerSec)) : '0'} rows/s
{throughputBytesPerSec > 0 ? formatBytes(throughputBytesPerSec) : '0 B'}/s (estimated)
Streaming Viewer
{formatNumber(streamRows || data.length)} rows, {formatNumber(streamChunks)} chunks
{#if running} Live ingest running... {:else if streamLastChunkAt} Last chunk at {new Date(streamLastChunkAt).toLocaleTimeString()} {:else} No stream events yet {/if}
Estimate vs Actual
{#if estimate && estimate.success && !estimate.error && stats} {@const actualRows = Number(stats.rows_read ?? 0)} {@const estimatedRows = estimate.total_rows} {@const accuracy = estimatedRows > 0 ? Math.round((Math.min(actualRows, estimatedRows) / Math.max(actualRows, estimatedRows)) * 100) : 0}
Estimated
{formatNumber(estimatedRows)} rows
Actual
{formatNumber(actualRows)} rows
{accuracy}%
{:else if estimate && estimate.success && !estimate.error && !stats}
{formatNumber(estimate.total_rows)} rows · {estimate.total_parts} parts · {formatNumber(estimate.total_marks)} marks
Run the query to compare with actual.
{:else}
No estimate available. Type a SELECT query.
{/if}
Sampling
Mode: {samplingMode ?? 'none'}
Query Plan Visualizer
{#if planLoading}
Loading query plan...
{:else if planError}
{planError}
{:else if planFlow.length > 0}
Source: {planSource} · {formatNumber(planFlow.length)} stages
{#each planFlow as node, i (node.id)}
{#if i < planFlow.length - 1}
{/if}
{node.index}
{node.title}
{#if node.detail}
{node.detail}
{/if}
{/each}
{:else}
No plan loaded yet.
{/if}
Columnar Memory View
{#if columnMemory.length === 0}
Run a query with rows to estimate per-column memory.
{:else}
{#each columnMemory as col}
{col.name} {formatBytes(col.bytes)} ({col.pct.toFixed(1)}%)
{col.type} · avg {formatBytes(col.avgBytes)}/row
{/each}
{/if}
Histogram Per Column
histogramColumn = v} placeholder="Numeric column" />
{#if !histogram || histogram.length === 0}
No numeric values available for histogram.
{:else}
{#each histogram as bin}
{bin.from.toFixed(2)}
{bin.count}
{/each}
{/if}
Inline Profile Events
{#if profileLoading}
Loading profile from system.query_log...
{:else if profileError}
{profileError}
{:else if profileAvailable && profile}
Duration
{profileNumber('query_duration_ms')} ms
Memory
{formatBytes(profileNumber('memory_usage'))}
Read Rows
{formatNumber(profileNumber('read_rows'))}
Read Bytes
{formatBytes(profileNumber('read_bytes'))}
Result Rows
{formatNumber(profileNumber('result_rows'))}
Selected Marks
{formatNumber(profileNumber('selected_marks'))}
{:else}
{profileReason ?? 'No profile row available yet.'}
{/if}
{#if stats}
Stream started: {streamStartedAt ? new Date(streamStartedAt).toLocaleTimeString() : '\u2014'} · rows_read: {formatNumber(Number(stats.rows_read ?? 0))} · bytes_read: {formatBytes(Number(stats.bytes_read ?? 0))}
{/if}
================================================ FILE: ui/src/lib/components/editor/ResultFooter.svelte ================================================
{#each tabs as tab} {/each}
{formatNumber(rowCount)} rows {#if elapsedMs > 0} {formatElapsed(elapsedMs / 1000)} {/if} {#if streamChunks > 0} {formatNumber(streamRows)} streamed {formatNumber(streamChunks)} chunks {/if} {#if stats} {#if stats.rows_read} {formatNumber(stats.rows_read)} read {/if} {#if stats.bytes_read} {formatBytes(stats.bytes_read)} {/if} {/if}
{#if getMaxResultRows() > 10000} {/if} Max rows setMaxResultRows(parseInt(e.currentTarget.value) || 1000)} />
{#if copyMenuOpen}
{#each formatOptions as option} {/each}
{/if}
{#if downloadMenuOpen}
{#each formatOptions as option} {/each}
{/if}
================================================ FILE: ui/src/lib/components/editor/ResultPanel.svelte ================================================
{#if loading}
Executing query...
{:else if error}
{error}
{:else if meta.length === 0}

Run a query to see results

Cmd/Ctrl+Enter to execute

{:else} {#if activeTab === 'data'} {:else if activeTab === 'stats'} {:else if activeTab === 'insights'} {:else} {/if} activeTab = tab} {meta} {data} {stats} {elapsedMs} {streamRows} {streamChunks} /> {/if}
================================================ FILE: ui/src/lib/components/editor/SchemaPanel.svelte ================================================
{#each meta as col, i} {@const dt = getDisplayType(col.type)} {/each}
# Column Name ClickHouse Type Category
{i + 1} {col.name} {col.type} {dt}
================================================ FILE: ui/src/lib/components/editor/SqlEditor.svelte ================================================
================================================ FILE: ui/src/lib/components/editor/StatsPanel.svelte ================================================
{#each stats as col, i} {/each}
Name Type Count Nulls Null% Min Max Avg Sum Distinct
{col.name} {col.displayType} {formatNumber(col.count)} {formatNumber(col.nulls)} {pct(col.nullPct)} {#if col.displayType === 'number'} {fmt(col.min)} {:else if col.displayType === 'string'} {col.minLen !== undefined ? fmt(col.minLen) + ' ch' : '\u2014'} {:else if col.displayType === 'date'} {col.earliest ?? '\u2014'} {:else} {'\u2014'} {/if} {#if col.displayType === 'number'} {fmt(col.max)} {:else if col.displayType === 'string'} {col.maxLen !== undefined ? fmt(col.maxLen) + ' ch' : '\u2014'} {:else if col.displayType === 'date'} {col.latest ?? '\u2014'} {:else} {'\u2014'} {/if} {#if col.displayType === 'number'} {fmt(col.avg)} {:else if col.displayType === 'string'} {col.avgLen !== undefined ? fmt(col.avgLen) + ' ch' : '\u2014'} {:else} {'\u2014'} {/if} {fmt(col.sum)} {col.distinct !== undefined ? formatNumber(col.distinct) : '\u2014'}
================================================ FILE: ui/src/lib/components/editor/Toolbar.svelte ================================================
{#if running && oncancel} {:else} {/if} {#if onformat} {/if} {#if onexplain} {/if} {#if estimateLabel}
{estimateLabel}
{:else if estimateLoading}
Estimating...
{/if}
{#if onsave} {/if}
================================================ FILE: ui/src/lib/components/explorer/DataPreview.svelte ================================================
{database}.{table}
{#if loading && meta.length === 0}
Loading...
{:else if error}
{error}
{:else if meta.length > 0} {#if totalRows > pageSize} {/if} {:else}
Select a table to preview data
{/if}
================================================ FILE: ui/src/lib/components/explorer/DatabaseTree.svelte ================================================ e.key === "Escape" && closeMenu()} />
openContextMenu(e, { kind: "root" })} >
{#if searchTerm} {/if}
{#if !canManageSchema}

Schema create/delete actions require admin role

{/if}
{#if loading && databases.length === 0}
{:else if filteredDatabases.length === 0}
{searchTerm ? "No matches" : "No databases"}
{:else} {#each filteredDatabases as db}
openContextMenu(e, { kind: "database", database: db.name })} >
{#if db.expanded && db.tables} {#each db.tables as table}
openContextMenu(e, { kind: "table", database: db.name, table: table.name, })} >
{#if table.expanded && table.columns} {#each table.columns as col}
{col.name} {col.type}
{/each} {/if}
{/each} {/if}
{/each} {/if}
(createDatabaseSheetOpen = false)} >
{ e.preventDefault(); submitCreateDatabase(); }} >
Database Name
Database Engine
(createDatabaseForm = { ...createDatabaseForm, engine: value })} placeholder="Select engine" />
Cluster (optional)
(createDatabaseForm = { ...createDatabaseForm, onCluster: value })} placeholder={clustersLoading ? "Loading clusters..." : "No cluster"} disabled={clustersLoading} />
Creates a database with selected engine. If cluster is set, operation runs with ON CLUSTER.
(createTableSheetOpen = false)} >
{ e.preventDefault(); submitCreateTable(); }} >
Database
(createTableForm = { ...createTableForm, database: value })} placeholder="Select database" />
Table Name
Engine
(createTableForm = { ...createTableForm, engine: value })} placeholder="Select engine" />
Cluster (optional)
(createTableForm = { ...createTableForm, onCluster: value })} placeholder={clustersLoading ? "Loading clusters..." : "No cluster"} disabled={clustersLoading} />

Columns

Type is selected from ClickHouse data types (parametric families are prefilled with valid templates).

{#each createTableForm.columns as col} {/each}
Name Type Default Expression Comment Actions
updateTableColumn(col.id, { name: (e.currentTarget as HTMLInputElement).value, })} placeholder="column_name" /> updateTableColumn(col.id, { type: value })} placeholder={dataTypesLoading ? "Loading types..." : "Select type"} disabled={dataTypesLoading} /> updateTableColumn(col.id, { defaultExpression: (e.currentTarget as HTMLInputElement) .value, })} placeholder="now()" /> updateTableColumn(col.id, { comment: (e.currentTarget as HTMLInputElement).value, })} placeholder="Business meaning" />
ORDER BY
PARTITION BY
PRIMARY KEY
SAMPLE BY
TTL
SETTINGS
Table Comment (optional)
Command Preview
{buildCreateTableCommandPreview()}
{#if createTableErrorMessage}
Create Table Error
{createTableErrorMessage}
{/if}
(deleteDatabaseSheetOpen = false)} >
{ e.preventDefault(); submitDeleteDatabase(); }} >
This will permanently delete {deleteDatabaseForm.name} and all tables inside it.
Cluster (optional)
(deleteDatabaseForm = { ...deleteDatabaseForm, onCluster: value })} placeholder={clustersLoading ? "Loading clusters..." : "No cluster"} disabled={clustersLoading} />
Type database name to confirm
(uploadSheetOpen = false)} >
{ e.preventDefault(); submitUpload(); }} >

Source File

Accepted formats: CSV, Parquet, JSON, JSONL.

Rows detected
{uploadRowsDetected || "—"}
Format
{uploadSourceFormat || "—"}
Columns
{uploadColumns.length || "—"}

Target

Database
{#if uploadForm.mode === "new"}
Table Name
Engine
(uploadForm = { ...uploadForm, engine: value })} placeholder="Select engine" />
Cluster (optional)
(uploadForm = { ...uploadForm, onCluster: value })} placeholder={clustersLoading ? "Loading clusters..." : "No cluster"} disabled={clustersLoading} />
{:else}
Target Table
(uploadForm = { ...uploadForm, existingTable: value })} placeholder={uploadTablesLoading ? "Loading tables..." : "Select table"} disabled={uploadTablesLoading || !uploadForm.database} />
{/if}
{#if uploadForm.mode === "new"}
ORDER BY
PARTITION BY
PRIMARY KEY
Table Comment (optional)
{/if}

Discovered Columns

Edit inferred types before upload if needed.
{#if uploadColumns.length === 0}
Run "Discover Schema" to populate columns.
{:else}
{#each uploadColumns as col} {/each}
Name Type Sample
updateUploadColumn(col.id, { name: (e.currentTarget as HTMLInputElement).value, })} /> updateUploadColumn(col.id, { type: value })} placeholder={dataTypesLoading ? "Loading types..." : "Select type"} disabled={dataTypesLoading} /> {col.sample || "—"}
{/if}
Preview
{#if uploadPreviewRows.length === 0}
No preview rows yet.
{:else}
{JSON.stringify(
            uploadPreviewRows.slice(0, 5),
            null,
            2,
          )}
{/if}
{#if uploadErrorMessage}
Upload Error
{uploadErrorMessage}
{/if} {#if uploadCreateSQL || uploadInsertSQL}
Executed Commands
{#if uploadCreateSQL}
CREATE TABLE
{uploadCreateSQL}
{/if} {#if uploadInsertSQL}
INSERT (sample/batch)
{uploadInsertSQL}
{/if}
{/if}
(deleteTableSheetOpen = false)} >
{ e.preventDefault(); submitDeleteTable(); }} >
This will permanently delete {deleteTableForm.database}.{deleteTableForm.name}.
Cluster (optional)
(deleteTableForm = { ...deleteTableForm, onCluster: value })} placeholder={clustersLoading ? "Loading clusters..." : "No cluster"} disabled={clustersLoading} />
Type full name to confirm
================================================ FILE: ui/src/lib/components/governance/LineageGraph.svelte ================================================ ================================================ FILE: ui/src/lib/components/governance/LineageTableNode.svelte ================================================
{data.database}
{data.table}
{#if data.nodeType === 'materialized_view'} MV {:else if data.nodeType === 'view'} View {/if}
{#if data.columns && data.columns.length > 0} {#if expanded}
{#each data.columns as col}
{col.column_name} {col.column_type}
{/each}
{/if} {/if}
================================================ FILE: ui/src/lib/components/layout/CommandPalette.svelte ================================================ {#if open}
ESC
{#if filtered.length === 0}
No command found
{:else} {#each filtered as item, idx (item.id)} {/each} {/if}
Use ↑ ↓ to navigate, Enter to run.
{/if} ================================================ FILE: ui/src/lib/components/layout/Shell.svelte ================================================
{#each groups as group, i (group.id)}
setFocusedGroup(group.id)} onkeydown={(e) => { // Only treat Space/Enter as pane activation when the pane itself is focused. if (e.target !== e.currentTarget) return if (e.key === 'Enter' || e.key === ' ') { e.preventDefault() setFocusedGroup(group.id) } }} role="button" tabindex="0" >
{#if split && i === 0}
splitPercent = 50} >
{/if} {/each} {#if edgeSplitVisible}
handleEdgeDragOver('left', e)} ondrop={(e) => handleEdgeDrop('left', e)} >
handleEdgeDragOver('right', e)} ondrop={(e) => handleEdgeDrop('right', e)} >
{/if}
{#if resizing}
{/if} ================================================ FILE: ui/src/lib/components/layout/Sidebar.svelte ================================================
{#if !collapsed}
{/if}
{#if dragging}
{/if} ================================================ FILE: ui/src/lib/components/layout/TabBar.svelte ================================================ { if (e.key === 'Escape') hideTabMenu() handleGlobalTabShortcuts(e) }} />
{#each tabs as tab, i (tab.id)} {@const Icon = getIcon(tab)}
handleTabClick(tab.id)} oncontextmenu={(e) => openTabMenu(e, tab.id)} ondblclick={() => tab.type === 'query' && !isHomeTab(tab) && startRename(tab)} onmousedown={(e) => handleMiddleClick(e, tab.id)} onkeydown={(e) => { if (e.key === 'Enter' || e.key === ' ') { e.preventDefault() handleTabClick(tab.id) } }} ondragstart={(e) => handleDragStart(e, tab)} ondragover={(e) => handleDragOver(e, i)} ondrop={(e) => handleDrop(e, i)} ondragend={handleDragEnd} role="tab" tabindex="0" aria-selected={tab.id === activeId} title={tab.name} > {#if editingTabId === tab.id} e.stopPropagation()} /> {:else} {#if isHomeTab(tab)} {tab.name} {:else} {tab.name} {/if} {/if} {#if (tab.type === 'query' && tab.dirty) || (tab.type === 'model' && (tab as ModelTab).dirty)} {/if} {#if getTabs().length > 1 && !isHomeTab(tab)} {/if} {#if !isHomeTab(tab)} {/if}
{/each} {#if !split && dragTabId}
Drop to Split
{/if}
================================================ FILE: ui/src/lib/components/layout/TabContent.svelte ================================================
{#if !activeTab}
Open a query or select a table to get started
{:else if requiresPro && !proActive} {#if licenseLoading || !licenseChecked}
Checking license...
{:else} {/if} {:else if activeTab.type === 'query'} {#key activeTab.id} {/key} {:else if activeTab.type === 'table'} {#key activeTab.id} {/key} {:else if activeTab.type === 'database'} {#key activeTab.id} {/key} {:else if activeTab.type === 'saved-queries'} {:else if activeTab.type === 'settings'} {:else if activeTab.type === 'dashboards'} {:else if activeTab.type === 'dashboard'} {#key activeTab.id} {/key} {:else if activeTab.type === 'schedules'} {:else if activeTab.type === 'brain'} {:else if activeTab.type === 'admin'} {:else if activeTab.type === 'governance'} {:else if activeTab.type === 'pipelines'} {:else if activeTab.type === 'model'} {#key activeTab.id} {/key} {:else if activeTab.type === 'models'} {:else if activeTab.type === 'home'} {/if}
================================================ FILE: ui/src/lib/components/layout/TabGroup.svelte ================================================
================================================ FILE: ui/src/lib/components/layout/content/DatabaseContent.svelte ================================================
{tab.database}
{#each subTabs as st} {/each}
{#if activeSubTab === 'overview'} {#if infoLoading}
Loading database info...
{:else if infoError}
{infoError}
{:else}
{#each metrics as m} {@const Icon = m.icon}
{m.label}
{m.value}
{/each}

Details

Name {dbInfo.name ?? tab.database} Engine {dbInfo.engine ?? '—'} Data Path {dbInfo.data_path ?? '—'} Metadata Path {dbInfo.metadata_path ?? '—'} Last Modified {formatDate(dbInfo.last_modified)}
{/if} {:else if activeSubTab === 'tables'} {#if tablesLoading}
Loading tables...
{:else if tablesError}
{tablesError}
{:else if tablesMeta.length > 0} {:else}
No tables found
{/if} {/if}
================================================ FILE: ui/src/lib/components/layout/content/ModelContent.svelte ================================================
updateModelTabEdit(tab.id, { modelName: (e.target as HTMLInputElement).value })} class="text-sm font-semibold bg-transparent border-0 border-b border-transparent hover:border-gray-300 dark:hover:border-gray-600 focus:border-orange-400 focus:outline-none text-gray-800 dark:text-gray-200 px-1 py-0.5 min-w-[120px] max-w-[200px]" placeholder="model_name" /> | updateModelTabEdit(tab.id, { targetDatabase: (e.target as HTMLInputElement).value })} class="text-xs bg-transparent border border-gray-300 dark:border-gray-600 rounded px-1.5 py-0.5 w-24 text-gray-700 dark:text-gray-300 focus:border-orange-400 focus:outline-none" /> |
{#if tab.edit.materialization === 'table'} | updateModelTabEdit(tab.id, { orderBy: (e.target as HTMLInputElement).value })} placeholder="ORDER BY" class="text-[10px] bg-transparent border border-gray-300 dark:border-gray-600 rounded px-1.5 py-0.5 w-24 text-gray-700 dark:text-gray-300 focus:border-orange-400 focus:outline-none" /> {/if}
{#if showDescription}
{/if}
Use $ref(model_name) to reference other models | View = computed on read, Table = snapshot on run
{#if running || runLoading}
Running model...
{:else if runResult}
{#if runResult.status === 'success'} {:else if runResult.status === 'error'} {:else} {/if} {runResult.status} {runResult.elapsed_ms}ms {#if runResult.finished_at} {new Date(runResult.finished_at).toLocaleString()} {/if}
{#if runResult.error}
{runResult.error}
{/if} {#if runResult.resolved_sql}
Resolved SQL
{runResult.resolved_sql}
{/if}
{:else}
Run this model to see results
{/if}
{#if dragging}
{/if} ================================================ FILE: ui/src/lib/components/layout/content/QueryContent.svelte ================================================
handleRun()} oncancel={handleCancel} onformat={handleFormat} onexplain={handleExplain} onsave={handleSaveClick} {estimate} {estimateLoading} />
{#if dragging}
{/if} {#if showSaveModal}
showSaveModal = false} onkeydown={(e) => e.key === 'Escape' && (showSaveModal = false)} >
e.stopPropagation()} onkeydown={(e) => e.stopPropagation()} tabindex="-1" >

Save Query

{/if} ================================================ FILE: ui/src/lib/components/layout/content/TableContent.svelte ================================================
{tab.database}. {tab.table}
{#each subTabs as st} {/each}
{#if activeSubTab === 'overview'} {#if infoLoading}
Loading table info...
{:else if infoError}
{infoError}
{:else}
{#each metrics as m} {@const Icon = m.icon}
{m.label}
{m.value}
{/each}
{#if hasKeys}

Storage Keys

{#if tableInfo.partition_key} Partition Key {tableInfo.partition_key} {/if} {#if tableInfo.sorting_key} Sorting Key {tableInfo.sorting_key} {/if} {#if tableInfo.primary_key} Primary Key {tableInfo.primary_key} {/if} {#if tableInfo.sampling_key} Sampling Key {tableInfo.sampling_key} {/if}
{/if} {#if tableInfo.create_table_query}

Create Table SQL

Syntax highlighted
{/if} {/if} {:else if activeSubTab === 'schema'} {#if schemaLoading}
Loading schema...
{:else if schemaError}
{schemaError}
{:else if schemaMeta.length > 0}
schemaCopyMenuOpen = false} /> {:else}
No schema data
{/if} {:else if activeSubTab === 'data'} {/if}
================================================ FILE: ui/src/lib/components/models/ModelNode.svelte ================================================
{data.name}
{data.materialization} | {data.target_database}
================================================ FILE: ui/src/lib/components/pipelines/NodeConfigPanel.svelte ================================================

Node Config

handleLabelChange((e.target as HTMLInputElement).value)} class="w-full rounded-lg border border-gray-300 dark:border-gray-700 bg-white dark:bg-gray-800 px-2.5 py-1.5 text-xs text-gray-900 dark:text-gray-100 focus:outline-none focus:ring-2 focus:ring-orange-500" />

{nodeType.replace('source_', '').replace('sink_', '')} Settings

{#each fields as field (field.key)}
{#if field.type === 'text' || field.type === 'password'} handleChange(field.key, (e.target as HTMLInputElement).value)} class="w-full rounded-lg border border-gray-300 dark:border-gray-700 bg-white dark:bg-gray-800 px-2.5 py-1.5 text-xs text-gray-900 dark:text-gray-100 focus:outline-none focus:ring-2 focus:ring-orange-500" /> {:else if field.type === 'number'} handleChange(field.key, Number((e.target as HTMLInputElement).value))} class="w-full rounded-lg border border-gray-300 dark:border-gray-700 bg-white dark:bg-gray-800 px-2.5 py-1.5 text-xs text-gray-900 dark:text-gray-100 focus:outline-none focus:ring-2 focus:ring-orange-500" /> {:else if field.type === 'select'} {:else if field.type === 'textarea'} {:else if field.type === 'toggle'} {#if field.key === 'auth_enabled' && getFieldValue(field) && localConfig['auth_token']}

Use header: Authorization: Bearer {String(localConfig['auth_token']).slice(0, 8)}...

{/if} {:else if field.type === 'info'} {@const webhookUrl = `${typeof window !== 'undefined' ? window.location.origin : ''}/api/pipelines/webhook/${pipelineId}`}
{/if}
{/each}
================================================ FILE: ui/src/lib/components/pipelines/PipelineCanvas.svelte ================================================

Sources

{#each SOURCE_NODE_TYPES as source} {@const Icon = sourceIcons[source.type] || Radio}
onDragStart(e, source.type)} role="button" tabindex={0} >
{source.label}
{source.description}
{/each}

Sinks

{#each SINK_NODE_TYPES as sink}
onDragStart(e, sink.type)} role="button" tabindex={0} >
{sink.label}
{sink.description}
{/each}
================================================ FILE: ui/src/lib/components/pipelines/PipelineEditor.svelte ================================================
{#if loading}
{:else if pipeline}
{ nodes = updated }} onEdgesChange={(updated) => { edges = updated }} onConnect={handleConnect} onNodeClick={handleNodeClick} onPaneClick={handlePaneClick} /> {#if selectedNode} } {pipelineId} onUpdate={handleNodeConfigUpdate} onClose={() => { selectedNodeId = null }} /> {/if}
{ if (pipeline) pipeline = { ...pipeline, status: newStatus } }} /> {:else}
Pipeline not found
{/if}
================================================ FILE: ui/src/lib/components/pipelines/PipelineList.svelte ================================================

Pipelines

{pipelines.length} total
{#if loading}
{:else if filtered.length === 0}
{#if search.trim()}

No pipelines match "{search}"

{:else}

No pipelines yet

Create a pipeline to start ingesting data into ClickHouse

{/if}
{:else}
{#each filtered as pipeline (pipeline.id)}
onSelect(pipeline.id)} onkeydown={(e) => { if (e.key === 'Enter') onSelect(pipeline.id) }} role="button" tabindex={0} >

{pipeline.name}

{#if pipeline.description}

{pipeline.description}

{/if}
{#if pipeline.status === 'running'} {:else if pipeline.status === 'error'} {:else if pipeline.status === 'starting' || pipeline.status === 'stopping'} {:else} {/if} {pipeline.status}
Updated {formatDate(pipeline.updated_at)}
e.stopPropagation()}> {#if pipeline.status === 'running' || pipeline.status === 'starting'} {:else} {/if}
{#if pipeline.last_error && pipeline.status === 'error'}
{pipeline.last_error}
{/if}
{/each}
{/if}
{ if (confirmDelete) { onDelete(confirmDelete.id) confirmDelete = null } }} oncancel={() => { confirmDelete = null }} /> ================================================ FILE: ui/src/lib/components/pipelines/PipelineStatusBar.svelte ================================================ {#if isRunning || rowsIngested > 0}
{#if isRunning} Running {/if} {formatNumber(rowsIngested)} rows {formatBytes(bytesIngested)} {formatNumber(batchesSent)} batches {#if errorsCount > 0} {formatNumber(errorsCount)} errors {/if}
{/if} ================================================ FILE: ui/src/lib/components/pipelines/PipelineToolbar.svelte ================================================

{pipelineName}

{status}
{#if isRunning} {:else} {/if}
================================================ FILE: ui/src/lib/components/pipelines/nodes/SinkNode.svelte ================================================
{data.label}
ClickHouse
================================================ FILE: ui/src/lib/components/pipelines/nodes/SourceNode.svelte ================================================
{data.label}
{data.node_type.replace('source_', '')}
================================================ FILE: ui/src/lib/components/table/Pagination.svelte ================================================
{formatNumber(from)}-{formatNumber(to)} of {formatNumber(totalRows)} rows
Page {page + 1} of {totalPages}
================================================ FILE: ui/src/lib/components/table/TableCell.svelte ================================================ {#if isNull} Null {:else if displayType === 'bool'} {String(value)} {:else if isUrl} {formatted} {:else if displayType === 'json'} {formatted} {:else if displayType === 'number' || displayType === 'date'} {formatted} {:else} {formatted} {/if} ================================================ FILE: ui/src/lib/components/table/TableHeader.svelte ================================================ onfitall?.()} title="Double-click to auto-fit all columns" ># {#each columns as col, i} {#if onsort} {:else}
{col.name} {compactTypeLabel(col.type)}
{/if} {#if onresize} {/if} {/each} ================================================ FILE: ui/src/lib/components/table/VirtualTable.svelte ================================================
{#each effectiveWidths as width} {/each} {#if rowCount > 0} {#each visibleRows as row, vi (startIdx + vi)} {@const absIdx = startIdx + vi} selectedRow = absIdx} > {#each meta as col, ci} {/each} {/each} {/if}
{absIdx + 1}
{#if rowCount === 0 && meta.length > 0}

No rows returned

Your query executed successfully, but nothing matched the current filters.

{/if}
================================================ FILE: ui/src/lib/editor/completions.ts ================================================
import type { CompletionContext, CompletionResult, Completion } from '@codemirror/autocomplete'
import { snippetCompletion } from '@codemirror/autocomplete'
import { fetchCompletions, listColumns, listTables } from '../api/query'
import { getDatabases, loadDatabases } from '../stores/schema.svelte'
import type { Column } from '../types/schema'

// NOTE(review): generic type arguments in this file were reconstructed from
// usage (the extracted source had them stripped) — confirm against git history.

// ── Cached server completions ───────────────────────────────────
let cachedFunctions: string[] | null = null
let cachedKeywords: string[] | null = null
let fetchPromise: Promise<void> | null = null

// ── Cached schema metadata for autocomplete ─────────────────────
let dbFetchPromise: Promise<void> | null = null
const tableCache = new Map<string, string[]>()          // db name -> table names
const columnCache = new Map<string, Column[]>()         // "db.table" -> columns
const tableFetches = new Map<string, Promise<void>>()   // in-flight table fetches
const columnFetches = new Map<string, Promise<void>>()  // in-flight column fetches

// ── Model name cache for $ref() autocomplete ────────────────────
let cachedModelNames: { name: string; materialization: string; target_database: string }[] | null = null
let modelFetchPromise: Promise<void> | null = null

// Load model names once; concurrent callers share the in-flight request.
async function ensureModelsLoaded(): Promise<void> {
  if (cachedModelNames !== null) return
  if (modelFetchPromise) {
    await modelFetchPromise
    return
  }
  modelFetchPromise = (async () => {
    try {
      // Dynamic import keeps the models API out of the editor's initial bundle.
      const { listModels } = await import('../api/models')
      const res = await listModels()
      cachedModelNames = (res.models ?? []).map((m: { name: string; materialization: string; target_database: string }) => ({
        name: m.name,
        materialization: m.materialization,
        target_database: m.target_database,
      }))
    } catch {
      // Best-effort: autocomplete degrades gracefully when the endpoint fails.
      cachedModelNames = []
    } finally {
      modelFetchPromise = null
    }
  })()
  await modelFetchPromise
}

/** Invalidate model name cache (call after create/delete/rename). */
export function refreshModelCache(): void {
  cachedModelNames = null
}

// Detect whether the cursor sits inside `$ref(` and where the partial name starts.
function detectRefContext(doc: string, pos: number): { inside: boolean; nameStart: number } {
  const before = doc.slice(Math.max(0, pos - 100), pos)
  const match = before.match(/\$ref\(\s*([\w]*)$/)
  if (!match) return { inside: false, nameStart: pos }
  return { inside: true, nameStart: pos - match[1].length }
}

function buildModelCompletions(): Completion[] {
  return (cachedModelNames ?? []).map((m) => ({
    label: m.name,
    type: 'class',
    detail: `${m.materialization} \u2192 ${m.target_database}`,
    boost: 50,
  }))
}

type SqlContext = 'table' | 'column' | 'dot' | 'function' | 'default'

interface TableRef {
  db: string
  table: string
}

// Strip quoting characters from an identifier.
function normalizeIdent(id: string): string {
  return id.replace(/[`"']/g, '').trim()
}

// Parse "db.table" or bare "table" into a TableRef (empty db means "resolve later").
function parseTableRef(ref: string): TableRef | null {
  const clean = normalizeIdent(ref)
  if (!clean) return null
  const parts = clean.split('.')
  if (parts.length >= 2) {
    return {
      db: normalizeIdent(parts[0]),
      table: normalizeIdent(parts[1]),
    }
  }
  // Table without DB; resolve later using known databases.
  return {
    db: '',
    table: normalizeIdent(parts[0]),
  }
}

function tableKey(db: string, table: string): string {
  return `${db}.${table}`
}

// Fetch server-provided functions/keywords once; shared in-flight promise.
async function ensureFunctionKeywordCache(): Promise<void> {
  if (cachedFunctions !== null && cachedKeywords !== null) return
  if (fetchPromise) {
    await fetchPromise
    return
  }
  fetchPromise = fetchCompletions()
    .then(({ functions, keywords }) => {
      cachedFunctions = functions
      cachedKeywords = keywords
    })
    .catch(() => {
      cachedFunctions = []
      cachedKeywords = []
    })
  await fetchPromise
}

async function ensureDatabasesLoaded(): Promise<void> {
  if (getDatabases().length > 0) return
  if (dbFetchPromise) {
    await dbFetchPromise
    return
  }
  dbFetchPromise = loadDatabases().catch(() => {})
  await dbFetchPromise
}

// Populate tableCache for a database, preferring data already in the schema store.
async function ensureTablesCached(dbName: string): Promise<void> {
  if (tableCache.has(dbName)) return
  const dbInStore = getDatabases().find((d) => d.name === dbName)
  if (dbInStore?.tables) {
    tableCache.set(dbName, dbInStore.tables.map((t) => t.name))
    return
  }
  const existing = tableFetches.get(dbName)
  if (existing) {
    await existing
    return
  }
  const p = listTables(dbName)
    .then((tables) => {
      tableCache.set(dbName, tables)
    })
    .catch(() => {
      tableCache.set(dbName, [])
    })
    .finally(() => {
      tableFetches.delete(dbName)
    })
  tableFetches.set(dbName, p)
  await p
}

// Populate columnCache for a table, preferring data already in the schema store.
async function ensureColumnsCached(dbName: string, tableName: string): Promise<void> {
  const key = tableKey(dbName, tableName)
  if (columnCache.has(key)) return
  const dbInStore = getDatabases().find((d) => d.name === dbName)
  const tableInStore = dbInStore?.tables?.find((t) => t.name === tableName)
  if (tableInStore?.columns) {
    columnCache.set(key, tableInStore.columns)
    return
  }
  const existing = columnFetches.get(key)
  if (existing) {
    await existing
    return
  }
  const p = listColumns(dbName, tableName)
    .then((cols) => {
      columnCache.set(key, cols)
    })
    .catch(() => {
      columnCache.set(key, [])
    })
    .finally(() => {
      columnFetches.delete(key)
    })
  columnFetches.set(key, p)
  await p
}

// Classify the SQL context at the cursor by scanning up to 1000 chars back.
function detectContext(doc: string, pos: number): SqlContext {
  const before = doc.slice(Math.max(0, pos - 1000), pos)
  if (/([`"\w]+)\.([`"\w]*)$/i.test(before)) return 'dot'
  if (/\b(?:FROM|JOIN|INTO|UPDATE|TABLE|DATABASE)\s+[`"\w.]*$/i.test(before)) return 'table'
  if (/\b(?:SELECT|WHERE|ORDER\s+BY|GROUP\s+BY|HAVING|AND|OR|ON|USING|SET|WITH|BY)\s+[`"\w.]*$/i.test(before)) {
    return 'column'
  }
  if (/\b\w+\(\s*[\w.`"]*$/i.test(before)) return 'function'
  return 'default'
}

// Map alias -> table reference from FROM/JOIN clauses in the document.
function buildAliasMap(doc: string): Map<string, string> {
  const aliases = new Map<string, string>()
  const regex = /(?:FROM|JOIN)\s+([`"\w]+(?:\.[`"\w]+)?)(?:\s+(?:AS\s+)?([`"\w]+))?/gi
  let match: RegExpExecArray | null
  while ((match = regex.exec(doc)) !== null) {
    const tableRef = normalizeIdent(match[1])
    const alias = normalizeIdent(match[2] ?? '')
    if (alias) aliases.set(alias, tableRef)
  }
  return aliases
}

// All table references mentioned in FROM/JOIN clauses, deduplicated.
function buildReferencedTables(doc: string): string[] {
  const out = new Set<string>()
  const regex = /(?:FROM|JOIN)\s+([`"\w]+(?:\.[`"\w]+)?)/gi
  let match: RegExpExecArray | null
  while ((match = regex.exec(doc)) !== null) {
    const tableRef = normalizeIdent(match[1])
    if (tableRef) out.add(tableRef)
  }
  return [...out]
}

function knownDatabases(): string[] {
  const fromStore = getDatabases().map((d) => d.name)
  return [...new Set([...fromStore, ...tableCache.keys()])]
}

function findTablesForDatabase(dbName: string): string[] {
  const fromStore = getDatabases().find((d) => d.name === dbName)?.tables?.map((t) => t.name) ?? []
  const fromCache = tableCache.get(dbName) ?? []
  return [...new Set([...fromStore, ...fromCache])]
}

function findColumns(dbName: string, tableName: string): Column[] {
  const key = tableKey(dbName, tableName)
  const fromCache = columnCache.get(key) ?? []
  if (fromCache.length > 0) return fromCache
  const fromStore =
    getDatabases().find((d) => d.name === dbName)?.tables?.find((t) => t.name === tableName)?.columns ?? []
  return fromStore
}

// Score `term` against `text`: exact > prefix > word-boundary > substring >
// in-order subsequence (bonus after . _ or space). Negative means "no match".
function fuzzyScore(text: string, term: string): number {
  const t = text.toLowerCase()
  const q = term.toLowerCase().trim()
  if (!q) return 1
  if (t === q) return 180
  if (t.startsWith(q)) return 130
  if (t.includes(`.${q}`)) return 110
  if (t.includes(` ${q}`)) return 95
  if (t.includes(q)) return 70
  let qi = 0
  let seqScore = 0
  for (let i = 0; i < t.length && qi < q.length; i++) {
    if (t[i] === q[qi]) {
      seqScore += i > 0 && /[._\s]/.test(t[i - 1]) ? 8 : 3
      qi++
    }
  }
  return qi === q.length ? seqScore : -1
}

// Rank by fuzzy score + per-item boost, drop non-matches, cap at 300 options.
function rankCompletions(items: Completion[], term: string): Completion[] {
  return items
    .map((item) => {
      const hay = `${item.label} ${item.detail ?? ''}`
      const score = fuzzyScore(hay, term)
      return { item, score: score + (item.boost ?? 0) }
    })
    .filter((x) => x.score >= 0)
    .sort((a, b) => b.score - a.score)
    .slice(0, 300)
    .map((x) => x.item)
}

function dedupeCompletions(items: Completion[]): Completion[] {
  const seen = new Set<string>()
  const out: Completion[] = []
  for (const item of items) {
    const key = `${item.label}|${item.type}|${item.detail ?? ''}`
    if (seen.has(key)) continue
    seen.add(key)
    out.push(item)
  }
  return out
}

// Resolve a bare table name against every known database.
async function resolveUnqualifiedTableRefs(tableName: string): Promise<TableRef[]> {
  const refs: TableRef[] = []
  for (const db of knownDatabases()) {
    await ensureTablesCached(db)
    if (findTablesForDatabase(db).includes(tableName)) {
      refs.push({ db, table: tableName })
    }
  }
  return refs
}

// Completions after a "." — alias.column, db.table, or table.column.
async function buildDotCompletions(doc: string, beforeCursor: string): Promise<Completion[]> {
  const match = beforeCursor.match(/([`"\w]+)\.([`"\w]*)$/)
  if (!match) return []
  const lhs = normalizeIdent(match[1])
  const aliasMap = buildAliasMap(doc)
  const aliasTableRef = aliasMap.get(lhs)
  // alias.column -> resolve alias table and columns
  if (aliasTableRef) {
    const parsed = parseTableRef(aliasTableRef)
    if (!parsed) return []
    const refs: TableRef[] = parsed.db ? [parsed] : await resolveUnqualifiedTableRefs(parsed.table)
    const options: Completion[] = []
    for (const ref of refs) {
      await ensureColumnsCached(ref.db, ref.table)
      for (const col of findColumns(ref.db, ref.table)) {
        options.push({
          label: col.name,
          detail: `${col.type} (${ref.db}.${ref.table})`,
          type: 'property',
          boost: 22,
        })
      }
    }
    return options
  }
  // db.table -> suggest table list
  const dbNames = knownDatabases()
  if (dbNames.includes(lhs)) {
    await ensureTablesCached(lhs)
    return findTablesForDatabase(lhs).map((t) => ({
      label: t,
      detail: lhs,
      type: 'class',
      boost: 18,
    }))
  }
  // table.column (unqualified table name) -> resolve across dbs
  const refs = await resolveUnqualifiedTableRefs(lhs)
  const options: Completion[] = []
  for (const ref of refs) {
    await ensureColumnsCached(ref.db, ref.table)
    for (const col of findColumns(ref.db, ref.table)) {
      options.push({
        label: col.name,
        detail: `${col.type} (${ref.db}.${ref.table})`,
        type: 'property',
        boost: 16,
      })
    }
  }
  return options
}

function buildDatabaseCompletions(): Completion[] {
  return knownDatabases().map((db) => ({
    label: db,
    type: 'namespace',
    boost: 5,
  }))
}

// Table suggestions for FROM/JOIN position, both qualified and bare names.
async function buildTableCompletions(term: string): Promise<Completion[]> {
  const options: Completion[] = []
  const termClean = normalizeIdent(term)
  const dotIdx = termClean.indexOf('.')
  // If user typed "db." in FROM/JOIN context, prioritize tables of that DB
  if (dotIdx >= 0) {
    const dbName = termClean.slice(0, dotIdx)
    if (dbName) {
      await ensureTablesCached(dbName)
      for (const t of findTablesForDatabase(dbName)) {
        options.push({
          label: t,
          detail: dbName,
          type: 'class',
          boost: 24,
        })
      }
      return options
    }
  }
  for (const dbName of knownDatabases()) {
    await ensureTablesCached(dbName)
    for (const table of findTablesForDatabase(dbName)) {
      options.push({
        label: `${dbName}.${table}`,
        type: 'class',
        boost: 16,
      })
      options.push({
        label: table,
        detail: dbName,
        type: 'class',
        boost: 10,
      })
    }
  }
  return options
}

// Column suggestions drawn from every table referenced in the document.
async function buildReferencedColumnCompletions(doc: string): Promise<Completion[]> {
  const aliasMap = buildAliasMap(doc)
  const tableRefs = buildReferencedTables(doc)
  const options: Completion[] = []
  // map tableRef -> aliases for alias-qualified completion
  const refToAliases = new Map<string, string[]>()
  for (const [alias, ref] of aliasMap.entries()) {
    const list = refToAliases.get(ref) ?? []
    list.push(alias)
    refToAliases.set(ref, list)
  }
  for (const refRaw of tableRefs) {
    const parsed = parseTableRef(refRaw)
    if (!parsed) continue
    const refs: TableRef[] = parsed.db ? [parsed] : await resolveUnqualifiedTableRefs(parsed.table)
    for (const ref of refs) {
      await ensureColumnsCached(ref.db, ref.table)
      const columns = findColumns(ref.db, ref.table)
      for (const col of columns) {
        options.push({
          label: col.name,
          detail: `${col.type} (${ref.db}.${ref.table})`,
          type: 'property',
          boost: 20,
        })
        // alias.column suggestions for JOIN/ON precision
        const aliases = refToAliases.get(refRaw) ?? []
        for (const alias of aliases) {
          options.push({
            label: `${alias}.${col.name}`,
            detail: `${col.type} (${ref.db}.${ref.table})`,
            type: 'property',
            boost: 24,
          })
        }
      }
    }
  }
  return options
}

function buildFunctionCompletions(): Completion[] {
  return (cachedFunctions ?? []).map((fn) => ({
    label: fn,
    type: 'function',
    boost: 7,
    detail: 'Function',
  }))
}

function buildKeywordCompletions(): Completion[] {
  return (cachedKeywords ?? []).map((kw) => ({
    label: kw,
    type: 'keyword',
    boost: 4,
  }))
}

// Static snippet templates; ${...} placeholders are CodeMirror snippet fields.
function buildSnippetCompletions(): Completion[] {
  return [
    snippetCompletion('\\$ref(${model_name})', {
      label: '$ref()', type: 'snippet', detail: 'Reference another model', boost: 35,
    }),
    snippetCompletion('SELECT ${columns}\nFROM ${database}.${table}\nLIMIT ${1000}', {
      label: 'SELECT … FROM', type: 'snippet', detail: 'Query starter', boost: 30,
    }),
    snippetCompletion('SELECT ${a}.*, ${b}.*\nFROM ${table_a} ${a}\nJOIN ${table_b} ${b} ON ${a}.${id} = ${b}.${id}', {
      label: 'JOIN Template', type: 'snippet', detail: 'Join two tables', boost: 28,
    }),
    snippetCompletion('WITH ${cte_name} AS (\n ${query}\n)\nSELECT *\nFROM ${cte_name}', {
      label: 'CTE Template', type: 'snippet', detail: 'WITH clause', boost: 26,
    }),
    snippetCompletion('countIf(${condition})', {
      label: 'countIf()', type: 'snippet', detail: 'Conditional count', boost: 20,
    }),
    snippetCompletion('sumIf(${value}, ${condition})', {
      label: 'sumIf()', type: 'snippet', detail: 'Conditional sum', boost: 20,
    }),
    snippetCompletion('uniqExact(${value})', {
      label: 'uniqExact()', type: 'snippet', detail: 'Exact cardinality', boost: 18,
    }),
    snippetCompletion('toStartOfInterval(${timestamp}, INTERVAL ${5} minute)', {
      label: 'toStartOfInterval()', type: 'snippet', detail: 'Time bucket', boost: 18,
    }),
  ]
}

// ── Main completion source ──────────────────────────────────────
/**
 * CodeMirror completion source for ClickHouse SQL: combines $ref() model
 * names, schema-aware table/column suggestions, server-provided functions
 * and keywords, and snippet templates, ranked by fuzzy match.
 */
export async function clickhouseCompletionSource(
  context: CompletionContext,
): Promise<CompletionResult | null> {
  const word = context.matchBefore(/[\w.`"]*/)
  if (!word) return null
  if (word.from === word.to && !context.explicit) return null
  // Warm caches in background so autocomplete stays responsive even when
  // metadata endpoints are slow or temporarily unavailable.
  void ensureFunctionKeywordCache()
  void ensureDatabasesLoaded()
  const doc = context.state.doc.toString()
  const beforeCursor = doc.slice(0, context.pos)
  // Check for $ref() context first — return only model names
  const refCtx = detectRefContext(doc, context.pos)
  if (refCtx.inside) {
    await ensureModelsLoaded()
    return {
      from: refCtx.nameStart,
      options: buildModelCompletions(),
      validFor: /^\w*$/,
    }
  }
  // Detect context at the cursor (not token start) so dot/function contexts are
  // classified correctly while the user is actively typing.
  const sqlCtx = detectContext(doc, context.pos)
  const term = word.text.replace(/[`"]/g, '')
  let options: Completion[] = []
  switch (sqlCtx) {
    case 'dot':
      options = await buildDotCompletions(doc, beforeCursor)
      break
    case 'table':
      options = [
        ...(await buildTableCompletions(term)),
        ...buildDatabaseCompletions(),
        ...buildKeywordCompletions().filter((k) => ['JOIN', 'ON', 'USING', 'WHERE'].includes(k.label)),
      ]
      break
    case 'column':
      options = [
        ...(await buildReferencedColumnCompletions(doc)),
        ...buildFunctionCompletions(),
        ...buildSnippetCompletions(),
        ...buildKeywordCompletions(),
      ]
      break
    case 'function':
      options = [
        ...buildFunctionCompletions(),
        ...(await buildReferencedColumnCompletions(doc)),
      ]
      break
    default:
      options = [
        ...buildSnippetCompletions(),
        ...buildKeywordCompletions(),
        ...buildFunctionCompletions(),
        ...buildDatabaseCompletions(),
        ...(await buildTableCompletions(term)),
        ...(await buildReferencedColumnCompletions(doc)),
      ]
      break
  }
  const ranked = rankCompletions(dedupeCompletions(options), term)
  return {
    from: word.from,
    options: ranked,
    validFor: /^[\w.`"]*$/,
  }
}
================================================ FILE: ui/src/lib/stores/command-palette.svelte.ts ================================================
// Global open/closed state for the command palette (Svelte 5 rune store).
let open = $state(false)
export function isCommandPaletteOpen(): boolean { return open }
export function openCommandPalette(): void { open = true }
export function closeCommandPalette(): void { open = false }
export function
toggleCommandPalette(): void { open = !open } ================================================ FILE: ui/src/lib/stores/license.svelte.ts ================================================ import type { LicenseInfo } from '../types/api' import { apiGet } from '../api/client' let license = $state(null) let loading = $state(false) let loadPromise: Promise | null = null export function getLicense(): LicenseInfo | null { return license } export function isLicenseLoading(): boolean { return loading } export function isProActive(): boolean { return !!(license?.valid && license?.edition?.toLowerCase() === 'pro') } export async function loadLicense(force = false): Promise { if (!force && license) return if (loadPromise) { await loadPromise return } loading = true loadPromise = apiGet('/api/license') .then((res) => { license = res }) .catch(() => { license = null }) .finally(() => { loading = false loadPromise = null }) await loadPromise } ================================================ FILE: ui/src/lib/stores/number-format.svelte.ts ================================================ const STORAGE_KEY = 'ch-ui-format-numbers' const initial = localStorage.getItem(STORAGE_KEY) !== 'false' let formatNumbers = $state(initial) export function getFormatNumbers(): boolean { return formatNumbers } export function toggleFormatNumbers(): void { formatNumbers = !formatNumbers localStorage.setItem(STORAGE_KEY, String(formatNumbers)) } ================================================ FILE: ui/src/lib/stores/query-limit.svelte.ts ================================================ const STORAGE_KEY = 'ch-ui-max-result-rows' const DEFAULT_LIMIT = 1000 const stored = parseInt(localStorage.getItem(STORAGE_KEY) ?? '', 10) let maxResultRows = $state(isNaN(stored) || stored < 1 ? 
DEFAULT_LIMIT : stored) export function getMaxResultRows(): number { return maxResultRows } export function setMaxResultRows(value: number): void { const clamped = Math.max(1, Math.round(value)) maxResultRows = clamped localStorage.setItem(STORAGE_KEY, String(clamped)) } ================================================ FILE: ui/src/lib/stores/router.svelte.ts ================================================ import { withBase, stripBase } from '../basePath' import type { SingletonTab } from './tabs.svelte' import { getActiveTab, getTabs, openDashboardTab, openHomeTab, openSingletonTab, setActiveTab } from './tabs.svelte' // ── URL ↔ Tab mapping ──────────────────────────────────────────── const TAB_PATHS: Record = { 'home': '/', 'saved-queries': '/saved-queries', 'dashboards': '/dashboards', 'schedules': '/schedules', 'brain': '/brain', 'admin': '/admin', 'governance': '/governance', 'pipelines': '/pipelines', 'models': '/models', 'model': '/models', 'settings': '/license', } const PATH_TABS: Record = { '/saved-queries': { type: 'saved-queries', label: 'Saved Queries' }, '/dashboards': { type: 'dashboards', label: 'Dashboards' }, '/schedules': { type: 'schedules', label: 'Schedules' }, '/brain': { type: 'brain', label: 'Brain' }, '/admin': { type: 'admin', label: 'Admin' }, '/governance': { type: 'governance', label: 'Governance' }, '/pipelines': { type: 'pipelines', label: 'Pipelines' }, '/models': { type: 'models', label: 'Models' }, '/settings': { type: 'settings', label: 'License' }, '/license': { type: 'settings', label: 'License' }, } // Prevents pushState during popstate-triggered tab activation let suppressPush = false // ── Pipeline sub-route state ───────────────────────────────────── let pipelineId = $state(undefined) export function getCurrentPipelineId(): string | undefined { return pipelineId } // ── URL helpers ────────────────────────────────────────────────── function buildUrl(path: string, tabId?: string): string { const fullPath = withBase(path) 
if (tabId) return `${fullPath}?tab=${tabId}` return fullPath } function currentTabParam(): string | null { return new URLSearchParams(window.location.search).get('tab') } function pushUrl(path: string, tabId?: string): void { const url = buildUrl(path, tabId) const currentPath = window.location.pathname if (currentPath !== withBase(path)) { history.pushState(null, '', url) } else if (currentTabParam() !== tabId) { history.replaceState(null, '', url) } } // ── Push helpers ───────────────────────────────────────────────── export function pushTabRoute(tabType: string): void { if (suppressPush) return const path = TAB_PATHS[tabType] ?? '/' const activeTab = getActiveTab() pushUrl(path, activeTab?.id) } export function pushTabRouteForTab(tab: { id: string; type: string; dashboardId?: string }): void { if (suppressPush) return if (tab.type === 'dashboard' && tab.dashboardId) { pushUrl(`/dashboards/${tab.dashboardId}`, tab.id) return } const path = TAB_PATHS[tab.type] ?? '/' pushUrl(path, tab.id) } export function pushDashboardDetail(id: string): void { if (suppressPush) return const dashTab = getTabs().find(t => t.type === 'dashboard' && 'dashboardId' in t && t.dashboardId === id) pushUrl('/dashboards/' + id, dashTab?.id) } export function pushDashboardList(): void { if (suppressPush) return const tab = getTabs().find(t => t.type === 'dashboards') pushUrl('/dashboards', tab?.id) } export function pushPipelineDetail(id: string): void { if (suppressPush) return const tab = getTabs().find(t => t.type === 'pipelines') pushUrl('/pipelines/' + id, tab?.id) pipelineId = id } export function pushPipelineList(): void { if (suppressPush) return const tab = getTabs().find(t => t.type === 'pipelines') pushUrl('/pipelines', tab?.id) pipelineId = undefined } // ── Parse current URL ─────────────────────────────────────────── export function parseRoute(): { type: string; dashboardId?: string; pipelineId?: string } { const path = stripBase(window.location.pathname) // /dashboards/:id 
const dashMatch = path.match(/^\/dashboards\/(.+)$/) if (dashMatch) { return { type: 'dashboard', dashboardId: dashMatch[1] } } // /pipelines/:id const pipeMatch = path.match(/^\/pipelines\/(.+)$/) if (pipeMatch) { return { type: 'pipelines', pipelineId: pipeMatch[1] } } // Known singleton paths const entry = PATH_TABS[path] if (entry) { return { type: entry.type } } // Default: home (query editor) return { type: 'home' } } // ── Restore from ?tab= query param ───────────────────────────── function tryRestoreFromTabParam(): boolean { const tabId = currentTabParam() if (!tabId) return false const tab = getTabs().find(t => t.id === tabId) if (!tab) return false suppressPush = true setActiveTab(tabId) suppressPush = false return true } function updateSubRouteState(): void { const match = stripBase(window.location.pathname).match(/^\/pipelines\/(.+)$/) pipelineId = match?.[1] } // ── Sync URL → tab state ──────────────────────────────────────── function syncRouteToTabs(): void { const route = parseRoute() // Update pipeline sub-route state pipelineId = route.pipelineId if (route.type === 'home') { openHomeTab() return } if (route.type === 'dashboard' && route.dashboardId) { suppressPush = true openDashboardTab(route.dashboardId, 'Dashboard') suppressPush = false return } if (route.type === 'pipelines') { suppressPush = true openSingletonTab('pipelines', 'Pipelines') suppressPush = false return } const entry = PATH_TABS[TAB_PATHS[route.type]] if (entry) { suppressPush = true openSingletonTab(entry.type, entry.label) suppressPush = false } } // ── Initialize ────────────────────────────────────────────────── let initialized = false export function initRouter(): void { if (initialized) return initialized = true // On initial load, try ?tab= param first (survives reload reliably) if (!tryRestoreFromTabParam()) { // Fallback: sync from URL pathname syncRouteToTabs() } updateSubRouteState() // Seed ?tab= if missing so a subsequent reload works const activeTab = 
getActiveTab() if (activeTab && !currentTabParam()) { const url = buildUrl(stripBase(window.location.pathname), activeTab.id) history.replaceState(null, '', url) } // Handle browser back/forward window.addEventListener('popstate', () => { if (!tryRestoreFromTabParam()) { syncRouteToTabs() } updateSubRouteState() }) } ================================================ FILE: ui/src/lib/stores/schema.svelte.ts ================================================ import type { Database, Table, Column } from '../types/schema' import { apiGet } from '../api/client' let databases = $state([]) let loading = $state(false) export function getDatabases(): Database[] { return databases } export function isSchemaLoading(): boolean { return loading } export async function loadDatabases(): Promise { loading = true try { const res = await apiGet<{ databases: string[] }>('/api/query/databases') databases = (res.databases ?? []).map(name => ({ name })) } catch { databases = [] } finally { loading = false } } export async function loadTables(dbName: string): Promise { databases = databases.map(db => { if (db.name !== dbName) return db return { ...db, loading: true, expanded: true } }) try { const res = await apiGet<{ tables: Array<{ name: string; engine: string }> }>(`/api/query/tables?database=${encodeURIComponent(dbName)}`) const tables: Table[] = (res.tables ?? 
[]).map(t => ({ name: t.name, engine: t.engine })) databases = databases.map(db => { if (db.name !== dbName) return db return { ...db, tables, loading: false } }) } catch { databases = databases.map(db => { if (db.name !== dbName) return db return { ...db, loading: false } }) } } export async function loadColumns(dbName: string, tableName: string): Promise { databases = databases.map(db => { if (db.name !== dbName) return db return { ...db, tables: db.tables?.map(t => { if (t.name !== tableName) return t return { ...t, loading: true, expanded: true } }), } }) try { const res = await apiGet<{ columns: Column[] }>(`/api/query/columns?database=${encodeURIComponent(dbName)}&table=${encodeURIComponent(tableName)}`) const columns: Column[] = res.columns ?? [] databases = databases.map(db => { if (db.name !== dbName) return db return { ...db, tables: db.tables?.map(t => { if (t.name !== tableName) return t return { ...t, columns, loading: false } }), } }) } catch { databases = databases.map(db => { if (db.name !== dbName) return db return { ...db, tables: db.tables?.map(t => { if (t.name !== tableName) return t return { ...t, loading: false } }), } }) } } export function toggleDatabase(dbName: string): void { const db = databases.find(d => d.name === dbName) if (!db) return if (db.expanded) { databases = databases.map(d => d.name === dbName ? { ...d, expanded: false } : d) } else { loadTables(dbName) } } export function toggleTable(dbName: string, tableName: string): void { const db = databases.find(d => d.name === dbName) const table = db?.tables?.find(t => t.name === tableName) if (!table) return if (table.expanded) { databases = databases.map(d => { if (d.name !== dbName) return d return { ...d, tables: d.tables?.map(t => t.name === tableName ? 
{ ...t, expanded: false } : t), } }) } else { loadColumns(dbName, tableName) } } ================================================ FILE: ui/src/lib/stores/session.svelte.ts ================================================ import type { Session } from '../types/api' import { checkSession, login as apiLogin, logout as apiLogout } from '../api/auth' let session = $state(null) let loading = $state(true) let error = $state(null) export function getSession(): Session | null { return session } export function isLoading(): boolean { return loading } export function getError(): string | null { return error } export function isAuthenticated(): boolean { return session !== null } /** Initialize session from server cookie */ export async function initSession(): Promise { loading = true error = null try { session = await checkSession() } catch (e) { session = null } finally { loading = false } } /** Log in and set session */ export async function login(connectionId: string, username: string, password: string): Promise { error = null loading = true try { const res = await apiLogin({ connectionId, username, password }) session = res.session } catch (e: any) { error = e.message || 'Login failed' throw e } finally { loading = false } } /** Log out and clear session */ export async function logout(): Promise { try { await apiLogout() } finally { session = null } } ================================================ FILE: ui/src/lib/stores/tabs.svelte.ts ================================================ import type { ColumnMeta, QueryStats } from '../types/query' import type { ModelEditState } from '../types/models' import { createUUID } from '../utils/uuid' import { pushTabRouteForTab } from './router.svelte' // ── Tab types ──────────────────────────────────────────────────── export type TabType = 'home' | 'query' | 'table' | 'database' | 'dashboard' | 'model' | 'saved-queries' | 'settings' | 'dashboards' | 'schedules' | 'brain' | 'admin' | 'governance' | 'pipelines' | 'models' 
interface TabBase { id: string type: TabType name: string } export interface QueryTab extends TabBase { type: 'query' sql: string dirty: boolean savedQueryId?: string baseSql?: string } export interface TableTab extends TabBase { type: 'table' database: string table: string } export interface DatabaseTab extends TabBase { type: 'database' database: string } export interface DashboardTab extends TabBase { type: 'dashboard' dashboardId: string } export interface ModelTab extends TabBase { type: 'model' modelId: string dirty: boolean edit: ModelEditState base: ModelEditState status: string lastError: string | null } export interface HomeTab extends TabBase { type: 'home' } export interface SingletonTab extends TabBase { type: 'saved-queries' | 'settings' | 'dashboards' | 'schedules' | 'brain' | 'admin' | 'governance' | 'pipelines' | 'models' } export type Tab = HomeTab | QueryTab | TableTab | DatabaseTab | DashboardTab | ModelTab | SingletonTab // ── Tab Groups (split view) ───────────────────────────────────── export interface TabGroup { id: string // 'left' | 'right' tabIds: string[] // ordered tab IDs in this group activeTabId: string } // ── Per-tab query results ──────────────────────────────────────── export interface TabResult { meta: ColumnMeta[] data: unknown[][] stats: QueryStats | null elapsedMs: number error: string | null running: boolean } // ── Persistence ───────────────────────────────────────────────── const STORAGE_KEY = 'ch-ui-tabs' const HOME_TAB_ID = 'home' const HOME_TAB_NAME = 'Home' interface StorageFormat { tabs: Tab[] groups: TabGroup[] focusedGroupId: string } function saveTabs(): void { try { localStorage.setItem(STORAGE_KEY, JSON.stringify({ tabs, groups, focusedGroupId, })) } catch { // localStorage full or unavailable } } function loadTabs(): StorageFormat { try { const raw = localStorage.getItem(STORAGE_KEY) if (raw) { const parsed = JSON.parse(raw) if (Array.isArray(parsed.tabs) && parsed.tabs.length > 0) { // Derive nextNum from 
existing query tab names for (const t of parsed.tabs) { if (t.type === 'query') { const match = t.name.match(/^Query (\d+)$/) if (match) { const n = parseInt(match[1], 10) if (n >= nextNum) nextNum = n + 1 } } } // New format: has groups array if (Array.isArray(parsed.groups) && parsed.groups.length > 0) { return normalizeTabsState({ tabs: parsed.tabs, groups: parsed.groups, focusedGroupId: parsed.focusedGroupId || 'left', }) } // Legacy migration: old format had { tabs, activeTabId } const activeId = parsed.activeTabId || parsed.tabs[0].id return normalizeTabsState({ tabs: parsed.tabs, groups: [{ id: 'left', tabIds: parsed.tabs.map((t: Tab) => t.id), activeTabId: activeId }], focusedGroupId: 'left', }) } } } catch { // corrupt data } const homeTab = createHomeTab() return normalizeTabsState({ tabs: [homeTab], groups: [{ id: 'left', tabIds: [homeTab.id], activeTabId: homeTab.id }], focusedGroupId: 'left', }) } // ── State ──────────────────────────────────────────────────────── let nextNum = 1 interface CreateQueryTabOptions { name?: string savedQueryId?: string baseSql?: string } function createQueryTab(sql = '', options: CreateQueryTabOptions = {}): QueryTab { const id = createUUID() const name = options.name?.trim() ? options.name.trim() : `Query ${nextNum++}` return { id, type: 'query', name, sql, dirty: false, savedQueryId: options.savedQueryId, baseSql: options.baseSql ?? sql, } } function createHomeTab(): HomeTab { return { id: HOME_TAB_ID, type: 'home', name: HOME_TAB_NAME, } } function normalizeTabsState(state: StorageFormat): StorageFormat { const existingHome = state.tabs.find((tab) => tab.type === 'home') as HomeTab | undefined const homeTab = existingHome ?? createHomeTab() const nonHomeTabs = state.tabs.filter((tab) => tab.type !== 'home' && tab.id !== HOME_TAB_ID) const normalizedTabs: Tab[] = [homeTab, ...nonHomeTabs] const tabIdSet = new Set(normalizedTabs.map((tab) => tab.id)) const incomingGroups = state.groups.length > 0 ? 
state.groups.map((group) => ({ ...group })) : [{ id: 'left', tabIds: [], activeTabId: homeTab.id }] if (!incomingGroups.some((group) => group.id === 'left')) { incomingGroups[0] = { ...incomingGroups[0], id: 'left' } } if (incomingGroups.length > 2) { incomingGroups.splice(2) } if (incomingGroups.length === 2) { incomingGroups[1] = { ...incomingGroups[1], id: 'right' } } const normalizedGroups = incomingGroups.map((group) => { const seen = new Set() const ids = group.tabIds.filter((tabId) => { if (!tabIdSet.has(tabId) || tabId === homeTab.id || seen.has(tabId)) return false seen.add(tabId) return true }) return { ...group, tabIds: ids } }) const leftGroup = normalizedGroups.find((group) => group.id === 'left') ?? normalizedGroups[0] if (!leftGroup) { normalizedGroups.push({ id: 'left', tabIds: [homeTab.id], activeTabId: homeTab.id }) } else { leftGroup.tabIds = [homeTab.id, ...leftGroup.tabIds] } const assignedTabIds = new Set(normalizedGroups.flatMap((group) => group.tabIds)) for (const tab of nonHomeTabs) { if (!assignedTabIds.has(tab.id)) { const target = normalizedGroups.find((group) => group.id === 'left') ?? normalizedGroups[0] target.tabIds.push(tab.id) assignedTabIds.add(tab.id) } } const hydratedGroups = normalizedGroups .filter((group) => group.id === 'left' || group.tabIds.length > 0) .map((group) => { const activeTabId = group.tabIds.includes(group.activeTabId) ? group.activeTabId : (group.tabIds[0] ?? homeTab.id) return { ...group, activeTabId } }) const groups = hydratedGroups.length > 0 ? hydratedGroups : [{ id: 'left', tabIds: [homeTab.id], activeTabId: homeTab.id }] const focusedGroupId = groups.some((group) => group.id === state.focusedGroupId) ? state.focusedGroupId : 'left' return { tabs: normalizedTabs, groups, focusedGroupId, } } const initial = loadTabs() let tabs = $state(initial.tabs.map((tab) => { if (tab.type !== 'query') return tab const queryTab = tab as QueryTab return { ...queryTab, baseSql: typeof queryTab.baseSql === 'string' ? 
queryTab.baseSql : (queryTab.dirty ? '' : queryTab.sql), } })) let groups = $state(initial.groups) let focusedGroupId = $state(initial.focusedGroupId) let results = $state>(new Map()) // Auto-save on any change (debounced via microtask) let saveQueued = false function queueSave(): void { if (saveQueued) return saveQueued = true queueMicrotask(() => { saveTabs() saveQueued = false }) } // ── Internal helpers ──────────────────────────────────────────── function findGroupForTab(tabId: string): string | undefined { return groups.find(g => g.tabIds.includes(tabId))?.id } function isHomeTabId(tabId: string): boolean { const tab = tabs.find((entry) => entry.id === tabId) return !!tab && tab.type === 'home' } function resolveTargetGroupId(targetGroupId?: string): string { const candidate = targetGroupId ?? focusedGroupId if (groups.some((group) => group.id === candidate)) return candidate return groups[0]?.id ?? 'left' } // ── Getters ───────────────────────────────────────────────────── export function getTabs(): Tab[] { return tabs } /** Backward-compat: returns focused group's active tab ID */ export function getActiveTabId(): string { const group = groups.find(g => g.id === focusedGroupId) ?? groups[0] return group?.activeTabId ?? 
'' } /** Backward-compat: returns focused group's active tab */ export function getActiveTab(): Tab | undefined { return tabs.find(t => t.id === getActiveTabId()) } // ── Group getters ─────────────────────────────────────────────── export function getGroups(): TabGroup[] { return groups } export function getFocusedGroupId(): string { return focusedGroupId } export function isSplit(): boolean { return groups.length === 2 } export function getGroupTabs(groupId: string): Tab[] { const group = groups.find(g => g.id === groupId) if (!group) return [] return group.tabIds.map(id => tabs.find(t => t.id === id)).filter(Boolean) as Tab[] } export function getGroupActiveTab(groupId: string): Tab | undefined { const group = groups.find(g => g.id === groupId) if (!group) return undefined return tabs.find(t => t.id === group.activeTabId) } export function getGroupActiveTabId(groupId: string): string { const group = groups.find(g => g.id === groupId) return group?.activeTabId ?? '' } // ── Tab result accessors ───────────────────────────────────────── export function getTabResult(tabId: string): TabResult | undefined { return results.get(tabId) } export function setTabResult(tabId: string, partial: Partial): void { const current = results.get(tabId) ?? { meta: [], data: [], stats: null, elapsedMs: 0, error: null, running: false, } const updated = new Map(results) updated.set(tabId, { ...current, ...partial }) results = updated } export function clearTabResult(tabId: string): void { const updated = new Map(results) updated.delete(tabId) results = updated } // ── Actions ───────────────────────────────────────────────────── export function setActiveTab(id: string, groupId?: string): void { const gid = groupId ?? findGroupForTab(id) ?? focusedGroupId groups = groups.map(g => g.id === gid ? 
{ ...g, activeTabId: id } : g ) focusedGroupId = gid // Sync URL to match the activated tab const tab = tabs.find(t => t.id === id) if (tab) pushTabRouteForTab(tab) queueSave() } export function setFocusedGroup(groupId: string): void { focusedGroupId = groupId } // ── Open tabs (with deduplication) ─────────────────────────────── export function openHomeTab(): void { const homeTab = tabs.find((tab) => tab.type === 'home') as HomeTab | undefined if (homeTab) { setActiveTab(homeTab.id, 'left') return } const tab = createHomeTab() tabs = [tab, ...tabs] const leftGroup = groups.find((group) => group.id === 'left') if (leftGroup) { groups = groups.map((group) => group.id === 'left' ? { ...group, tabIds: [tab.id, ...group.tabIds], activeTabId: tab.id } : group, ) } else { groups = [{ id: 'left', tabIds: [tab.id], activeTabId: tab.id }, ...groups] } focusedGroupId = 'left' pushTabRouteForTab(tab) queueSave() } export function openQueryTab(sql = '', targetGroupId?: string): void { const tab = createQueryTab(sql) tabs = [...tabs, tab] const gid = resolveTargetGroupId(targetGroupId) groups = groups.map(g => g.id === gid ? 
{ ...g, tabIds: [...g.tabIds, tab.id], activeTabId: tab.id } : g ) focusedGroupId = gid queueSave() } interface SavedQueryTabInput { id: string name: string query: string } export function openSavedQueryTab(savedQuery: SavedQueryTabInput, targetGroupId?: string): void { const existing = tabs.find( (tab) => tab.type === 'query' && (tab as QueryTab).savedQueryId === savedQuery.id, ) as QueryTab | undefined if (existing) { tabs = tabs.map((tab) => { if (tab.id !== existing.id || tab.type !== 'query') return tab if (tab.dirty) return tab return { ...tab, name: savedQuery.name, sql: savedQuery.query, baseSql: savedQuery.query, dirty: false, } }) setActiveTab(existing.id) return } const tab = createQueryTab(savedQuery.query, { name: savedQuery.name, savedQueryId: savedQuery.id, baseSql: savedQuery.query, }) tabs = [...tabs, tab] const gid = resolveTargetGroupId(targetGroupId) groups = groups.map(g => g.id === gid ? { ...g, tabIds: [...g.tabIds, tab.id], activeTabId: tab.id } : g, ) focusedGroupId = gid queueSave() } export function openTableTab(database: string, table: string, targetGroupId?: string): void { const existing = tabs.find( t => t.type === 'table' && t.database === database && t.table === table ) as TableTab | undefined if (existing) { setActiveTab(existing.id) return } const tab: TableTab = { id: createUUID(), type: 'table', name: `${database}.${table}`, database, table, } tabs = [...tabs, tab] const gid = resolveTargetGroupId(targetGroupId) groups = groups.map(g => g.id === gid ? 
{ ...g, tabIds: [...g.tabIds, tab.id], activeTabId: tab.id } : g ) focusedGroupId = gid queueSave() } export function openDatabaseTab(database: string, targetGroupId?: string): void { const existing = tabs.find( t => t.type === 'database' && t.database === database, ) as DatabaseTab | undefined if (existing) { setActiveTab(existing.id) return } const tab: DatabaseTab = { id: createUUID(), type: 'database', name: database, database, } tabs = [...tabs, tab] const gid = resolveTargetGroupId(targetGroupId) groups = groups.map(g => g.id === gid ? { ...g, tabIds: [...g.tabIds, tab.id], activeTabId: tab.id } : g, ) focusedGroupId = gid queueSave() } export function openDashboardTab(dashboardId: string, name = 'Dashboard', targetGroupId?: string): void { const existing = tabs.find( t => t.type === 'dashboard' && t.dashboardId === dashboardId, ) as DashboardTab | undefined if (existing) { setActiveTab(existing.id) return } const tab: DashboardTab = { id: createUUID(), type: 'dashboard', name, dashboardId, } tabs = [...tabs, tab] const gid = resolveTargetGroupId(targetGroupId) groups = groups.map(g => g.id === gid ? { ...g, tabIds: [...g.tabIds, tab.id], activeTabId: tab.id } : g, ) focusedGroupId = gid pushTabRouteForTab(tab) queueSave() } export function openSingletonTab(type: SingletonTab['type'], name: string, targetGroupId?: string): void { const existing = tabs.find(t => t.type === type) if (existing) { if (existing.name !== name) { tabs = tabs.map((tab) => (tab.id === existing.id ? { ...tab, name } : tab)) queueSave() } setActiveTab(existing.id) return } const tab: SingletonTab = { id: createUUID(), type, name, } tabs = [...tabs, tab] const gid = resolveTargetGroupId(targetGroupId) groups = groups.map(g => g.id === gid ? 
{ ...g, tabIds: [...g.tabIds, tab.id], activeTabId: tab.id } : g ) focusedGroupId = gid pushTabRouteForTab(tab) queueSave() } // ── Close / update ─────────────────────────────────────────────── export function closeTab(id: string): void { if (isHomeTabId(id)) return const groupId = findGroupForTab(id) if (!groupId) return // Remove from group groups = groups.map(g => { if (g.id !== groupId) return g const newTabIds = g.tabIds.filter(tid => tid !== id) let newActiveId = g.activeTabId if (g.activeTabId === id) { const idx = g.tabIds.indexOf(id) const newIdx = Math.min(idx, newTabIds.length - 1) newActiveId = newTabIds[newIdx] ?? '' } return { ...g, tabIds: newTabIds, activeTabId: newActiveId } }) // Remove tab data tabs = tabs.filter(t => t.id !== id) clearTabResult(id) // Collapse empty group const emptyGroup = groups.find(g => g.tabIds.length === 0) if (emptyGroup) { groups = groups.filter(g => g.tabIds.length > 0) if (groups.length === 0) { // Keep the pinned Home tab alive. const homeTab = createHomeTab() tabs = [homeTab] groups = [{ id: 'left', tabIds: [homeTab.id], activeTabId: homeTab.id }] } focusedGroupId = groups[0].id } // If all tabs gone from last group, ensure Home exists. if (tabs.length === 0) { const homeTab = createHomeTab() tabs = [homeTab] groups = [{ id: 'left', tabIds: [homeTab.id], activeTabId: homeTab.id }] focusedGroupId = 'left' } const normalized = normalizeTabsState({ tabs, groups, focusedGroupId }) tabs = normalized.tabs groups = normalized.groups focusedGroupId = normalized.focusedGroupId queueSave() } export function updateTabSQL(id: string, sql: string): void { tabs = tabs.map((tab) => { if (tab.id !== id || tab.type !== 'query') return tab const baseSql = typeof tab.baseSql === 'string' ? tab.baseSql : '' return { ...tab, sql, dirty: sql !== baseSql } }) queueSave() } export function renameTab(id: string, name: string): void { if (isHomeTabId(id)) return tabs = tabs.map(t => (t.id === id ? 
{ ...t, name } : t)) queueSave() } export function markQueryTabSaved(id: string, options: { savedQueryId?: string; name?: string; baseSql?: string } = {}): void { tabs = tabs.map((tab) => { if (tab.id !== id || tab.type !== 'query') return tab const name = options.name?.trim() ? options.name.trim() : tab.name const baseSql = options.baseSql ?? tab.sql return { ...tab, name, savedQueryId: options.savedQueryId ?? tab.savedQueryId, baseSql, dirty: false, } }) queueSave() } export function isTabDirty(id: string): boolean { const tab = tabs.find((entry) => entry.id === id) if (!tab) return false if (tab.type === 'query') return !!(tab as QueryTab).dirty if (tab.type === 'model') return !!(tab as ModelTab).dirty return false } // ── Reorder (within a group) ───────────────────────────────────── export function reorderTab(groupId: string, fromIndex: number, toIndex: number): void { if (fromIndex === toIndex) return groups = groups.map(g => { if (g.id !== groupId) return g const updated = [...g.tabIds] const [moved] = updated.splice(fromIndex, 1) if (!moved || isHomeTabId(moved)) return g updated.splice(toIndex, 0, moved) if (g.id === 'left') { const homeIndex = updated.findIndex((tabId) => isHomeTabId(tabId)) if (homeIndex > 0) { const [homeId] = updated.splice(homeIndex, 1) updated.unshift(homeId) } } return { ...g, tabIds: updated } }) queueSave() } // ── Split / move / unsplit ─────────────────────────────────────── export function splitTabToSide(tabId: string, side: 'left' | 'right'): void { if (isHomeTabId(tabId)) return if (groups.length >= 2) { moveTabToGroup(tabId, side) return } const sourceGroup = groups.find(g => g.tabIds.includes(tabId)) if (!sourceGroup || sourceGroup.tabIds.length <= 1) return const remainingTabIds = sourceGroup.tabIds.filter(id => id !== tabId) const idx = sourceGroup.tabIds.indexOf(tabId) const remainingActive = sourceGroup.activeTabId === tabId ? remainingTabIds[Math.min(idx, remainingTabIds.length - 1)] ?? remainingTabIds[0] ?? 
'' : sourceGroup.activeTabId if (side === 'right') { groups = [ { id: 'left', tabIds: remainingTabIds, activeTabId: remainingActive }, { id: 'right', tabIds: [tabId], activeTabId: tabId }, ] focusedGroupId = 'right' } else { groups = [ { id: 'left', tabIds: [tabId], activeTabId: tabId }, { id: 'right', tabIds: remainingTabIds, activeTabId: remainingActive }, ] focusedGroupId = 'left' } const normalized = normalizeTabsState({ tabs, groups, focusedGroupId }) tabs = normalized.tabs groups = normalized.groups focusedGroupId = normalized.focusedGroupId queueSave() } export function splitTab(tabId: string): void { if (isHomeTabId(tabId)) return if (groups.length >= 2) { // Already split — move to other group const sourceGroupId = findGroupForTab(tabId) const targetGroupId = sourceGroupId === 'left' ? 'right' : 'left' moveTabToGroup(tabId, targetGroupId) return } splitTabToSide(tabId, 'right') } export function moveTabToGroup(tabId: string, targetGroupId: string): void { if (isHomeTabId(tabId)) return const sourceGroupId = findGroupForTab(tabId) if (!sourceGroupId || sourceGroupId === targetGroupId) return // If target group doesn't exist yet, create it (this enables cross-group drag to create split) if (!groups.find(g => g.id === targetGroupId)) { const sourceGroup = groups.find(g => g.id === sourceGroupId) if (!sourceGroup || sourceGroup.tabIds.length <= 1) return // Create split splitTab(tabId) return } groups = groups.map(g => { if (g.id === sourceGroupId) { const newTabIds = g.tabIds.filter(id => id !== tabId) const idx = g.tabIds.indexOf(tabId) const newActive = g.activeTabId === tabId ? (newTabIds[Math.min(idx, newTabIds.length - 1)] ?? newTabIds[0] ?? 
'') : g.activeTabId return { ...g, tabIds: newTabIds, activeTabId: newActive } } if (g.id === targetGroupId) { return { ...g, tabIds: [...g.tabIds, tabId], activeTabId: tabId } } return g }) // Collapse empty groups const emptyGroup = groups.find(g => g.tabIds.length === 0) if (emptyGroup) { groups = groups.filter(g => g.tabIds.length > 0) focusedGroupId = groups[0]?.id ?? 'left' } else { focusedGroupId = targetGroupId } const normalized = normalizeTabsState({ tabs, groups, focusedGroupId }) tabs = normalized.tabs groups = normalized.groups focusedGroupId = normalized.focusedGroupId queueSave() } export function unsplit(): void { const allTabIds = groups.flatMap(g => g.tabIds) const homeId = tabs.find((tab) => tab.type === 'home')?.id ?? HOME_TAB_ID const ordered = [homeId, ...allTabIds.filter((id) => id !== homeId)] const preferredActive = groups.find(g => g.id === focusedGroupId)?.activeTabId ?? ordered[0] const activeId = preferredActive === homeId ? homeId : (ordered.includes(preferredActive) ? 
preferredActive : homeId) groups = [{ id: 'left', tabIds: ordered, activeTabId: activeId }] focusedGroupId = 'left' queueSave() } // ── Model tabs ────────────────────────────────────────────────── function modelEditEqual(a: ModelEditState, b: ModelEditState): boolean { return a.modelName === b.modelName && a.description === b.description && a.targetDatabase === b.targetDatabase && a.materialization === b.materialization && a.sqlBody === b.sqlBody && a.tableEngine === b.tableEngine && a.orderBy === b.orderBy } interface ModelTabInput { id: string name: string description: string target_database: string materialization: string sql_body: string table_engine: string order_by: string status: string last_error: string | null } export function openModelTab(model: ModelTabInput, targetGroupId?: string): void { const existing = tabs.find(t => t.type === 'model' && (t as ModelTab).modelId === model.id) as ModelTab | undefined if (existing) { setActiveTab(existing.id) return } const editState: ModelEditState = { modelName: model.name, description: model.description, targetDatabase: model.target_database, materialization: model.materialization, sqlBody: model.sql_body, tableEngine: model.table_engine, orderBy: model.order_by, } const tab: ModelTab = { id: createUUID(), type: 'model', name: model.name, modelId: model.id, dirty: false, edit: { ...editState }, base: { ...editState }, status: model.status, lastError: model.last_error ?? null, } tabs = [...tabs, tab] const gid = resolveTargetGroupId(targetGroupId) groups = groups.map(g => g.id === gid ? 
{ ...g, tabIds: [...g.tabIds, tab.id], activeTabId: tab.id } : g ) focusedGroupId = gid pushTabRouteForTab(tab) queueSave() } export function updateModelTabEdit(tabId: string, partial: Partial): void { tabs = tabs.map(tab => { if (tab.id !== tabId || tab.type !== 'model') return tab const modelTab = tab as ModelTab const edit = { ...modelTab.edit, ...partial } const dirty = !modelEditEqual(edit, modelTab.base) const name = edit.modelName || modelTab.name return { ...modelTab, edit, dirty, name } }) queueSave() } export function markModelTabSaved(tabId: string, model: { name: string; status: string; last_error: string | null }): void { tabs = tabs.map(tab => { if (tab.id !== tabId || tab.type !== 'model') return tab const modelTab = tab as ModelTab return { ...modelTab, base: { ...modelTab.edit }, dirty: false, name: model.name, status: model.status, lastError: model.last_error } }) queueSave() } export function updateModelTabStatus(tabId: string, status: string, lastError: string | null): void { tabs = tabs.map(tab => { if (tab.id !== tabId || tab.type !== 'model') return tab return { ...tab, status, lastError } }) queueSave() } // Legacy alias export const addTab = openQueryTab ================================================ FILE: ui/src/lib/stores/theme.svelte.ts ================================================ type Theme = 'dark' | 'light' const initial = (localStorage.getItem('ch-ui-theme') as Theme) || 'dark' let theme = $state(initial) applyTheme(initial) export function getTheme(): Theme { return theme } export function toggleTheme(): void { theme = theme === 'dark' ? 
'light' : 'dark' localStorage.setItem('ch-ui-theme', theme) applyTheme(theme) } function applyTheme(t: Theme): void { if (t === 'dark') { document.documentElement.classList.add('dark') } else { document.documentElement.classList.remove('dark') } } ================================================ FILE: ui/src/lib/stores/toast.svelte.ts ================================================ import { toast } from 'svelte-sonner' import type { ExternalToast } from 'svelte-sonner' export type ToastType = 'info' | 'success' | 'error' | 'warning' const DEFAULT_DURATION: Record = { success: 3600, info: 4400, warning: 5600, error: 7000, } function normalizeMessage(message: string): string { return (message ?? '').trim() } function resolveToastOptions( type: ToastType, optionsOrDuration?: number | ExternalToast, ): ExternalToast { if (typeof optionsOrDuration === 'number') { return { duration: Math.max(0, optionsOrDuration) } } return { duration: optionsOrDuration?.duration ?? DEFAULT_DURATION[type], ...optionsOrDuration, } } export function addToast(message: string, type: ToastType = 'info', duration?: number): void { switch (type) { case 'success': success(message, duration) return case 'error': error(message, duration) return case 'warning': warning(message, duration) return case 'info': default: info(message, duration) return } } export function removeToast(id: number | string): void { toast.dismiss(id) } export function dismiss(id?: number | string): void { toast.dismiss(id) } export function getToasts() { return toast.getActiveToasts() } export function success(message: string, optionsOrDuration?: number | ExternalToast): void { const cleanMessage = normalizeMessage(message) if (!cleanMessage) return toast.success(cleanMessage, resolveToastOptions('success', optionsOrDuration)) } export function error(message: string, optionsOrDuration?: number | ExternalToast): void { const cleanMessage = normalizeMessage(message) if (!cleanMessage) return toast.error(cleanMessage, 
resolveToastOptions('error', optionsOrDuration)) } export function warning(message: string, optionsOrDuration?: number | ExternalToast): void { const cleanMessage = normalizeMessage(message) if (!cleanMessage) return toast.warning(cleanMessage, resolveToastOptions('warning', optionsOrDuration)) } export function info(message: string, optionsOrDuration?: number | ExternalToast): void { const cleanMessage = normalizeMessage(message) if (!cleanMessage) return toast.info(cleanMessage, resolveToastOptions('info', optionsOrDuration)) } ================================================ FILE: ui/src/lib/types/alerts.ts ================================================ export type AlertChannelType = 'smtp' | 'resend' | 'brevo' export type AlertSeverity = 'info' | 'warn' | 'error' | 'critical' export type AlertEventType = 'policy.violation' | 'schedule.failed' | 'schedule.slow' | '*' export interface AlertChannel { id: string name: string channel_type: AlertChannelType is_active: boolean created_by?: string | null created_at: string updated_at: string config: Record has_secret: boolean } export interface AlertRuleRoute { id: string rule_id: string channel_id: string channel_name: string channel_type: AlertChannelType recipients: string[] is_active: boolean delivery_mode: 'immediate' | 'digest' | string digest_window_minutes: number escalation_channel_id?: string | null escalation_channel_name?: string | null escalation_channel_type?: AlertChannelType | string | null escalation_recipients: string[] escalation_after_failures: number created_at: string updated_at: string } export interface AlertRule { id: string name: string event_type: AlertEventType | string severity_min: AlertSeverity enabled: boolean cooldown_seconds: number max_attempts: number subject_template?: string | null body_template?: string | null created_by?: string | null created_at: string updated_at: string routes: AlertRuleRoute[] } export interface AlertEvent { id: string connection_id?: string | null 
event_type: string severity: AlertSeverity | string title: string message: string payload_json?: string | null fingerprint?: string | null source_ref?: string | null status: string created_at: string processed_at?: string | null } ================================================ FILE: ui/src/lib/types/api.ts ================================================ /** Standard API response envelope */ export interface ApiResponse { success: boolean error?: string data?: T } /** Session info returned by /api/auth/session */ export interface Session { user: string role: string connectionId: string connectionName: string connectionOnline: boolean expiresAt: string version?: string appVersion?: string } /** Connection info */ export interface Connection { id: string name: string status: string online: boolean created_at: string host_info?: HostInfo } /** Host machine metrics from agent */ export interface HostInfo { hostname: string os: string arch: string cpu_cores: number memory_total: number memory_free: number disk_total: number disk_free: number go_version: string agent_uptime: number collected_at: string } /** License info returned by the server */ export interface LicenseInfo { edition: string valid: boolean customer?: string expires_at?: string license_id?: string } /** Saved query */ export interface SavedQuery { id: string name: string query: string description?: string created_at: string updated_at: string } /** Dashboard */ export interface Dashboard { id: string name: string description: string | null created_by: string created_at: string updated_at: string } /** Dashboard panel */ export interface Panel { id: string dashboard_id: string name: string panel_type: string query: string connection_id: string | null config: string layout_x: number layout_y: number layout_w: number layout_h: number created_at: string updated_at: string } /** Scheduled query */ export interface Schedule { id: string name: string saved_query_id: string connection_id: string | null cron: 
string timezone: string enabled: boolean timeout_ms: number last_run_at: string | null next_run_at: string | null last_status: string | null last_error: string | null created_by: string created_at: string updated_at: string } /** Schedule execution run */ export interface ScheduleRun { id: string schedule_id: string started_at: string finished_at: string | null status: string rows_affected: number elapsed_ms: number error: string | null } /** Panel visualization config (stored as JSON in panel.config) */ export interface PanelConfig { chartType: 'table' | 'stat' | 'timeseries' | 'bar' xColumn?: string yColumns?: string[] colors?: string[] legendPosition?: 'bottom' | 'right' | 'none' } /** Audit log entry */ export interface AuditLog { id: string action: string username: string | null details: string | null ip_address: string | null created_at: string parsed_details?: Record } /** Admin stats overview */ export interface AdminStats { users_count: number connections: number online: number login_count: number query_count: number } ================================================ FILE: ui/src/lib/types/brain.ts ================================================ export interface SchemaContextEntry { database: string table: string columns: { name: string; type: string }[] } export interface BrainChat { id: string connection_id: string username: string title: string provider_id?: string | null model_id?: string | null archived: boolean last_message_at?: string | null context_database?: string | null context_table?: string | null context_tables?: string | null created_at: string updated_at: string } export interface BrainMessage { id: string chat_id: string role: 'user' | 'assistant' | string content: string status: string error?: string | null created_at: string updated_at: string } export interface BrainArtifact { id: string chat_id: string message_id?: string | null type: string title: string content: string created_by?: string | null created_at: string } export interface 
BrainModelOption {
  id: string
  name: string
  display_name?: string
  provider_id: string
  provider_name: string
  provider_kind: string
  is_active: boolean
  is_default: boolean
  // Flags of the owning provider, denormalized onto the model option.
  provider_active: boolean
  provider_default: boolean
}

// Admin view of an LLM provider; has_api_key flags whether a key is stored
// server-side (the key itself is not part of this shape).
export interface BrainProviderAdmin {
  id: string
  name: string
  kind: string
  base_url?: string | null
  has_api_key: boolean
  is_active: boolean
  is_default: boolean
  created_by?: string | null
  created_at: string
  updated_at: string
}

export interface BrainSkill {
  id: string
  name: string
  content: string
  is_active: boolean
  is_default: boolean
  created_by?: string | null
  created_at: string
  updated_at: string
}


================================================
FILE: ui/src/lib/types/governance.ts
================================================

// ── Sync ────────────────────────────────────────────────────────
export interface GovernanceSettings {
  sync_enabled: boolean
  updated_at: string
  updated_by: string
  banner_dismissed: boolean
  syncer_running: boolean
}

// Per-connection, per-sync-type watermark and status record.
export interface SyncState {
  id: string
  connection_id: string
  sync_type: 'metadata' | 'query_log' | 'access'
  last_synced_at: string | null
  watermark: string | null
  status: 'idle' | 'running' | 'error'
  last_error: string | null
  row_count: number
  created_at: string
  updated_at: string
}

// Result counters of one sync pass; each section is present only when that
// sync type ran, with a sibling *_error string when it failed.
export interface SyncResult {
  metadata?: { databases_synced: number; tables_synced: number; columns_synced: number; schema_changes: number }
  metadata_error?: string
  query_log?: { queries_ingested: number; lineage_edges_found: number; violations_found: number; new_watermark: string }
  query_log_error?: string
  access?: { users_synced: number; roles_synced: number; grants_synced: number; matrix_entries: number; over_permissions: number }
  access_error?: string
}

// ── Overview ────────────────────────────────────────────────────
export interface GovernanceOverview {
  database_count: number
  table_count: number
  column_count: number
  tagged_table_count: number
  user_count: number
  role_count: number
  query_count_24h: number
lineage_edge_count: number
  policy_count: number
  violation_count: number
  incident_count: number
  schema_change_count: number
  sync_states: SyncState[]
  recent_changes: SchemaChange[]
  recent_violations: PolicyViolation[]
}

// ── Metadata ────────────────────────────────────────────────────
export interface GovDatabase {
  id: string
  connection_id: string
  name: string
  engine: string
  first_seen: string
  last_updated: string
  // Soft-delete marker: object disappeared from the source system.
  is_deleted: boolean
}

export interface GovTable {
  id: string
  connection_id: string
  database_name: string
  table_name: string
  engine: string
  table_uuid: string
  total_rows: number
  total_bytes: number
  partition_count: number
  first_seen: string
  last_updated: string
  is_deleted: boolean
  tags?: string[]
}

export interface GovColumn {
  id: string
  connection_id: string
  database_name: string
  table_name: string
  column_name: string
  column_type: string
  column_position: number
  default_kind: string | null
  default_expression: string | null
  comment: string | null
  first_seen: string
  last_updated: string
  is_deleted: boolean
  tags?: string[]
}

// One detected schema diff (old value → new value) for an object.
export interface SchemaChange {
  id: string
  connection_id: string
  change_type: string
  database_name: string
  table_name: string
  column_name: string
  old_value: string
  new_value: string
  detected_at: string
  created_at: string
}

// ── Query Log ───────────────────────────────────────────────────
export interface QueryLogEntry {
  id: string
  connection_id: string
  query_id: string
  ch_user: string
  query_text: string
  normalized_hash: string
  query_kind: string
  event_time: string
  duration_ms: number
  read_rows: number
  read_bytes: number
  result_rows: number
  written_rows: number
  written_bytes: number
  memory_usage: number
  tables_used: string
  is_error: boolean
  error_message: string | null
}

// Aggregated statistics for queries sharing one normalized hash.
export interface TopQuery {
  normalized_hash: string
  count: number
  avg_duration_ms: number
  total_read_rows: number
  sample_query: string
  last_seen: string
}

// ── Lineage ─────────────────────────────────────────────────────
export interface ColumnLineageEdge {
source_column: string
  target_column: string
}

// Table-to-table lineage edge, optionally carrying column-level edges.
export interface LineageEdge {
  id: string
  source_database: string
  source_table: string
  target_database: string
  target_table: string
  query_id: string
  edge_type: string
  ch_user: string
  detected_at: string
  column_edges?: ColumnLineageEdge[]
}

export interface LineageNode {
  id: string
  database: string
  table: string
  type: 'source' | 'target' | 'current' | 'materialized_view' | 'view' | string
  columns?: GovColumn[]
}

export interface LineageGraph {
  nodes: LineageNode[]
  edges: LineageEdge[]
}

// ── Tags ────────────────────────────────────────────────────────
export interface TagEntry {
  id: string
  connection_id: string
  object_type: 'table' | 'column'
  database_name: string
  table_name: string
  column_name: string
  tag: string
  tagged_by: string
  created_at: string
}

// ── Access ──────────────────────────────────────────────────────
export interface ChUser {
  id: string
  name: string
  auth_type: string | null
  host_ip: string | null
  default_roles: string | null
  first_seen: string
  last_updated: string
}

export interface ChRole {
  id: string
  name: string
  first_seen: string
  last_updated: string
}

// One (user, role, object, privilege) cell of the access matrix.
export interface AccessMatrixEntry {
  id: string
  user_name: string
  role_name: string | null
  database_name: string | null
  table_name: string | null
  privilege: string
  is_direct_grant: boolean
  last_query_time: string | null
}

// A granted privilege flagged as unused / excessive, with the reason.
export interface OverPermission {
  user_name: string
  role_name: string | null
  database_name: string | null
  table_name: string | null
  privilege: string
  last_query_time: string | null
  days_since_query: number | null
  reason: string
}

// ── Policies ────────────────────────────────────────────────────
export interface Policy {
  id: string
  connection_id: string
  name: string
  description: string | null
  object_type: 'database' | 'table' | 'column'
  object_database: string | null
  object_table: string | null
  object_column: string | null
  required_role: string
  severity: string
  enforcement_mode: 'warn' | 'block'
  enabled: boolean
  created_by:
string | null
  created_at: string
  updated_at: string
}

// A recorded policy breach tied to a query-log entry.
export interface PolicyViolation {
  id: string
  connection_id: string
  policy_id: string
  query_log_id: string
  ch_user: string
  violation_detail: string
  severity: string
  detection_phase?: 'post_exec' | 'pre_exec_block' | string
  request_endpoint?: string | null
  detected_at: string
  created_at: string
  policy_name?: string
}

export interface GovernanceObjectComment {
  id: string
  connection_id: string
  object_type: 'table' | 'column' | string
  database_name: string
  table_name: string
  column_name: string
  comment_text: string
  created_by?: string | null
  created_at: string
  updated_at: string
}

// Incident ticket; repeated occurrences are deduped via dedupe_key and
// counted in occurrence_count with first/last seen timestamps.
export interface GovernanceIncident {
  id: string
  connection_id: string
  source_type: 'manual' | 'violation' | 'over_permission' | string
  source_ref?: string | null
  dedupe_key?: string | null
  title: string
  severity: 'info' | 'warn' | 'error' | 'critical' | string
  status: 'open' | 'triaged' | 'in_progress' | 'resolved' | 'dismissed' | string
  assignee?: string | null
  details?: string | null
  resolution_note?: string | null
  occurrence_count: number
  first_seen_at: string
  last_seen_at: string
  resolved_at?: string | null
  created_by?: string | null
  created_at: string
  updated_at: string
}

export interface GovernanceIncidentComment {
  id: string
  incident_id: string
  comment_text: string
  created_by?: string | null
  created_at: string
}


================================================
FILE: ui/src/lib/types/models.ts
================================================

export type Materialization = 'view' | 'table'
export type ModelStatus = 'draft' | 'success' | 'error'
export type RunStatus = 'running' | 'success' | 'partial' | 'error'
export type ResultStatus = 'pending' | 'running' | 'success' | 'error' | 'skipped'

// A SQL model materialized into the target database as a view or table.
export interface Model {
  id: string
  name: string
  description: string
  connection_id: string
  target_database: string
  materialization: Materialization
  sql_body: string
  table_engine: string
  order_by: string
  status: ModelStatus
  last_error: string |
null
  last_run_at: string | null
  created_by: string | null
  created_at: string
  updated_at: string
}

// One execution of the model graph, with per-model outcome counters.
export interface ModelRun {
  id: string
  connection_id: string
  status: RunStatus
  total_models: number
  succeeded: number
  failed: number
  skipped: number
  started_at: string
  finished_at: string | null
  triggered_by: string | null
  created_at: string
}

export interface ModelRunResult {
  id: string
  run_id: string
  model_id: string
  model_name: string
  status: ResultStatus
  resolved_sql: string | null
  elapsed_ms: number
  error: string | null
  started_at: string | null
  finished_at: string | null
  created_at: string
}

export interface DAGNode {
  id: string
  data: {
    name: string
    materialization: Materialization
    status: ModelStatus
    target_database: string
  }
  position: { x: number; y: number }
}

export interface DAGEdge {
  id: string
  source: string
  target: string
}

export interface ModelDAG {
  nodes: DAGNode[]
  edges: DAGEdge[]
}

export interface ValidationError {
  model_id?: string
  model_name?: string
  error: string
}

export interface ValidationResult {
  valid: boolean
  errors: ValidationError[]
}

export interface ModelSchedule {
  id: string
  connection_id: string
  anchor_model_id: string | null
  cron: string
  enabled: boolean
  last_run_at: string | null
  next_run_at: string | null
  last_status: string | null
  last_error: string | null
  created_by: string | null
  created_at: string
  updated_at: string
}

// A group of models anchored by one model, with its optional schedule.
// NOTE(review): name collides with the Pipeline interface in
// types/pipelines.ts — fine as long as both are never imported unaliased
// into the same module; consider renaming one of them.
export interface Pipeline {
  anchor_model_id: string
  model_ids: string[]
  schedule: ModelSchedule | null
}

// Client-side editable fields of a model (camelCase, unlike the API shape).
export interface ModelEditState {
  modelName: string
  description: string
  targetDatabase: string
  materialization: string
  sqlBody: string
  tableEngine: string
  orderBy: string
}


================================================
FILE: ui/src/lib/types/pipelines.ts
================================================

export type PipelineStatus = 'draft' | 'stopped' | 'starting' | 'running' | 'error' | 'stopping'

export type NodeType =
  | 'source_kafka'
  | 'source_webhook'
  | 'source_database'
  | 'source_s3'
  | 'sink_clickhouse'
export interface Pipeline { id: string name: string description: string | null connection_id: string status: PipelineStatus config: string created_by: string | null last_started_at: string | null last_stopped_at: string | null last_error: string | null created_at: string updated_at: string } export interface PipelineNode { id: string pipeline_id: string node_type: NodeType label: string position_x: number position_y: number config_encrypted: string created_at: string updated_at: string } export interface PipelineEdge { id: string pipeline_id: string source_node_id: string target_node_id: string source_handle: string | null target_handle: string | null created_at: string } export interface PipelineGraph { nodes: PipelineNode[] edges: PipelineEdge[] } export interface PipelineRun { id: string pipeline_id: string status: 'running' | 'success' | 'error' | 'stopped' started_at: string finished_at: string | null rows_ingested: number bytes_ingested: number errors_count: number last_error: string | null metrics_json: string created_at: string } export interface PipelineRunLog { id: string run_id: string level: 'debug' | 'info' | 'warn' | 'error' message: string created_at: string } export interface ConnectorFieldDef { key: string label: string type: 'text' | 'password' | 'number' | 'select' | 'textarea' | 'toggle' | 'info' placeholder?: string required?: boolean default?: unknown options?: { value: string; label: string }[] help?: string } export const SOURCE_NODE_TYPES: { type: NodeType; label: string; description: string }[] = [ { type: 'source_kafka', label: 'Kafka', description: 'Stream from Kafka topic' }, { type: 'source_webhook', label: 'Webhook', description: 'Receive HTTP POST events' }, { type: 'source_database', label: 'Database', description: 'Poll from PostgreSQL, MySQL, or SQLite' }, { type: 'source_s3', label: 'S3', description: 'Read files from S3-compatible storage' }, ] export const SINK_NODE_TYPES: { type: NodeType; label: string; description: string 
}[] = [ { type: 'sink_clickhouse', label: 'ClickHouse', description: 'Insert into ClickHouse table' }, ] export const CONNECTOR_FIELDS: Record = { source_kafka: [ { key: 'brokers', label: 'Brokers', type: 'text', placeholder: 'broker1:9092,broker2:9092', required: true, help: 'Comma-separated list of Kafka broker addresses' }, { key: 'topic', label: 'Topic', type: 'text', required: true }, { key: 'consumer_group', label: 'Consumer Group', type: 'text', required: true, default: 'ch-ui-pipeline' }, { key: 'sasl_mechanism', label: 'SASL Mechanism', type: 'select', options: [ { value: '', label: 'None' }, { value: 'PLAIN', label: 'PLAIN' }, { value: 'SCRAM-SHA-256', label: 'SCRAM-SHA-256' }, { value: 'SCRAM-SHA-512', label: 'SCRAM-SHA-512' }, ], default: '' }, { key: 'sasl_username', label: 'SASL Username', type: 'text' }, { key: 'sasl_password', label: 'SASL Password', type: 'password' }, { key: 'use_tls', label: 'Enable TLS', type: 'toggle', default: false }, { key: 'batch_size', label: 'Batch Size', type: 'number', default: 1000, help: 'Records per batch before flushing to ClickHouse' }, { key: 'batch_timeout_ms', label: 'Batch Timeout (ms)', type: 'number', default: 5000 }, ], source_webhook: [ { key: 'webhook_url', label: 'Webhook URL', type: 'info', help: 'POST JSON data to this URL. Include Authorization: Bearer header if auth token is set.' }, { key: 'auth_enabled', label: 'Require Authentication', type: 'toggle', default: false, help: 'When enabled, a Bearer token is generated. Include it in the Authorization header of requests.' 
}, { key: 'batch_size', label: 'Batch Size', type: 'number', default: 100 }, { key: 'batch_timeout_ms', label: 'Batch Timeout (ms)', type: 'number', default: 2000 }, ], source_database: [ { key: 'db_type', label: 'Database Type', type: 'select', required: true, options: [ { value: 'postgres', label: 'PostgreSQL' }, { value: 'mysql', label: 'MySQL' }, { value: 'sqlite', label: 'SQLite' }, ] }, { key: 'connection_string', label: 'Connection String', type: 'password', required: true, placeholder: 'postgres://user:pass@host/db or /path/to/file.db', help: 'For SQLite, use a file path like /data/my.db' }, { key: 'query', label: 'SQL Query', type: 'textarea', required: true, placeholder: 'SELECT * FROM events WHERE id > $1', help: 'Use $1 placeholder with watermark column for incremental polling' }, { key: 'poll_interval', label: 'Poll Interval (seconds)', type: 'number', default: 60, help: 'Seconds between each poll' }, { key: 'watermark_column', label: 'Watermark Column', type: 'text', help: 'Column for incremental polling (e.g. id or created_at)' }, { key: 'batch_size', label: 'Batch Size', type: 'number', default: 1000 }, ], source_s3: [ { key: 'endpoint', label: 'S3 Endpoint', type: 'text', placeholder: 'https://s3.amazonaws.com', help: 'S3-compatible endpoint URL. Leave empty for AWS S3.' 
}, { key: 'region', label: 'Region', type: 'text', default: 'us-east-1' }, { key: 'bucket', label: 'Bucket', type: 'text', required: true }, { key: 'prefix', label: 'Key Prefix', type: 'text', placeholder: 'data/events/' }, { key: 'access_key', label: 'Access Key ID', type: 'password', required: true }, { key: 'secret_key', label: 'Secret Access Key', type: 'password', required: true }, { key: 'format', label: 'File Format', type: 'select', required: true, options: [ { value: 'json', label: 'JSON' }, { value: 'ndjson', label: 'JSON Lines (NDJSON)' }, { value: 'csv', label: 'CSV' }, ], default: 'json' }, { key: 'poll_interval', label: 'Poll Interval (seconds)', type: 'number', default: 300, help: 'Seconds between each poll' }, { key: 'batch_size', label: 'Batch Size', type: 'number', default: 1000 }, ], sink_clickhouse: [ { key: 'database', label: 'Target Database', type: 'text', required: true, default: 'default' }, { key: 'table', label: 'Target Table', type: 'text', required: true }, { key: 'create_table', label: 'Create Table If Not Exists', type: 'toggle', default: false }, { key: 'create_table_engine', label: 'Table Engine', type: 'select', options: [ { value: 'MergeTree', label: 'MergeTree' }, { value: 'ReplacingMergeTree', label: 'ReplacingMergeTree' }, { value: 'SummingMergeTree', label: 'SummingMergeTree' }, ], default: 'MergeTree', help: 'Only used when "Create Table" is enabled' }, { key: 'create_table_order_by', label: 'ORDER BY', type: 'text', placeholder: 'tuple()', help: 'ClickHouse ORDER BY clause' }, ], } ================================================ FILE: ui/src/lib/types/query.ts ================================================ /** Column metadata from ClickHouse */ export interface ColumnMeta { name: string type: string } /** Query result in JSONCompact format (positional arrays) */ export interface CompactResult { meta: ColumnMeta[] data: unknown[][] rows: number statistics?: QueryStats } /** Query execution statistics */ export interface 
QueryStats { elapsed: number rows_read: number bytes_read: number } /** Explorer data response (server-side paginated) */ export interface ExplorerDataResponse { success: boolean meta: ColumnMeta[] data: unknown[][] rows: number total_rows: number page: number page_size: number } /** Legacy query result (JSON format, row objects) */ export interface LegacyQueryResult { success: boolean data: Record[] meta: ColumnMeta[] statistics?: QueryStats rows: number elapsed_ms: number } export interface SampleQueryResult extends LegacyQueryResult { sampling_mode?: 'per_shard' | 'global' warning?: string } export interface QueryPlanNode { id: string parent_id?: string level: number label: string } export interface QueryPlanResult { success: boolean source: string lines: string[] nodes: QueryPlanNode[] } export interface QueryProfileResult { success: boolean available: boolean reason?: string profile?: Record } /** Per-table estimate from EXPLAIN ESTIMATE */ export interface TableEstimate { database: string table: string parts: number rows: number marks: number } /** Query cost estimate result */ export interface QueryEstimateResult { success: boolean tables: TableEstimate[] total_rows: number total_parts: number total_marks: number error?: string } /** NDJSON stream message types */ export type StreamMessage = | { type: 'meta'; meta: ColumnMeta[] } | { type: 'chunk'; data: unknown[][]; seq: number } | { type: 'done'; statistics?: QueryStats; total_rows: number } | { type: 'error'; error: string } ================================================ FILE: ui/src/lib/types/schema.ts ================================================ /** Database in the schema tree */ export interface Database { name: string tables?: Table[] expanded?: boolean loading?: boolean } /** Table in the schema tree */ export interface Table { name: string engine?: string columns?: Column[] expanded?: boolean loading?: boolean } /** Column in the schema tree */ export interface Column { name: string type: 
string default_type?: string default_expression?: string comment?: string } ================================================ FILE: ui/src/lib/utils/calendar.ts ================================================ /** Number of days in a given month (1-12). */ export function daysInMonth(year: number, month: number): number { return new Date(year, month, 0).getDate() } /** Day-of-week (0=Sun..6=Sat) for the 1st of a month (1-12). */ export function firstDayOfWeek(year: number, month: number): number { return new Date(year, month - 1, 1).getDay() } /** * Build a calendar grid for a given month. * Returns rows of 7 cells. Cells outside the month are null. */ export function buildMonthGrid(year: number, month: number): (Date | null)[][] { const total = daysInMonth(year, month) const startDay = firstDayOfWeek(year, month) const grid: (Date | null)[][] = [] let day = 1 for (let row = 0; row < 6; row++) { const week: (Date | null)[] = [] for (let col = 0; col < 7; col++) { if (row === 0 && col < startDay) { week.push(null) } else if (day > total) { week.push(null) } else { week.push(new Date(year, month - 1, day)) day++ } } grid.push(week) if (day > total) break } return grid } /** Navigate months by delta, returns new { year, month }. */ export function shiftMonth(year: number, month: number, delta: number): { year: number; month: number } { const d = new Date(year, month - 1 + delta, 1) return { year: d.getFullYear(), month: d.getMonth() + 1 } } /** Check if two dates are the same calendar day. */ export function isSameDay(a: Date, b: Date): boolean { return ( a.getFullYear() === b.getFullYear() && a.getMonth() === b.getMonth() && a.getDate() === b.getDate() ) } /** Check if date falls within [from, to] inclusive (day-level). 
*/ export function isInRange(date: Date, from: Date, to: Date): boolean { const t = date.getTime() const lo = new Date(from.getFullYear(), from.getMonth(), from.getDate()).getTime() const hi = new Date(to.getFullYear(), to.getMonth(), to.getDate(), 23, 59, 59, 999).getTime() return t >= lo && t <= hi } /** Check if a date is today. */ export function isToday(date: Date): boolean { return isSameDay(date, new Date()) } /** Month name from month number (1-12). */ export function monthName(month: number): string { return new Date(2000, month - 1, 1).toLocaleString('en', { month: 'long' }) } ================================================ FILE: ui/src/lib/utils/ch-types.ts ================================================ /** Map ClickHouse types to display categories for cell rendering */ export type DisplayType = 'number' | 'string' | 'date' | 'bool' | 'json' | 'null' | 'unknown' export function getDisplayType(chType: string): DisplayType { const t = chType.replace(/Nullable\((.+)\)/, '$1').replace(/LowCardinality\((.+)\)/, '$1') if (/^(U?Int|Float|Decimal)/.test(t)) return 'number' if (/^(Date|DateTime)/.test(t)) return 'date' if (/^(Bool)/.test(t)) return 'bool' if (/^(String|FixedString|Enum|UUID|IPv[46])/.test(t)) return 'string' if (/^(Array|Map|Tuple|Nested|JSON)/.test(t)) return 'json' return 'unknown' } /** Check if a value should be right-aligned (numbers) */ export function isRightAligned(chType: string): boolean { return getDisplayType(chType) === 'number' } ================================================ FILE: ui/src/lib/utils/chart-transform.ts ================================================ import type { PanelConfig } from '../types/api' import type uPlot from 'uplot' export interface ColumnMeta { name: string type: string } export const DEFAULT_COLORS = [ '#F97316', // orange '#FB923C', // orange light '#F59E0B', // amber '#D97706', // amber deep '#10B981', // emerald '#84CC16', // lime '#EF4444', // red '#EC4899', // pink ] export const TIME_RANGES = 
[
  { label: 'Last 5m', value: '5m', seconds: 300 },
  { label: 'Last 15m', value: '15m', seconds: 900 },
  { label: 'Last 1h', value: '1h', seconds: 3600 },
  { label: 'Last 6h', value: '6h', seconds: 21600 },
  { label: 'Last 24h', value: '24h', seconds: 86400 },
  { label: 'Last 7d', value: '7d', seconds: 604800 },
  { label: 'Last 30d', value: '30d', seconds: 2592000 },
]

/** Entry in the extended time-range picker, grouped for display. */
export interface ExtendedPreset {
  label: string
  value: string
  group: 'recent' | 'named' | 'duration'
}

export const EXTENDED_PRESETS: ExtendedPreset[] = [
  { label: 'Last 5 minutes', value: '5m', group: 'recent' },
  { label: 'Last 15 minutes', value: '15m', group: 'recent' },
  { label: 'Last 30 minutes', value: '30m', group: 'recent' },
  { label: 'Last 1 hour', value: '1h', group: 'recent' },
  { label: 'Last 3 hours', value: '3h', group: 'recent' },
  { label: 'Last 6 hours', value: '6h', group: 'recent' },
  { label: 'Last 12 hours', value: '12h', group: 'recent' },
  { label: 'Last 24 hours', value: '24h', group: 'recent' },
  { label: 'Today', value: 'preset:today', group: 'named' },
  { label: 'Yesterday', value: 'preset:yesterday', group: 'named' },
  { label: 'This Week', value: 'preset:this-week', group: 'named' },
  { label: 'Last Week', value: 'preset:last-week', group: 'named' },
  { label: 'This Month', value: 'preset:this-month', group: 'named' },
  { label: 'Last Month', value: 'preset:last-month', group: 'named' },
  { label: 'Last 7 days', value: '7d', group: 'duration' },
  { label: 'Last 30 days', value: '30d', group: 'duration' },
  { label: 'Last 3 Months', value: 'preset:last-3-months', group: 'named' },
  { label: 'Last 6 Months', value: 'preset:last-6-months', group: 'named' },
]

// Exact ClickHouse date/time type names (fast Set lookup before the regex fallback).
const DATE_TYPES = new Set([
  'Date', 'Date32', 'DateTime', 'DateTime64',
  'Nullable(Date)', 'Nullable(Date32)', 'Nullable(DateTime)', 'Nullable(DateTime64)',
])

// Exact ClickHouse numeric type names.
const NUMERIC_TYPES = new Set([
  'UInt8', 'UInt16', 'UInt32', 'UInt64', 'UInt128', 'UInt256',
  'Int8', 'Int16', 'Int32', 'Int64', 'Int128', 'Int256',
  'Float32', 'Float64',
  'Decimal',
]) export function isDateType(chType: string): boolean { if (DATE_TYPES.has(chType)) return true return /^(Nullable\()?(Date|DateTime)/.test(chType) } export function isNumericType(chType: string): boolean { const base = chType.replace(/^Nullable\(/, '').replace(/\)$/, '') if (NUMERIC_TYPES.has(base)) return true return /^(U?Int|Float|Decimal)/.test(base) } /** True when x column is categorical (not date, not numeric — i.e. String). */ export function isCategoricalX(meta: ColumnMeta[], xColumn: string): boolean { const xMeta = meta.find(m => m.name === xColumn) if (!xMeta) return false return !isDateType(xMeta.type) && !isNumericType(xMeta.type) } /** * Transform dashboard API row-objects into uPlot's AlignedData format. * Returns [xValues[], y1Values[], y2Values[], ...] */ export function toUPlotData( data: Record[], meta: ColumnMeta[], config: PanelConfig, ): uPlot.AlignedData { if (!data.length || !config.xColumn || !config.yColumns?.length) { return [new Float64Array(0)] } const xCol = config.xColumn const xMeta = meta.find(m => m.name === xCol) const isTime = xMeta ? isDateType(xMeta.type) : false const xArr = new Float64Array(data.length) for (let i = 0; i < data.length; i++) { const raw = data[i][xCol] if (isTime) { const ts = new Date(raw as string).getTime() xArr[i] = ts / 1000 // uPlot uses unix seconds } else { xArr[i] = Number(raw) || i } } const series: uPlot.AlignedData = [xArr] for (const yCol of config.yColumns) { const yArr = new Float64Array(data.length) for (let i = 0; i < data.length; i++) { yArr[i] = Number(data[i][yCol]) || 0 } series.push(yArr) } return series } /** Extract single stat value from first row, first column */ export function getStatValue(data: Record[], meta: ColumnMeta[]): string { if (data.length > 0 && meta.length > 0) { const key = meta[0].name const val = data[0][key] ?? 
data[0][Object.keys(data[0])[0]]
    if (val === null || val === undefined) return '--'
    const num = Number(val)
    if (!isNaN(num)) {
      return num.toLocaleString()
    }
    return String(val)
  }
  return '--'
}

================================================ FILE: ui/src/lib/utils/dashboard-time.test.ts ================================================

import { describe, expect, it } from 'vitest'
import {
  decodeAbsoluteDashboardRange,
  encodeAbsoluteDashboardRange,
  formatDashboardTimeRangeLabel,
  resolveNamedPreset,
  toDashboardTimeRangePayload,
} from './dashboard-time'

describe('dashboard-time', () => {
  // Round-trip: encoding an absolute range and decoding it yields the same pair.
  it('encodes and decodes absolute ranges', () => {
    const from = '2026-01-01T00:00:00.000Z'
    const to = '2026-01-01T01:00:00.000Z'
    const encoded = encodeAbsoluteDashboardRange(from, to)
    expect(encoded).toBe(`abs:${from}|${to}`)
    expect(decodeAbsoluteDashboardRange(encoded)).toEqual({ from, to })
  })
  it('parses shorthand relative tokens', () => {
    expect(toDashboardTimeRangePayload('5min')).toEqual({
      type: 'relative',
      from: '5m',
      to: 'now',
    })
  })
  it('parses explicit relative ranges', () => {
    expect(toDashboardTimeRangePayload('now-2h to now')).toEqual({
      type: 'relative',
      from: 'now-2h',
      to: 'now',
    })
  })
  it('parses absolute range strings', () => {
    const res = toDashboardTimeRangePayload('2026-01-01T00:00:00Z to 2026-01-01T01:00:00Z')
    expect(res.type).toBe('absolute')
    expect(res.from).toBe('2026-01-01T00:00:00.000Z')
    expect(res.to).toBe('2026-01-01T01:00:00.000Z')
  })
  it('formats common labels', () => {
    expect(formatDashboardTimeRangeLabel('1h')).toBe('Last 1h')
    expect(formatDashboardTimeRangeLabel('7d')).toBe('Last 7d')
  })
  it('formats new duration labels', () => {
    expect(formatDashboardTimeRangeLabel('30m')).toBe('Last 30m')
    expect(formatDashboardTimeRangeLabel('3h')).toBe('Last 3h')
    expect(formatDashboardTimeRangeLabel('12h')).toBe('Last 12h')
  })
  it('formats named preset labels', () => {
    expect(formatDashboardTimeRangeLabel('preset:today')).toBe('Today')
    expect(formatDashboardTimeRangeLabel('preset:yesterday')).toBe('Yesterday')
    expect(formatDashboardTimeRangeLabel('preset:this-week')).toBe('This Week')
    expect(formatDashboardTimeRangeLabel('preset:last-week')).toBe('Last Week')
    expect(formatDashboardTimeRangeLabel('preset:this-month')).toBe('This Month')
    expect(formatDashboardTimeRangeLabel('preset:last-month')).toBe('Last Month')
    expect(formatDashboardTimeRangeLabel('preset:last-3-months')).toBe('Last 3 Months')
    expect(formatDashboardTimeRangeLabel('preset:last-6-months')).toBe('Last 6 Months')
  })
  // Named presets resolve relative to "now", so these assert structural
  // properties (day/week/month boundaries) rather than fixed timestamps.
  describe('resolveNamedPreset', () => {
    it('resolves preset:today to absolute range', () => {
      const result = resolveNamedPreset('preset:today')
      expect(result).not.toBeNull()
      const from = new Date(result!.from)
      const to = new Date(result!.to)
      expect(from.getHours() + from.getMinutes() + from.getSeconds()).toBe(0)
      expect(to.getTime()).toBeLessThanOrEqual(Date.now())
      expect(to.getTime()).toBeGreaterThan(from.getTime())
    })
    it('resolves preset:yesterday', () => {
      const result = resolveNamedPreset('preset:yesterday')
      expect(result).not.toBeNull()
      const from = new Date(result!.from)
      const to = new Date(result!.to)
      expect(to.getTime() - from.getTime()).toBe(86400000) // exactly 1 day
    })
    it('resolves preset:this-week', () => {
      const result = resolveNamedPreset('preset:this-week')
      expect(result).not.toBeNull()
      const from = new Date(result!.from)
      expect(from.getDay()).toBe(0) // starts on Sunday
    })
    it('resolves preset:last-week', () => {
      const result = resolveNamedPreset('preset:last-week')
      expect(result).not.toBeNull()
      const from = new Date(result!.from)
      const to = new Date(result!.to)
      expect(from.getDay()).toBe(0)
      expect(to.getDay()).toBe(0)
      expect(to.getTime() - from.getTime()).toBe(604800000) // exactly 7 days
    })
    it('resolves preset:this-month', () => {
      const result = resolveNamedPreset('preset:this-month')
      expect(result).not.toBeNull()
      const from = new Date(result!.from)
      expect(from.getDate()).toBe(1)
    })
    it('resolves preset:last-month', () => {
      const result = resolveNamedPreset('preset:last-month')
      expect(result).not.toBeNull()
      const from = new Date(result!.from)
      const to = new Date(result!.to)
      expect(from.getDate()).toBe(1)
      expect(to.getDate()).toBe(1)
    })
    it('resolves preset:last-3-months and preset:last-6-months', () => {
      const r3 = resolveNamedPreset('preset:last-3-months')
      const r6 = resolveNamedPreset('preset:last-6-months')
      expect(r3).not.toBeNull()
      expect(r6).not.toBeNull()
      expect(new Date(r6!.from).getTime()).toBeLessThan(new Date(r3!.from).getTime())
    })
    it('returns null for unknown presets', () => {
      expect(resolveNamedPreset('preset:unknown')).toBeNull()
      expect(resolveNamedPreset('not-a-preset')).toBeNull()
    })
  })
  describe('toDashboardTimeRangePayload with named presets', () => {
    it('resolves preset:today to absolute payload', () => {
      const result = toDashboardTimeRangePayload('preset:today')
      expect(result.type).toBe('absolute')
      expect(result.from).toBeTruthy()
      expect(result.to).toBeTruthy()
    })
    it('resolves preset:yesterday to absolute payload', () => {
      const result = toDashboardTimeRangePayload('preset:yesterday')
      expect(result.type).toBe('absolute')
    })
  })
})

================================================ FILE: ui/src/lib/utils/dashboard-time.ts ================================================

// Wire format for a dashboard time range: either a relative pair
// (e.g. from "now-1h" to "now") or two absolute ISO timestamps.
export interface DashboardTimeRangePayload {
  type: 'relative' | 'absolute'
  from: string
  to: string
}

// Matches shorthand relative tokens like "5m", "now-2h", "15 min".
const relativeToken = /^(?:now-)?\s*\d+\s*[a-zA-Z]+$/

// Encode an absolute range as a single opaque string ("abs:<from>|<to>").
export function encodeAbsoluteDashboardRange(fromISO: string, toISO: string): string {
  return `abs:${fromISO}|${toISO}`
}

// Inverse of encodeAbsoluteDashboardRange; null when the string is not an
// "abs:" token or either side is empty.
export function decodeAbsoluteDashboardRange(value: string): { from: string; to: string } | null {
  const trimmed = value.trim()
  if (!trimmed.startsWith('abs:')) return null
  const payload = trimmed.slice(4)
  const sep = payload.indexOf('|')
  if (sep <= 0) return null
  const from = payload.slice(0, sep).trim()
  const to = payload.slice(sep + 1).trim()
  if (!from || !to) return null
  return { from, to }
}

function
normalizeRelative(value: string, fallback: string): string { const trimmed = value.trim() if (!trimmed) return fallback const lower = trimmed.toLowerCase() if (lower === 'now') return 'now' // normalize forms like "5min", "now-5 minutes", "2hrs" const match = lower.match(/^(now-)?\s*(\d+)\s*([a-z]+)$/) if (!match) { return fallback } const prefix = match[1] ? 'now-' : '' const amount = match[2] const rawUnit = match[3] let unit = 'm' if (rawUnit === 's' || rawUnit === 'sec' || rawUnit === 'secs' || rawUnit === 'second' || rawUnit === 'seconds') { unit = 's' } else if (rawUnit === 'm' || rawUnit === 'min' || rawUnit === 'mins' || rawUnit === 'minute' || rawUnit === 'minutes') { unit = 'm' } else if (rawUnit === 'h' || rawUnit === 'hr' || rawUnit === 'hrs' || rawUnit === 'hour' || rawUnit === 'hours') { unit = 'h' } else if (rawUnit === 'd' || rawUnit === 'day' || rawUnit === 'days') { unit = 'd' } else if (rawUnit === 'w' || rawUnit === 'week' || rawUnit === 'weeks') { unit = 'w' } else if (rawUnit === 'mo' || rawUnit === 'mon' || rawUnit === 'month' || rawUnit === 'months' || rawUnit === 'mth') { unit = 'M' } else if (rawUnit === 'y' || rawUnit === 'yr' || rawUnit === 'yrs' || rawUnit === 'year' || rawUnit === 'years') { unit = 'y' } return `${prefix}${amount}${unit}` } function isAbsoluteToken(value: string): boolean { if (!value) return false if (value.toLowerCase().startsWith('now')) return false return !Number.isNaN(Date.parse(value)) } // ── Named preset resolution ──────────────────────────────── const PRESET_LABELS: Record = { 'preset:today': 'Today', 'preset:yesterday': 'Yesterday', 'preset:this-week': 'This Week', 'preset:last-week': 'Last Week', 'preset:this-month': 'This Month', 'preset:last-month': 'Last Month', 'preset:last-3-months': 'Last 3 Months', 'preset:last-6-months': 'Last 6 Months', } const DURATION_LABELS: Record = { '5m': 'Last 5m', '15m': 'Last 15m', '30m': 'Last 30m', '1h': 'Last 1h', '3h': 'Last 3h', '6h': 'Last 6h', '12h': 'Last 12h', 
'24h': 'Last 24h', '7d': 'Last 7d', '30d': 'Last 30d', } export function resolveNamedPreset(name: string): { from: string; to: string } | null { const now = new Date() const startOfDay = new Date(now.getFullYear(), now.getMonth(), now.getDate()) switch (name) { case 'preset:today': return { from: startOfDay.toISOString(), to: now.toISOString() } case 'preset:yesterday': { const yd = new Date(startOfDay) yd.setDate(yd.getDate() - 1) return { from: yd.toISOString(), to: startOfDay.toISOString() } } case 'preset:this-week': { const dow = now.getDay() const startOfWeek = new Date(startOfDay) startOfWeek.setDate(startOfWeek.getDate() - dow) return { from: startOfWeek.toISOString(), to: now.toISOString() } } case 'preset:last-week': { const dow = now.getDay() const endOfLastWeek = new Date(startOfDay) endOfLastWeek.setDate(endOfLastWeek.getDate() - dow) const startOfLastWeek = new Date(endOfLastWeek) startOfLastWeek.setDate(startOfLastWeek.getDate() - 7) return { from: startOfLastWeek.toISOString(), to: endOfLastWeek.toISOString() } } case 'preset:this-month': { const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1) return { from: startOfMonth.toISOString(), to: now.toISOString() } } case 'preset:last-month': { const startOfThisMonth = new Date(now.getFullYear(), now.getMonth(), 1) const startOfLastMonth = new Date(now.getFullYear(), now.getMonth() - 1, 1) return { from: startOfLastMonth.toISOString(), to: startOfThisMonth.toISOString() } } case 'preset:last-3-months': { const d = new Date(now) d.setMonth(d.getMonth() - 3) return { from: d.toISOString(), to: now.toISOString() } } case 'preset:last-6-months': { const d = new Date(now) d.setMonth(d.getMonth() - 6) return { from: d.toISOString(), to: now.toISOString() } } default: return null } } // ── Public API ────────────────────────────────────────────── export function formatDashboardTimeRangeLabel(value: string): string { const absolute = decodeAbsoluteDashboardRange(value) if (absolute) { const from = 
new Date(absolute.from).toLocaleString() const to = new Date(absolute.to).toLocaleString() return `${from} -> ${to}` } const trimmed = value.trim() if (!trimmed) return 'Last 1h' if (PRESET_LABELS[trimmed]) return PRESET_LABELS[trimmed] if (DURATION_LABELS[trimmed]) return DURATION_LABELS[trimmed] if (trimmed.includes(' to ')) return trimmed return trimmed } export function toDashboardTimeRangePayload(value: string): DashboardTimeRangePayload { const trimmed = value.trim() // Named presets — resolved at query time to absolute ranges if (trimmed.startsWith('preset:')) { const resolved = resolveNamedPreset(trimmed) if (resolved) { return { type: 'absolute', from: resolved.from, to: resolved.to } } } const absolute = decodeAbsoluteDashboardRange(trimmed) if (absolute) { return { type: 'absolute', from: absolute.from, to: absolute.to, } } if (!trimmed) { return { type: 'relative', from: '1h', to: 'now', } } if (trimmed.includes(' to ')) { const [rawFrom, rawTo] = trimmed.split(/\s+to\s+/i).map((part) => part.trim()) if (isAbsoluteToken(rawFrom) && isAbsoluteToken(rawTo)) { return { type: 'absolute', from: new Date(rawFrom).toISOString(), to: new Date(rawTo).toISOString(), } } return { type: 'relative', from: normalizeRelative(rawFrom, '1h'), to: normalizeRelative(rawTo, 'now'), } } // Accept Grafana-style shorthand like "now-5m", "now-5min", "5m" if (trimmed.toLowerCase().startsWith('now-') || relativeToken.test(trimmed)) { return { type: 'relative', from: normalizeRelative(trimmed, '1h'), to: 'now', } } return { type: 'relative', from: normalizeRelative(trimmed, '1h'), to: 'now', } } ================================================ FILE: ui/src/lib/utils/export.ts ================================================ import type { ColumnMeta } from '../types/query' function normalizeScalar(val: unknown): string { if (val === null || val === undefined) return '' if (typeof val === 'object') return JSON.stringify(val) return String(val) } function escapeDelimited(val: 
unknown, delimiter: ',' | '\t'): string { let s = normalizeScalar(val) // Prevent CSV formula injection: prefix dangerous leading characters with a single quote // so spreadsheet applications don't interpret them as formulas if (s.length > 0 && /^[=+\-@\t\r]/.test(s)) { s = "'" + s } if (s.includes(delimiter) || s.includes('"') || s.includes('\n') || s.includes('\r')) { return '"' + s.replace(/"/g, '""') + '"' } return s } function rowsToObjects(meta: ColumnMeta[], data: unknown[][]): Record[] { const names = meta.map(c => c.name) return data.map((row) => { const obj: Record = {} for (let j = 0; j < names.length; j++) obj[names[j]] = row[j] ?? null return obj }) } /** Generate RFC 4180 compliant CSV from query results */ export function generateCSV(meta: ColumnMeta[], data: unknown[][]): string { const header = meta.map(c => escapeDelimited(c.name, ',')).join(',') const rows = data.map(row => row.map(v => escapeDelimited(v, ',')).join(',')) return header + '\n' + rows.join('\n') } /** Generate TSV (TabSeparated) */ export function generateTSV(meta: ColumnMeta[], data: unknown[][]): string { const header = meta.map(c => escapeDelimited(c.name, '\t')).join('\t') const rows = data.map(row => row.map(v => escapeDelimited(v, '\t')).join('\t')) return header + '\n' + rows.join('\n') } /** Generate JSONEachRow / JSONLines from query results */ export function generateJSONLines(meta: ColumnMeta[], data: unknown[][]): string { return rowsToObjects(meta, data).map((obj) => JSON.stringify(obj)).join('\n') } /** Generate JSON from query results (array of objects) */ export function generateJSON(meta: ColumnMeta[], data: unknown[][]): string { return JSON.stringify(rowsToObjects(meta, data), null, 2) } /** Generate JSONCompact-like payload */ export function generateJSONCompact(meta: ColumnMeta[], data: unknown[][]): string { return JSON.stringify({ meta, data, rows: data.length, }, null, 2) } /** Generate markdown table */ export function generateMarkdown(meta: ColumnMeta[], 
data: unknown[][]): string { if (!meta.length) return '' const header = '| ' + meta.map((c) => c.name).join(' | ') + ' |' const separator = '| ' + meta.map(() => '---').join(' | ') + ' |' const rows = data.map((row) => { const cells = row.map((v) => normalizeScalar(v).replace(/\|/g, '\\|').replace(/\n/g, '
')) return '| ' + cells.join(' | ') + ' |' }) return [header, separator, ...rows].join('\n') } function escapeSQLString(value: string): string { return value.replace(/\\/g, '\\\\').replace(/'/g, "\\'") } /** Generate INSERT INTO ... VALUES SQL */ export function generateSQLInsert(meta: ColumnMeta[], data: unknown[][], table = 'result_set'): string { if (!meta.length) return '' const columns = meta.map((c) => `\`${c.name.replace(/`/g, '``')}\``).join(', ') const values = data.map((row) => { const fields = row.map((v) => { if (v === null || v === undefined) return 'NULL' if (typeof v === 'number' && Number.isFinite(v)) return String(v) if (typeof v === 'boolean') return v ? '1' : '0' // Large integers are stored as strings by safeParse; emit them unquoted if (typeof v === 'string' && /^-?\d+$/.test(v)) return v if (typeof v === 'object') return `'${escapeSQLString(JSON.stringify(v))}'` return `'${escapeSQLString(String(v))}'` }) return `(${fields.join(', ')})` }) return `INSERT INTO \`${table.replace(/`/g, '``')}\` (${columns}) VALUES\n${values.join(',\n')};` } /** Generate lightweight XML export */ export function generateXML(meta: ColumnMeta[], data: unknown[][]): string { const rows = rowsToObjects(meta, data) const xmlTagName = (name: string): string => { const cleaned = name.replace(/[^a-zA-Z0-9_.-]/g, '_') return /^[a-zA-Z_]/.test(cleaned) ? cleaned : `c_${cleaned}` } const escapeXml = (s: string): string => s .replace(/&/g, '&') .replace(//g, '>') .replace(/"/g, '"') .replace(/'/g, ''') const xmlRows = rows.map((row) => { const cols = meta.map((col) => { const value = row[col.name] const content = value === null || value === undefined ? '' : escapeXml(typeof value === 'object' ? 
JSON.stringify(value) : String(value)) const tag = xmlTagName(col.name) return ` <${tag}>${content}` }).join('\n') return ` \n${cols}\n ` }).join('\n') return `\n\n${xmlRows}\n` } /** Copy text to clipboard */ export async function copyToClipboard(text: string): Promise { await navigator.clipboard.writeText(text) } /** Download content as a file */ export function downloadFile(content: string, filename: string, mimeType: string): void { const blob = new Blob([content], { type: mimeType }) const url = URL.createObjectURL(blob) const a = document.createElement('a') a.href = url a.download = filename document.body.appendChild(a) a.click() document.body.removeChild(a) URL.revokeObjectURL(url) } ================================================ FILE: ui/src/lib/utils/format.ts ================================================ /** Format a number with locale-aware separators */ export function formatNumber(n: number): string { return n.toLocaleString() } /** Format bytes to human readable (KB, MB, GB) */ export function formatBytes(bytes: number): string { if (bytes === 0) return '0 B' const units = ['B', 'KB', 'MB', 'GB', 'TB'] const i = Math.floor(Math.log(bytes) / Math.log(1024)) const value = bytes / Math.pow(1024, i) return `${value.toFixed(i === 0 ? 
0 : 1)} ${units[i]}` } /** Format elapsed seconds to human readable */ export function formatElapsed(seconds: number): string { if (seconds < 0.001) return '<1ms' if (seconds < 1) return `${(seconds * 1000).toFixed(0)}ms` if (seconds < 60) return `${seconds.toFixed(2)}s` const mins = Math.floor(seconds / 60) const secs = seconds % 60 return `${mins}m ${secs.toFixed(0)}s` } /** Format a duration in milliseconds */ export function formatDuration(ms: number): string { return formatElapsed(ms / 1000) } ================================================ FILE: ui/src/lib/utils/grid-layout.ts ================================================ /** Dashboard grid layout utilities — pure functions, no framework dependency */ export const COLS = 12 export const ROW_H = 60 export const GAP = 16 export const MIN_W = 2 export const MIN_H = 2 export interface LayoutItem { id: string x: number y: number w: number h: number } /** Convert grid coordinates to absolute pixel position */ export function gridToPixel( item: { x: number; y: number; w: number; h: number }, colW: number, ): { left: number; top: number; width: number; height: number } { return { left: item.x * (colW + GAP), top: item.y * (ROW_H + GAP), width: item.w * (colW + GAP) - GAP, height: item.h * (ROW_H + GAP) - GAP, } } /** Compute column width from container width */ export function calcColW(containerWidth: number): number { return (containerWidth - GAP * (COLS - 1)) / COLS } /** AABB overlap test */ export function rectsOverlap(a: LayoutItem, b: LayoutItem): boolean { if (a.id === b.id) return false return ( a.x < b.x + b.w && a.x + a.w > b.x && a.y < b.y + b.h && a.y + a.h > b.y ) } /** * Compact layout: resolve collisions and apply gravity. * The moved panel (movedId) keeps its target position; * overlapping panels are pushed below it, then all panels * are pulled upward as far as possible. 
*/ export function compact(items: LayoutItem[], movedId?: string): LayoutItem[] { const result = items.map(i => ({ ...i })) const moved = movedId ? result.find(i => i.id === movedId) : undefined // Push panels that overlap the moved panel below it if (moved) { for (const item of result) { if (item.id === movedId) continue if (rectsOverlap(moved, item)) { item.y = moved.y + moved.h } } // Cascade: resolve secondary overlaps caused by pushing const others = result.filter(i => i.id !== movedId).sort((a, b) => a.y - b.y || a.x - b.x) for (let i = 0; i < others.length; i++) { for (let j = i + 1; j < others.length; j++) { if (rectsOverlap(others[i], others[j])) { others[j].y = others[i].y + others[i].h } } } } // Sort by y then x for gravity pass (moved item excluded from sorting priority) const sorted = [...result].sort((a, b) => a.y - b.y || a.x - b.x) // Gravity: pull each non-moved panel upward as far as possible let changed = true while (changed) { changed = false for (const item of sorted) { if (item.id === movedId) continue while (item.y > 0) { const test = { ...item, y: item.y - 1 } const collides = result.some( o => o.id !== item.id && rectsOverlap(test, o), ) if (!collides) { item.y-- changed = true } else { break } } } } return result } /** Compute the total grid height in pixels from the bottommost panel */ export function containerHeight(items: LayoutItem[]): number { if (items.length === 0) return ROW_H const maxBottom = Math.max(...items.map(i => i.y + i.h)) return maxBottom * (ROW_H + GAP) } ================================================ FILE: ui/src/lib/utils/lineage-layout.ts ================================================ import type { Node } from '@xyflow/svelte' import type { LineageNode, LineageEdge } from '../types/governance' const LAYER_GAP = 300 const NODE_GAP = 140 const NODE_WIDTH = 220 export interface LineageFlowNode extends Node { data: { database: string table: string nodeType: string columns: LineageNode['columns'] linkedColumns: 
string[] } } /** * Assigns left-to-right layered positions to lineage nodes using BFS. * Returns SvelteFlow-compatible Node objects with computed positions. */ export function layoutLineageGraph( nodes: LineageNode[], edges: LineageEdge[], ): LineageFlowNode[] { if (nodes.length === 0) return [] // Build adjacency: source → targets const outgoing = new Map() const incoming = new Map>() // Collect all linked columns per node from column_edges const linkedColumnsMap = new Map>() for (const node of nodes) { outgoing.set(node.id, []) incoming.set(node.id, new Set()) linkedColumnsMap.set(node.id, new Set()) } for (const edge of edges) { const srcKey = `${edge.source_database}.${edge.source_table}` const tgtKey = `${edge.target_database}.${edge.target_table}` outgoing.get(srcKey)?.push(tgtKey) incoming.get(tgtKey)?.add(srcKey) // Track linked columns if (edge.column_edges) { for (const ce of edge.column_edges) { linkedColumnsMap.get(srcKey)?.add(ce.source_column) linkedColumnsMap.get(tgtKey)?.add(ce.target_column) } } } // BFS layering from roots (nodes with no incoming edges) const layers = new Map() const queue: string[] = [] for (const node of nodes) { const inc = incoming.get(node.id) if (!inc || inc.size === 0) { layers.set(node.id, 0) queue.push(node.id) } } // If no roots found (cycle), assign all to layer 0 if (queue.length === 0) { for (const node of nodes) { layers.set(node.id, 0) queue.push(node.id) } } let head = 0 while (head < queue.length) { const current = queue[head++] const currentLayer = layers.get(current) ?? 0 for (const target of outgoing.get(current) ?? []) { const existingLayer = layers.get(target) if (existingLayer === undefined || existingLayer < currentLayer + 1) { layers.set(target, currentLayer + 1) queue.push(target) } } } // Group nodes by layer const layerGroups = new Map() for (const node of nodes) { const layer = layers.get(node.id) ?? 0 const group = layerGroups.get(layer) ?? 
[] group.push(node) layerGroups.set(layer, group) } // Assign positions const nodeMap = new Map(nodes.map((n) => [n.id, n])) const flowNodes: LineageFlowNode[] = [] for (const [layer, group] of layerGroups) { const x = layer * LAYER_GAP const totalHeight = group.length * NODE_GAP const startY = -totalHeight / 2 for (let i = 0; i < group.length; i++) { const node = group[i] const linked = linkedColumnsMap.get(node.id) flowNodes.push({ id: node.id, type: 'lineageTable', position: { x, y: startY + i * NODE_GAP }, width: NODE_WIDTH, data: { database: node.database, table: node.table, nodeType: node.type, columns: node.columns ?? [], linkedColumns: linked ? [...linked] : [], }, }) } } return flowNodes } ================================================ FILE: ui/src/lib/utils/safe-json.ts ================================================ /** * safe-json.ts * * Precision-safe JSON parsing for ClickHouse results. * * Problem: JavaScript's JSON.parse() converts all numbers to IEEE 754 Float64, * which only has ~15.9 significant digits. ClickHouse UInt64/Int64 values like * order IDs (e.g. 816687988383154176) are silently rounded to a phantom value * (816687988383154200), making them useless as identifiers. * * Solution: Intercept large integers before they lose precision: * 1. Primary: Use TC39 Stage 4 reviver `context.source` (native, zero-cost). * Supported in Chrome 114+, Firefox 135+, Safari 18.4+ (~86% of users). * 2. Fallback: json-bigint with `storeAsString: true` for older browsers. * * Large integers are returned as strings. Consumer code must handle both * `number` (safe integers) and `string` (large integers) for numeric columns. */ import JSONbig from 'json-bigint' // Feature-detect TC39 reviver context.source support // The 3rd `context` parameter is TC39 Stage 4 but not yet in TypeScript's lib types. 
let hasReviverSource = false try { const probe = (_key: string, _value: unknown, ctx: { source?: string }): unknown => { if (typeof ctx?.source === 'string') hasReviverSource = true return _value } JSON.parse('1', probe as (key: string, value: unknown) => unknown) } catch { // Older environments may throw on the extra argument; fallback is used } // Lazy-initialised fallback parser (json-bigint allocates a parser object) let _fallbackParser: ReturnType | null = null function getFallbackParser() { if (!_fallbackParser) { _fallbackParser = JSONbig({ storeAsString: true }) } return _fallbackParser } /** * Parse a JSON string with precision-safe handling of large integers. * * Safe integers (|n| <= 2^53 - 1) are returned as `number`, exactly as * standard JSON.parse does. Large integers are returned as `string` to * preserve all digits. Everything else is unchanged. */ export function safeParse(text: string): any { if (hasReviverSource) { const reviver = (_key: string, value: unknown, ctx: { source: string }): unknown => { // ctx.source is the raw token string from the original JSON text. // If the parsed value rounded (i.e., it's a number that isn't a safe // integer) and the raw source was a plain integer literal, keep the // raw string so no precision is lost. 
if ( typeof value === 'number' && !Number.isSafeInteger(value) && /^-?\d+$/.test(ctx.source) ) { return ctx.source } return value } return JSON.parse(text, reviver as (key: string, value: unknown) => unknown) } // Fallback for browsers without reviver context.source support return getFallbackParser().parse(text) } ================================================ FILE: ui/src/lib/utils/sql.ts ================================================ const WRITE_PATTERN = /^\s*(INSERT|CREATE|DROP|ALTER|TRUNCATE|RENAME|ATTACH|DETACH|OPTIMIZE|GRANT|REVOKE|KILL|SYSTEM|SET|USE)\b/i /** Check if a query is a write (DDL/DML) operation */ export function isWriteQuery(query: string): boolean { // Strip leading SQL comments const stripped = query.replace(/^\s*--.*$/gm, '').trim() return WRITE_PATTERN.test(stripped) } ================================================ FILE: ui/src/lib/utils/stats.ts ================================================ import type { ColumnMeta } from '../types/query' import { getDisplayType, type DisplayType } from './ch-types' export interface ColumnStats { name: string type: string displayType: DisplayType count: number nulls: number nullPct: number // numeric min?: number max?: number avg?: number sum?: number // string minLen?: number maxLen?: number avgLen?: number distinct?: number // date earliest?: string latest?: string } const DISTINCT_SAMPLE = 10000 /** Compute per-column statistics in a single pass */ export function computeColumnStats(meta: ColumnMeta[], data: unknown[][]): ColumnStats[] { return meta.map((col, ci) => { const dt = getDisplayType(col.type) const total = data.length let nulls = 0 if (dt === 'number') { let min = Infinity let max = -Infinity let sum = 0 let numCount = 0 for (let r = 0; r < total; r++) { const v = data[r][ci] if (v === null || v === undefined || v === '') { nulls++; continue } const n = Number(v) if (Number.isNaN(n)) { nulls++; continue } numCount++ sum += n if (n < min) min = n if (n > max) max = n } return { name: 
col.name, type: col.type, displayType: dt, count: total, nulls, nullPct: total > 0 ? (nulls / total) * 100 : 0, min: numCount > 0 ? min : undefined, max: numCount > 0 ? max : undefined, avg: numCount > 0 ? sum / numCount : undefined, sum: numCount > 0 ? sum : undefined, } } if (dt === 'string') { let minLen = Infinity let maxLen = 0 let totalLen = 0 let strCount = 0 const seen = new Set() const sampleLimit = Math.min(total, DISTINCT_SAMPLE) for (let r = 0; r < total; r++) { const v = data[r][ci] if (v === null || v === undefined) { nulls++; continue } const s = String(v) strCount++ totalLen += s.length if (s.length < minLen) minLen = s.length if (s.length > maxLen) maxLen = s.length if (r < sampleLimit) seen.add(s) } return { name: col.name, type: col.type, displayType: dt, count: total, nulls, nullPct: total > 0 ? (nulls / total) * 100 : 0, minLen: strCount > 0 ? minLen : undefined, maxLen: strCount > 0 ? maxLen : undefined, avgLen: strCount > 0 ? totalLen / strCount : undefined, distinct: strCount > 0 ? seen.size : undefined, } } if (dt === 'date') { let earliest = '' let latest = '' for (let r = 0; r < total; r++) { const v = data[r][ci] if (v === null || v === undefined || v === '') { nulls++; continue } const s = String(v) if (!earliest || s < earliest) earliest = s if (!latest || s > latest) latest = s } return { name: col.name, type: col.type, displayType: dt, count: total, nulls, nullPct: total > 0 ? (nulls / total) * 100 : 0, earliest: earliest || undefined, latest: latest || undefined, } } // bool / json / unknown — just count + nulls for (let r = 0; r < total; r++) { const v = data[r][ci] if (v === null || v === undefined) nulls++ } return { name: col.name, type: col.type, displayType: dt, count: total, nulls, nullPct: total > 0 ? 
(nulls / total) * 100 : 0, } }) } ================================================ FILE: ui/src/lib/utils/uuid.ts ================================================ function bytesToHex(bytes: Uint8Array): string { return Array.from(bytes, (byte) => byte.toString(16).padStart(2, '0')).join('') } export function createUUID(): string { const cryptoObject = globalThis.crypto if (cryptoObject?.randomUUID) { return cryptoObject.randomUUID() } if (cryptoObject?.getRandomValues) { const bytes = new Uint8Array(16) cryptoObject.getRandomValues(bytes) // RFC 4122 v4 bits. bytes[6] = (bytes[6] & 0x0f) | 0x40 bytes[8] = (bytes[8] & 0x3f) | 0x80 const hex = bytesToHex(bytes) return `${hex.slice(0, 8)}-${hex.slice(8, 12)}-${hex.slice(12, 16)}-${hex.slice(16, 20)}-${hex.slice(20)}` } // Last-resort fallback for very restricted environments. const seed = `${Date.now()}-${Math.random()}-${Math.random()}` let hash = 0 for (let i = 0; i < seed.length; i++) { hash = ((hash << 5) - hash) + seed.charCodeAt(i) hash |= 0 } const base = Math.abs(hash).toString(16).padStart(8, '0') return `${base.slice(0, 8)}-${base.slice(0, 4)}-4${base.slice(0, 3)}-a${base.slice(0, 3)}-${base}${base.slice(0, 4)}` } ================================================ FILE: ui/src/main.ts ================================================ import { mount } from 'svelte' import './app.css' import App from './App.svelte' const app = mount(App, { target: document.getElementById('app')!, }) export default app ================================================ FILE: ui/src/pages/Admin.svelte ================================================

Admin Panel

providerSheetOpen = false} >
{ e.preventDefault() void createProvider() }} >

Provider controls which model catalog is available to all users.

skillSheetOpen = false} >
{ e.preventDefault() void saveSkill() }} >

This prompt steers SQL safety, artifact usage, and tool behavior for every chat.

createCHUserSheetOpen = false} >
{ e.preventDefault() void createClickHouseUser() }} >

Create users directly in ClickHouse for the active connection.

Command Preview

{buildCreateCHUserCommandPreview()}
{#if createCHUserErrorMessage}

Create User Error

{createCHUserErrorMessage}
{/if} {#if createCHUserExecutedCommands.length > 0}

Executed Commands

{#each createCHUserExecutedCommands as sql}
{sql}
{/each}
{/if}
editCHUserPasswordSheetOpen = false} >
{ e.preventDefault() void updateClickHouseUserPassword() }} >

User

{selectedCHUserName || '—'}

deletingProvider = null} />
{#if activeTab === 'overview'} {#if statsLoading}
{:else if stats}
Users
{stats.users_count}
Connections
{stats.online} / {stats.connections}
Queries
{stats.query_count}
Logins
{stats.login_count}
{/if}

Connections

{#if connections.length === 0}

No connections found

{:else}
{#each connections as conn}
{conn.name} {conn.id} {conn.online ? 'Online' : 'Offline'}
{/each}
{/if} {:else if activeTab === 'tunnels'}
{#if tunnelTokenPreview} {@const preview = tunnelTokenPreview}

Latest Token: {preview.connectionName}

{preview.token}
{#if preview.connectCmd}

Connect command

{preview.connectCmd}
{/if} {#if preview.serviceCmd}

Service command

{preview.serviceCmd}
{/if}
{/if} {#if tunnelsLoading}
{:else if tunnels.length === 0}

No tunnels configured

{:else}
{#each tunnels as conn} {/each}
Name ID Status Host Created Actions
{conn.name} {conn.id} {conn.online ? 'Online' : 'Offline'} {conn.host_info?.hostname || '—'} {formatTime(conn.created_at)}
{/if} {:else if activeTab === 'users'} {#if usersLoading}
{:else}

Application Users

{#if !usersSyncCheck}
User sync check is unavailable right now (connection offline or auth issue). Application users list may include stale session users.
{/if} {#if users.length === 0}

No users found

{:else}
{#each users as user} {/each}
Username Role Last Login Actions
{user.username}
{#each roleOptions as roleOpt} {/each}
{user.last_login ? formatTime(user.last_login) : '—'} {#if userRoles[user.username]} {/if}
{/if}

ClickHouse Users

{#if chUsers.length === 0}

No ClickHouse users loaded

{:else}
{#each chUsers as row} {/each}
Name Auth Type Storage Default Roles Actions
{row.name} {row.auth_type ?? '—'} {row.storage ?? '—'} {formatCHDefaultRoles(row)}
{/if} {/if} {:else if activeTab === 'brain'} {#if brainLoading}
{:else}

Brain Control Center

Providers
{brainProviders.length}
Active Providers
{brainProviders.filter(p => p.is_active).length}
Models
{brainModels.length}
Active Models
{brainModels.filter(m => m.is_active).length}
{#if brainProviders.length === 0}

No Brain providers configured yet.

{:else}
{#each brainProviders as provider} {/each}
Provider Kind Base URL Key Active Default Actions
{provider.name} {provider.kind} {provider.base_url || '—'} {provider.has_api_key ? 'Configured' : 'Missing'} toggleProvider(provider, 'is_active', (e.target as HTMLInputElement).checked)} /> toggleProvider(provider, 'is_default', true)} />
{/if}

Brain Models

modelProviderFilter = v} />
{#if brainModels.length === 0}

No models synced yet.

{:else}
{#each visibleProvidersForModels() as provider} {@const providerModels = modelsForProvider(provider.id)}
{provider.name} {provider.kind}
{providerModels.filter(m => m.is_active).length} active {providerModels.length} total
{#if providerModels.length > 0} {#each providerModels as model} {/each}
Model Active Default
{model.display_name || model.name} updateModel(model, 'is_active', (e.target as HTMLInputElement).checked)} /> updateModel(model, 'is_default', true)} />
{:else}

No models match current filters for this provider.

{/if}
{/each}
{/if}

Global Brain Skill

Active prompt preview

{truncate(skillForm.content || '', 1200)}
{/if} {:else if activeTab === 'langfuse'} {#if langfuseLoading}
{:else}

Langfuse Observability

{#if langfuseConfig.enabled} Active {:else} Inactive {/if}

Langfuse provides LLM observability for Brain chat — traces, token usage, latency, and auto-scoring for every generation.

{#if langfuseConfig.hasSecretKey || langfuseConfig.publicKey} {/if}
{/if} {/if}
================================================ FILE: ui/src/pages/Brain.svelte ================================================
selectChat(id)} onCreateChat={() => createChat('New Chat')} onRenameChat={renameChat} onDeleteChat={removeChat} />
selectedModelId = v} onDbChange={(v) => { void onHeaderDbChange(v) }} onTableChange={(v) => { void onHeaderTableChange(v) }} />
{#if messages.length === 0} {:else} {#each messages as msg, i (msg.id)} openQueryTab(sql)} /> {/each} {/if}
input = v} onAddContext={addContext} onRemoveContext={removeContext} onClearAllContexts={clearAllContexts} />
renamingChat = null} /> deletingChat = null} /> ================================================ FILE: ui/src/pages/Dashboards.svelte ================================================
{#if !dashboardId}

Dashboards

{#if listLoading}
{:else if dashboards.length === 0}

No dashboards yet

Create a dashboard to visualize your ClickHouse data

{:else}
{#each dashboards as dashboard (dashboard.id)}
openDashboardFromList(dashboard)} role="button" tabindex="0" onkeydown={(e) => { if (e.key === 'Enter') openDashboardFromList(dashboard) }} >

{dashboard.name}

{#if dashboard.description}

{dashboard.description}

{/if}
by {dashboard.created_by} {formatTime(dashboard.updated_at)}
{/each}
{/if}
{:else}
{#if editingTitle} { if (e.key === 'Enter') saveDashboardTitle(); if (e.key === 'Escape') editingTitle = false }} onblur={saveDashboardTitle} /> {:else}

{ editingTitle = true; titleInput = currentDashboard?.name ?? '' }} title="Double-click to rename" > {currentDashboard?.name ?? 'Dashboard'}

{/if} {#if currentDashboard?.description} {currentDashboard.description} {/if}
{#if panelEditorOpen} Panel builder mode {:else} {/if}
{#if panelEditorOpen} panelEditorOpen = false} onsave={handlePanelSaved} /> {:else if detailLoading}
{:else if detailError}
{detailError}
{:else if currentDashboard} { panels = updated }} oneditpanel={openEditPanel} ondeletepanel={requestDeletePanel} /> {/if}
{/if}
showCreateModal = false}>
confirmOpen = false} /> ================================================ FILE: ui/src/pages/Governance.svelte ================================================
{#if govSettings && !govSettings.sync_enabled && !govSettings.banner_dismissed}

Governance background sync is now opt-in.

Your existing data is preserved, but the syncer is paused until you enable it explicitly.

{/if}

Governance

{#if loading && !overview && !tables.length && !queryLog.length && !lineageEdges.length && !users.length && !policies.length}
{:else} {#if activeTab === 'dashboard'} {#if overview}
{overview.database_count}

Databases

{overview.table_count}

Tables

{overview.column_count}

Columns

{overview.user_count}

Users

{overview.query_count_24h}

Queries (24h)

{overview.lineage_edge_count}

Lineage Edges

{overview.policy_count}

Policies

{overview.violation_count}

Violations

{overview.incident_count || 0}

Open Incidents

{overview.tagged_table_count}

Tagged Tables

Violations Trend (7d)

Schema Change Trend (7d)

Sync Status

Status updates from governance sync workers
{#each overview.sync_states ?? [] as syncState}
{syncState.sync_type}
{syncState.last_synced_at ? formatTime(syncState.last_synced_at) : 'Never'} {syncStatusLabel(syncState.status)}
{/each}

Recent Schema Changes

{#if overview.recent_changes && overview.recent_changes.length > 0}
{#each overview.recent_changes as change}
{change.database_name}.{change.table_name} {formatTime(change.detected_at)}

{change.change_type}

{/each}
{:else}

No recent changes

{/if}

Recent Violations

{#if overview.recent_violations && overview.recent_violations.length > 0}
{#each overview.recent_violations as violation}
{violation.policy_name} {violation.severity}

{truncate(violation.violation_detail, 60)}

{formatTime(violation.detected_at)}

{/each}
{:else}

No recent violations

{/if}
{/if} {/if} {#if activeTab === 'tables'}
selectedDatabase = v} placeholder="All Databases" />
{#if filteredTables.length > 0}
{#each filteredTables as table} {/each}
Database Table Engine Rows Size Tags Details
{table.database_name} {table.table_name} {table.engine} {table.total_rows.toLocaleString()} {formatBytes(table.total_bytes)} {#if table.tags && table.tags.length > 0}
{#each table.tags as tag} {tag} {/each}
{:else} - {/if}
{:else}

No tables found

{/if}
{/if} {#if activeTab === 'queries'}
{ queryLimit = Number(v) || 100; void loadQueries(); }} placeholder="Query limit" />
{#if topQueries.length > 0}

Top Queries by Execution Count

{#each topQueries as tq} {/each}
Query Runs
{truncate(tq.sample_query, 140)} {tq.count} runs
{/if} {#if queryLog.length > 0}
{#each queryLog as entry} {/each}
Time User Type Query Duration Rows Details
{formatTime(entry.event_time)} {entry.ch_user} {entry.query_kind} {truncate(entry.query_text, 80)} {entry.duration_ms}ms {entry.read_rows.toLocaleString()}
{:else}

No query logs found

{/if}
{/if} {#if activeTab === 'lineage'}
{lineageEdges.length} edge{lineageEdges.length !== 1 ? 's' : ''}
{#if lineageGraph && lineageGraph.nodes.length > 0}
{ lineageSelectedEdge = edge; lineageQueryText = ''; lineageSheetOpen = true; if (edge.query_id) { fetchQueryByQueryID(edge.query_id) .then((res) => { lineageQueryText = res?.entry?.query_text ?? 'Query text not available'; }) .catch(() => { lineageQueryText = 'Failed to load query text'; }); } }} />
insert_select create_as_select source target current
{:else}

No lineage data. Run a sync to detect data flows.

{/if}
{#if lineageSheetOpen && lineageSelectedEdge} lineageSheetOpen = false} size="lg">
Source
{lineageSelectedEdge.source_database}.{lineageSelectedEdge.source_table}
Target
{lineageSelectedEdge.target_database}.{lineageSelectedEdge.target_table}
Type
{lineageSelectedEdge.edge_type}
User
{lineageSelectedEdge.ch_user}
{#if lineageSelectedEdge.column_edges && lineageSelectedEdge.column_edges.length > 0}
Column Mappings
{#each lineageSelectedEdge.column_edges as ce} {ce.source_column} {ce.target_column} {/each}
{/if}
Query
{#if lineageQueryText}
{lineageQueryText}
{:else}
Loading query...
{/if}
{/if} {/if} {#if activeTab === 'viewgraph'}
{viewGraphData?.nodes?.length ?? 0} node{(viewGraphData?.nodes?.length ?? 0) !== 1 ? 's' : ''}, {viewGraphData?.edges?.length ?? 0} edge{(viewGraphData?.edges?.length ?? 0) !== 1 ? 's' : ''}
{#if viewGraphData && viewGraphData.nodes.length > 0}
view_dependency (source → view) materialized_to (MV → target) source table materialized view target table view
{:else}

No views or materialized views found. Create some views in your ClickHouse instance to see the dependency graph.

{/if}
{/if} {#if activeTab === 'access'}
{#if overPermissions.length > 0}

Over-Permissions Detected

{#each groupedOverPermissions as group} {@const groupExpanded = expandedOverPermissionUsers[group.userName] ?? group.topSeverity === 'critical'}
toggleOverPermissionGroup(group.userName)} onkeydown={(e) => { if (e.key === 'Enter' || e.key === ' ') { e.preventDefault(); toggleOverPermissionGroup(group.userName); } }} class="w-full p-4 text-left flex items-center justify-between gap-3" >

{group.userName}

{group.total} risky grants across {group.databases} {group.databases === 1 ? 'database' : 'databases'}

{#if group.critical > 0} critical {group.critical} {/if} {#if group.warn > 0} warn {group.warn} {/if} {#if group.info > 0} info {group.info} {/if}
{#if groupExpanded}
{#each group.alerts as alert} {@const severity = overPermissionSeverity(alert)}

{alert.reason}

Database: {alert.database_name || '*'}

Privilege: {alert.privilege}

{severity}
{/each}
{/if}
{/each}
{/if}

Users

{#if users.length > 0}
{#each users as user} {/each}
Name Auth Type Host Default Roles
{user.name} {user.auth_type || '-'} {user.host_ip || '-'} {formatDefaultRoles(user.default_roles)}
{:else}

No users found

{/if}

Roles

{#if roles.length > 0}
{#each roles as role}

{role.name}

{/each}
{:else}

No roles found

{/if}

Access Matrix

{#if filteredAccessMatrix.length > 0}
{#each filteredAccessMatrix as entry} {/each}
User Database Table Privilege Grant Option
{entry.user_name} {entry.database_name || '*'} {entry.table_name || '*'} {entry.privilege} {entry.is_direct_grant ? 'Direct' : 'Inherited'}
{:else}

No access grants found

{/if}
{/if} {#if activeTab === 'incidents'}
incidentStatusFilter = v} placeholder="All Statuses" />
incidentSeverityFilter = v} placeholder="All Severities" />

Incident Queue

{incidents.length} incidents
{#if incidents.length > 0}
{#each incidents as incident} {/each}
Title Severity Status Occurrences Assignee Last Seen Details
{incident.title} {incident.severity} {incident.status} {incident.occurrence_count} {incident.assignee || '-'} {formatTime(incident.last_seen_at)}
{:else}

No incidents found for current filters

{/if}
{/if} {#if activeTab === 'policies'}

Policies

{#if policies.length > 0}
{#each policies as policy} {/each}
Policy Scope Role Severity Mode Status Updated Actions

{policy.name}

{#if policy.description}

{truncate(policy.description, 70)}

{/if}
{policy.object_type}{policy.object_database ? ` / ${policy.object_database}` : ''}{policy.object_table ? `.${policy.object_table}` : ''}{policy.object_column ? `.${policy.object_column}` : ''} {policy.required_role || '-'} {policy.severity} {policy.enforcement_mode} {policy.enabled ? 'enabled' : 'disabled'} {formatTime(policy.updated_at)}
{:else}

No policies configured

{/if}

Policy Violations

{#if violations.length > 0}
{#each violations as violation} {/each}
Policy Severity User Detail Detected Actions
{violation.policy_name} {violation.severity} {violation.ch_user} {truncate(violation.violation_detail, 120)} {formatTime(violation.detected_at)}
{:else}

No violations detected

{/if}
{/if} {/if} {#if activeTab === 'querylog'}
qlTimeRange = v} />
qlQueryKind = v} />
qlStatus = v} />
{#if queryLogLoading}
{:else if queryLogData.length === 0}

No query log entries found

{:else}
{#each queryLogData as row, i} expandedRow = expandedRow === i ? null : i} > {#if expandedRow === i} {/if} {/each}
Time User Query Duration Rows Status
{#if expandedRow === i}{:else}{/if} {formatTime(row.event_time)} {row.user} {truncate(row.query ?? '', 60)} {row.query_duration_ms}ms {row.read_rows ?? 0} {#if row.exception_code === 0} OK {:else} Error {/if}
{row.query}
{#if row.exception}

Error: {row.exception}

{/if}
Showing {qlOffset + 1}–{qlOffset + queryLogData.length}
{/if} {/if} {#if activeTab === 'alerts'} {#if alertsLoading}
{:else}

Alerting Control Center

Test recipients

Channels

{#if alertChannels.length === 0}

No alert channels configured.

{:else}
{#each alertChannels as channel} {/each}
Name Type Active Secret Actions
{channel.name} {channel.channel_type} toggleAlertChannel(channel, (e.target as HTMLInputElement).checked)} /> {channel.has_secret ? 'Configured' : 'Missing'}
{/if}

Rules

{#if alertRules.length === 0}

No alert rules configured.

{:else}
{#each alertRules as rule}

{rule.name}

{rule.event_type} · min {rule.severity_min} · {rule.routes.length} routes

Cooldown
{rule.cooldown_seconds}s
Max Attempts
{rule.max_attempts}
Subject Template
{rule.subject_template || 'Default'}
Body Template
{rule.body_template || 'Default'}
{#each rule.routes as route} {/each}
Channel Recipients Delivery Escalation Active
{route.channel_name} ({route.channel_type}) {route.recipients.join(', ')} {route.delivery_mode}{route.delivery_mode === 'digest' ? ` (${route.digest_window_minutes}m)` : ''} {#if route.escalation_channel_name} {route.escalation_channel_name} {#if route.escalation_after_failures > 0} after {route.escalation_after_failures} fail {/if} {:else} — {/if} {route.is_active ? 'yes' : 'no'}
{/each}
{/if}

Recent Alert Events

{#if alertEvents.length === 0}

No alert events yet.

{:else}
{#each alertEvents as evt} {/each}
Time Type Severity Title Status
{formatTime(evt.created_at)} {evt.event_type} {evt.severity} {evt.title} {evt.status}
{/if}
{/if} {/if} {#if activeTab === 'auditlog'}
{ auditLimit = Number(v) || 100; void loadAuditLogs() }} />
auditTimeRange = v} />
auditAction = v} />
auditUsername = v} />
{#if auditLoading}
{:else if auditLogs.length === 0}

No audit logs found

{:else}
{#each auditLogs as log} {/each}
Timestamp Action User Details IP
{formatTime(log.created_at)} {log.action} {log.username ?? '—'} {log.details ?? '—'} {log.ip_address ?? '—'}
{/if} {/if} {#if activeTab === 'settings'}

Governance Sync Settings

Control the background syncer that collects metadata, query history, and access data from your ClickHouse cluster.

{#if govSettingsLoading && !govSettings}
{:else if govSettings}
Status: {#if govSettings.sync_enabled} Enabled {:else} Disabled {/if} {#if govSettings.syncer_running} Running {:else if govSettings.sync_enabled} Idle {/if}
{#if govSettings.updated_at}

Last changed {formatTime(govSettings.updated_at)} {#if govSettings.updated_by}by {govSettings.updated_by}{/if}

{:else}

Never configured (default: disabled)

{/if}
{#if govSettings.sync_enabled} {:else} {/if}
{:else}

Settings unavailable.

{/if}

What governance sync does

Read this before enabling. Sync runs every 5 minutes against your ClickHouse cluster.

What it collects

  • Table and column metadata from system.tables / system.columns
  • Recent queries from system.query_log (filtered: ≥10ms, no self-polls)
  • Users, roles, and grants from system.users / system.grants

Where it's stored

Local SQLite at ./data/ch-ui.db. Never sent externally.

How it authenticates

Borrows ClickHouse credentials from an active admin session. Each borrow is recorded in the audit log (governance.credential_borrow, rate-limited to once per connection per hour).

Retention

30-day rolling window. Older query log and violation rows are pruned automatically at startup and every 5 minutes.

Toggle changes are written to the audit log (governance.sync_toggle). Disabling stops the syncer immediately; collected data is preserved.

{/if}
{#if selectedTable}

Engine

{selectedTable.engine || '-'}

Rows

{selectedTable.total_rows.toLocaleString()}

Size

{formatBytes(selectedTable.total_bytes)}

Partitions

{selectedTable.partition_count}

Columns

{selectedTableColumns.length} total
{#if tableDetailLoading}
{:else if selectedTableColumns.length > 0}
{#each selectedTableColumns as col} {/each}
Column Type Default Tags
{col.column_name} {col.column_type} {col.default_expression || '-'} {#if col.tags?.length}
{#each col.tags as tag} {tag} {/each}
{:else} - {/if}
{:else}

No column metadata available.

{/if}

Governance Notes

{#if tableNotes.length > 0}
{#each tableNotes as note}

{note.comment_text}

{note.created_by || 'unknown'} · {formatTime(note.created_at)}

{/each}
{:else}

No table notes yet.

{/if}
{/if}
{#if selectedQuery}

Kind

{selectedQuery.query_kind}

Duration

{selectedQuery.duration_ms} ms

Read Rows

{selectedQuery.read_rows.toLocaleString()}

Timestamp

{formatTime(selectedQuery.event_time)}

SQL

{selectedQuery.query_text}
{#if selectedQuery.error_message}

Error

{selectedQuery.error_message}

{/if}
{/if}
{#if selectedOverPermissionGroup}

Total Grants

{selectedOverPermissionGroup.total}

Databases

{selectedOverPermissionGroup.databases}

Top Severity

{selectedOverPermissionGroup.topSeverity}

Risky Grants

{#each selectedOverPermissionGroup.alerts as alert} {/each}
Database Privilege Reason Details
{alert.database_name || '*'} {alert.privilege} {truncate(alert.reason, 80)}
{:else if selectedOverPermission}

Grant Scope

{selectedOverPermission.user_name} · {selectedOverPermission.database_name || '*'}. {selectedOverPermission.table_name || '*'} · {selectedOverPermission.privilege}

Reason

{selectedOverPermission.reason}

Last Query Activity

{selectedOverPermission.last_query_time ? `${formatTime(selectedOverPermission.last_query_time)} (${selectedOverPermission.days_since_query ?? 0} days ago)` : 'No query usage found'}

{/if}
incidentCreateSheetOpen = false} >
Title
Assignee
Severity
o.value)} value={incidentForm.severity} onChange={(v) => incidentForm.severity = v} placeholder="Severity" />
Status
o.value)} value={incidentForm.status} onChange={(v) => incidentForm.status = v} placeholder="Status" />
Details
{#if selectedIncident}
Title
Assignee
selectedIncident && (selectedIncident.assignee = (e.currentTarget as HTMLInputElement).value)} />
Severity
o.value)} value={selectedIncident.severity} onChange={(v) => selectedIncident && (selectedIncident.severity = v)} placeholder="Severity" />
Status
o.value)} value={selectedIncident.status} onChange={(v) => selectedIncident && (selectedIncident.status = v)} placeholder="Status" />
Details
Resolution Note

Comments

{#if incidentComments.length > 0}
{#each incidentComments as comment}

{comment.comment_text}

{comment.created_by || 'unknown'} · {formatTime(comment.created_at)}

{/each}
{:else}

No comments yet.

{/if}
{/if}
{ e.preventDefault(); handlePolicySubmit(); }} class="space-y-4" >
policyForm.object_type = v as Policy['object_type']} placeholder="Object Type" />
policyForm.severity = v} placeholder="Severity" />
policyForm.enforcement_mode = v as Policy['enforcement_mode']} placeholder="Mode" />
channelSheetOpen = false} >
{ e.preventDefault(); void createAlertChannelRecord(); }} >

Channels hold delivery credentials for alert notifications.

{#if channelForm.channel_type === 'smtp'}
{:else}
{/if}
ruleSheetOpen = false} >
{ e.preventDefault(); void createAlertRuleRecord(); }} >

Rules map governance/system events to delivery routes and escalation behavior.

Routes

{#if alertChannels.length === 0}

Create at least one alert channel before adding routes.

{/if} {#each ruleRoutesDraft as route, idx}

Route {idx + 1}

{#if ruleRoutesDraft.length > 1} {/if}
{/each}
deletingNoteId = null} /> deletingChannel = null} /> deletingRule = null} /> showEnableConfirm = false} /> showDisableConfirm = false} /> ================================================ FILE: ui/src/pages/Home.svelte ================================================
Workspace Home

Welcome back{session?.user ? `, ${session.user}` : ''}

Start a new query, jump into saved work, or open tools quickly.

{#each quickLinks as item (item.id)} {/each}

Resources

{#each resources as resource (resource.id)}

{resource.title}

{resource.description}

{/each}

Recently Opened

{#if recentTabs.length === 0}

No recent workspace items yet.

{:else}
{#each recentTabs as tab (tab.id)} {@const Icon = recentIcon(tab)} {/each}
{/if}
================================================ FILE: ui/src/pages/Login.svelte ================================================ ================================================ FILE: ui/src/pages/Models.svelte ================================================

Model Pipeline

{#if !loading} {models.length} model{models.length !== 1 ? 's' : ''} {/if}
{#if loading}
Loading...
{:else if models.length === 0}

No models yet

Models are SQL transformations that form a pipeline. They can reference each other with $ref(model_name) and run in dependency order.

{:else} {#if !infoDismissed}

Models are SQL transformations that form a pipeline. Use $ref(model_name) to reference other models. Run Pipeline executes all models in dependency order — if a model fails, its dependents are automatically skipped.

{/if}
{#each pipelines as pipeline (pipeline.anchor_model_id)} {@const pipelineModels = pipeline.model_ids.map(id => modelById.get(id)).filter((m): m is Model => !!m)} {#if pipelineModels.length > 0}
{pipelineModels.length} model{pipelineModels.length !== 1 ? 's' : ''}
{#each pipelineModels as model (model.id)}
selectModel(model.id)} oncontextmenu={(e) => openContextMenu(e, model)} onkeydown={(e: KeyboardEvent) => { if (e.key === 'Enter') selectModel(model.id) }} role="button" tabindex="0" >
{#if model.materialization === 'table'} {:else} {/if} {model.name}
{model.materialization} {model.target_database} {#if model.last_run_at} {formatDate(model.last_run_at)} {/if}
{#if upstreamMap.get(model.id)?.length}
{#each upstreamMap.get(model.id)! as dep} {dep} {/each}
{/if} {#if model.last_error}

{model.last_error}

{:else if model.description}

{model.description}

{/if}
{/each}
{/if} {/each}
{/if}
{#if !loading && pipelines.length > 0} {@const scheduledPipelines = pipelines.filter(p => p.schedule)}
{#if scheduledPipelines.length > 0} {scheduledPipelines.length} pipeline{scheduledPipelines.length !== 1 ? 's' : ''} scheduled {#each scheduledPipelines as sp} {@const anchorModel = modelById.get(sp.anchor_model_id)} · {sp.schedule?.cron} {#if sp.schedule?.last_status} {sp.schedule.last_status} {/if} {/each} {:else} No pipelines scheduled {/if}
{/if}
{#if showDAG} {/if} {#if showHistory} {/if} {#if showSchedule} {/if} { confirmDeleteOpen = false; pendingDeleteId = ''; pendingDeleteName = '' }} /> ================================================ FILE: ui/src/pages/Pipelines.svelte ================================================ {#if selectedPipelineId} { pushPipelineList() loadPipelines() }} /> {:else} { showCreate = true }} onSelect={(id) => { pushPipelineDetail(id) }} onDelete={handleDelete} onStart={handleStart} onStop={handleStop} /> {/if} { showCreate = false; createName = '' }} /> ================================================ FILE: ui/src/pages/SavedQueries.svelte ================================================ e.key === 'Escape' && closeContextMenu()} />

Saved Queries

Total
{totalCount}
Updated 7d
{recentCount}
With Description
{describedCount}
SQL Characters
{totalSqlChars.toLocaleString()}
{#if searchTerm} {/if}
{#if loading}
{:else if queries.length === 0}

No saved queries yet

Save a query from the SQL editor and it will appear here.

{:else if visibleQueries.length === 0}

No query matches your filters.

Try another search, filter, or sorting mode.

{:else}
{#each visibleQueries as query (query.id)}
openContextMenu(e, query)} >

{query.name}

{countLines(query.query)} lines
{#if query.description?.trim()}

{query.description}

{/if}
{sqlPreview(query.query, densityMode === 'compact' ? 2 : 4)}
{formatRelative(query.updated_at)} {query.query.length.toLocaleString()} chars
{/each}
{/if}
{#if selectedQuery}
Updated
{formatDate(selectedQuery.updated_at)}
Created
{formatDate(selectedQuery.created_at)}
Line Count
{countLines(selectedQuery.query)}
Characters
{selectedQuery.query.length.toLocaleString()}
{#if selectedQuery.description?.trim()}
Description

{selectedQuery.description}

{/if}
SQL
{selectedQuery.query}
{/if}
================================================ FILE: ui/src/pages/Schedules.svelte ================================================

Scheduled Queries

{#if loading}
{:else if schedules.length === 0}

No scheduled queries yet

Create a schedule to run saved queries automatically

{:else}
{#each schedules as schedule (schedule.id)} {@const badge = statusBadge(schedule.last_status)} {@const queryRef = savedQueryMap.get(schedule.saved_query_id)}
{schedule.name} {schedule.cron} {schedule.timezone}
{badge.label} Last: {formatTime(schedule.last_run_at)} Next: {formatTime(schedule.next_run_at)} {#if queryRef} {queryRef.name} {/if}
{#if expandedSchedule === schedule.id}
{#if runsLoading}
{:else if runs.length === 0}

No runs yet

{:else}
{#each runs as run} {@const rb = statusBadge(run.status)} {/each}
Started Status Elapsed Rows Error Details
{formatTime(run.started_at)} {rb.label} {run.elapsed_ms}ms {run.rows_affected} {run.error ?? '—'}
{#if runsHasMore}
{/if} {/if}
{/if}
{/each}
{/if}
showModal = false}>
{#if !editingId}

Saved Query

({ value: q.id, label: q.name, hint: q.description || q.query, keywords: `${q.name} ${q.description ?? ''} ${q.query}`, }))} value={formSavedQueryId} emptyText={savedQueriesLoading ? 'Loading saved queries...' : 'No saved queries found'} placeholder="Select a saved query..." disabled={savedQueriesLoading || savedQueries.length === 0} onChange={(id) => formSavedQueryId = id} /> {#if savedQueries.length === 0}

No saved queries available. Create one first in Saved Queries.

{/if}
{/if}

e.g. 0 */6 * * * = every 6 hours

showRunSheet = false}> {#if selectedRun && selectedSchedule} {@const runBadge = statusBadge(selectedRun.status)} {@const saved = savedQueryMap.get(selectedSchedule.saved_query_id)}

Schedule

{selectedSchedule.name}

{selectedSchedule.cron} ({selectedSchedule.timezone})

Run Status

{runBadge.label}

ID: {selectedRun.id}

Started

{formatTime(selectedRun.started_at)}

Finished

{formatTime(selectedRun.finished_at)}

Elapsed

{selectedRun.elapsed_ms} ms

Rows Affected

{selectedRun.rows_affected}

{#if selectedRun.error}

Error

{selectedRun.error}
{/if}

Saved Query

{#if saved} {/if}
{#if saved}

{saved.name}

{saved.query}
{:else}

Saved query metadata not available.

{/if}
{/if}
confirmOpen = false} /> ================================================ FILE: ui/src/pages/Settings.svelte ================================================
CH-UI logo

CH-UI License

Identity, licensing, entitlements, and legal scope controls

{#if licenseState === 'loading'} Checking license... {:else if licenseState === 'pro'} Pro Active {:else if licenseState === 'expired'} Pro Expired {:else} Community Edition {/if} {license?.edition || 'community'}

Edition

{license?.edition || 'community'}

Customer

{license?.customer || 'Open Source Deployment'}

License ID

{license?.license_id || '—'}

Expiration

{formatDate(license?.expires_at)}

{#if activeTab === 'license'}

License Control

{#if loading}
Loading license status...
{:else if proActive}
Pro License Active ID: {license?.license_id || '—'}

Customer

{license?.customer || '—'}

Expires

{formatDate(license?.expires_at)}

{#if showConfirmDeactivate}

Deactivate this Pro license and downgrade to Community Edition?

{:else} {/if}
{:else if expiredLicense}
License Expired
Customer: {license?.customer || '—'}
Expired on {formatDate(license?.expires_at)}

Activate a new Pro license to restore proprietary features.

{:else}
Community Edition

Core capabilities are enabled under Apache-2.0. Activate Pro to unlock proprietary modules.

{/if} {#if !proActive}

Activate Pro License

{#if inputMode === 'idle'}
{:else}
{/if}
{/if}
{/if} {#if activeTab === 'license' || activeTab === 'instance' || activeTab === 'legal'} {/if}
================================================ FILE: ui/svelte.config.js ================================================ import { vitePreprocess } from '@sveltejs/vite-plugin-svelte' /** @type {import("@sveltejs/vite-plugin-svelte").SvelteConfig} */ export default { // Consult https://svelte.dev/docs#compile-time-svelte-preprocess // for more information about preprocessors preprocess: vitePreprocess(), } ================================================ FILE: ui/tsconfig.app.json ================================================ { "extends": "@tsconfig/svelte/tsconfig.json", "compilerOptions": { "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo", "target": "ES2022", "useDefineForClassFields": true, "module": "ESNext", "types": ["svelte", "vite/client"], "composite": true, "emitDeclarationOnly": true, /** * Typecheck JS in `.svelte` and `.js` files by default. * Disable checkJs if you'd like to use dynamic types in JS. * Note that setting allowJs false does not prevent the use * of JS in `.svelte` files. 
*/ "allowJs": true, "checkJs": true, "moduleDetection": "force" }, "include": ["src/**/*.ts", "src/**/*.js", "src/**/*.svelte"] } ================================================ FILE: ui/tsconfig.json ================================================ { "files": [], "references": [ { "path": "./tsconfig.app.json" }, { "path": "./tsconfig.node.json" } ] } ================================================ FILE: ui/tsconfig.node.json ================================================ { "compilerOptions": { "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo", "target": "ES2023", "lib": ["ES2023"], "module": "ESNext", "types": ["node"], "skipLibCheck": true, /* Bundler mode */ "moduleResolution": "bundler", "allowImportingTsExtensions": true, "verbatimModuleSyntax": true, "moduleDetection": "force", "composite": true, "emitDeclarationOnly": true, /* Linting */ "strict": true, "noUnusedLocals": true, "noUnusedParameters": true, "erasableSyntaxOnly": true, "noFallthroughCasesInSwitch": true, "noUncheckedSideEffectImports": true }, "include": ["vite.config.ts"] } ================================================ FILE: ui/vite.config.d.ts ================================================ declare const _default: import("vite").UserConfig; export default _default; ================================================ FILE: ui/vite.config.ts ================================================ import { defineConfig } from 'vite' import { svelte } from '@sveltejs/vite-plugin-svelte' import tailwindcss from '@tailwindcss/vite' export default defineConfig({ appType: 'spa', plugins: [svelte(), tailwindcss()], base: process.env.VITE_BASE_PATH ? 
process.env.VITE_BASE_PATH + '/' : '/', resolve: { dedupe: [ '@codemirror/state', '@codemirror/view', '@codemirror/language', '@codemirror/autocomplete', '@codemirror/commands', '@codemirror/search', '@lezer/common', '@lezer/highlight', '@lezer/lr', ], }, server: { host: '127.0.0.1', port: 5173, proxy: { '/api': 'http://127.0.0.1:3488', '/connect': { target: 'ws://127.0.0.1:3488', ws: true }, '/health': 'http://127.0.0.1:3488', '/install': 'http://127.0.0.1:3488', '/download': 'http://127.0.0.1:3488', }, }, preview: { host: '127.0.0.1', }, build: { target: 'es2022', minify: process.env.CHUI_VITE_MINIFY !== '0', rollupOptions: { output: { manualChunks(id) { if (!id.includes('node_modules')) return undefined if (id.includes('@codemirror') || id.includes('@lezer')) return 'codemirror' if (id.includes('lucide-svelte')) return 'icons' if (id.includes('uplot')) return 'charts' return 'vendor' }, }, }, }, }) ================================================ FILE: ui/vitest.config.ts ================================================ import { defineConfig } from 'vitest/config' export default defineConfig({ test: { environment: 'node', include: ['src/**/*.test.ts'], }, })